[
  {
    "path": ".circleci/config.yml",
    "content": "version: 2\n\n## Definitions\nbuild_allways: &build_allways\n  filters:\n    tags:\n      only: /.*/\ndefaults: &defaults\n  environment:\n    CONTROLLER_IMAGE_NAME: kubeless/function-controller\n    BUILDER_IMAGE_NAME: kubeless/function-image-builder\n    CGO_ENABLED: \"0\"\n    TEST_DEBUG: \"1\"\n    GKE_VERSION: 1.12\n    MINIKUBE_VERSION: v1.2.0\n    MANIFESTS: kubeless kubeless-non-rbac kubeless-openshift\nexports: &exports\n  # It is not possible to resolve env vars in the environment section:\n  # https://discuss.circleci.com/t/using-environment-variables-in-config-yml-not-working/14237\n  run: |\n    CONTROLLER_TAG=${CIRCLE_TAG:-build-$CIRCLE_WORKFLOW_ID}\n    echo \"export CONTROLLER_TAG=${CONTROLLER_TAG}\" >> $BASH_ENV\n    echo \"export CONTROLLER_IMAGE=${CONTROLLER_IMAGE_NAME}:${CONTROLLER_TAG}\" >> $BASH_ENV\n    echo \"export FUNCTION_IMAGE_BUILDER=${BUILDER_IMAGE_NAME}:${CONTROLLER_TAG}\" >> $BASH_ENV\n    echo \"export KUBECFG_JPATH=/home/circleci/src/github.com/kubeless/kubeless/ksonnet-lib\" >> $BASH_ENV\n    echo \"export PATH=$(pwd)/bats/libexec:$GOPATH/bin:$PATH\" >> $BASH_ENV\n    echo \"export GIT_SHA1=${CIRCLE_SHA1}\" >> $BASH_ENV\nrestore_workspace: &restore_workspace\n  run: |\n    make bootstrap\n    sudo cp -r /tmp/go/bin/* /usr/local/bin/\n    cp -r /tmp/go/*yaml .\n#### End of definitions\n\nworkflows:\n  version: 2\n  kubeless:\n    jobs:\n      - build:\n          <<: *build_allways\n      - minikube:\n          <<: *build_allways\n          requires:\n            - build\n      - build-cross-binaries:\n          <<: *build_allways\n          requires:\n            - build\n      - minikube_build_functions:\n          <<: *build_allways\n          requires:\n            - build\n      - GKE:\n          <<: *build_allways\n          requires:\n            - build\n      - push_latest_images:\n          filters:\n            branches:\n              only: master\n          requires:\n            - minikube\n            
- minikube_build_functions\n            - GKE\n      - release:\n          filters:\n            tags:\n              only: /v.*/\n            branches:\n              ignore: /.*/\n          requires:\n            - minikube\n            - minikube_build_functions\n            - GKE\njobs:\n  build:\n    <<: *defaults\n    docker:\n      - image: circleci/golang:1.15\n    steps:\n      - checkout\n      - restore_cache:\n          keys:\n            - go-mod-v4-{{ checksum \"go.sum\" }}\n      - <<: *exports\n      - run: go mod download\n      - run: make bootstrap\n      - run: make VERSION=${CONTROLLER_TAG} binary\n      - run: make test\n      - run: make validation\n      - run: make all-yaml\n      - run: |\n          mkdir build-manifests\n          IFS=' ' read -r -a manifests <<< \"$MANIFESTS\"\n          for f in \"${manifests[@]}\"; do\n            sed -i.bak 's/:latest/'\":${CONTROLLER_TAG}\"'/g' ${f}.yaml\n            cp ${f}.yaml build-manifests/\n          done\n      - persist_to_workspace:\n          root: /go\n          paths:\n            - ./bin\n      - persist_to_workspace:\n          root: ./\n          paths:\n            - ./*yaml\n      - store_artifacts:\n          path: /go/bin/kubeless\n          destination: ./bin/kubeless\n      - store_artifacts:\n          path: ./build-manifests/\n      - save_cache:\n          key: go-mod-v4-{{ checksum \"go.sum\" }}\n          paths:\n            - /go/pkg/mod\n  minikube:\n    <<: *defaults\n    machine:\n      image: ubuntu-1604:202007-01\n    steps:\n      - checkout\n      - run: sudo apt-get update -y\n      - run: sudo apt-get install -y tar gzip bzip2 xz-utils\n      - attach_workspace:\n          at: /tmp/go\n      - <<: *exports\n      - <<: *restore_workspace\n      - run: ./script/pull-or-build-image.sh function-controller\n      - run: ./script/integration-tests minikube deployment\n      - run: ./script/integration-tests minikube basic\n  build-cross-binaries:\n    <<: *defaults\n   
 docker:\n      - image: circleci/golang:1.15\n    steps:\n      - <<: *exports\n      - checkout\n      - attach_workspace:\n          at: /tmp/go\n      - <<: *restore_workspace\n      - run: make VERSION=${CIRCLE_TAG} binary-cross\n      - store_artifacts:\n          path: bundles\n  minikube_build_functions:\n    <<: *defaults\n    machine:\n      image: ubuntu-1604:202007-01\n    steps:\n      - checkout\n      - <<: *exports\n      - attach_workspace:\n          at: /tmp/go\n      - <<: *restore_workspace\n      - run: ./script/pull-or-build-image.sh function-controller\n      - run: ./script/pull-or-build-image.sh function-image-builder\n      - run: \"echo '{\\\"insecure-registries\\\" : [\\\"0.0.0.0/0\\\"]}' > /tmp/daemon.json\"\n      - run: sudo mv /tmp/daemon.json /etc/docker/daemon.json\n      - run: sudo service docker restart\n      - run: docker info\n      - run: docker run -d -p 5000:5000 --restart=always --name registry -v /data/docker-registry:/var/lib/registry registry:2\n      - run: \"sed -i.bak 's/enable-build-step: \\\"false\\\"/enable-build-step: \\\"true\\\"/g' kubeless.yaml\"\n      - run: \"sed -i.bak 's/function-registry-tls-verify: \\\"true\\\"/function-registry-tls-verify: \\\"false\\\"/g' kubeless.yaml\"\n      - run: ./script/integration-tests minikube deployment\n      - run: ./script/integration-tests minikube prebuilt_functions\n  GKE:\n    <<: *defaults\n    docker:\n      - image: circleci/golang:1.15\n    steps:\n      - run: |\n          # In case of GKE we will only want to build if it is\n          # a build of a branch in the kubeless/kubeless repository\n          if [[ -n \"$GKE_ADMIN\" && -z \"$CIRCLE_PULL_REQUESTS\" ]]; then\n            export SHOULD_TEST=1\n          fi\n          if [[ \"$SHOULD_TEST\" != \"1\" ]]; then\n            circleci step halt\n          fi\n      - checkout\n      - <<: *exports\n      - attach_workspace:\n          at: /tmp/go\n      - <<: *restore_workspace\n      - setup_remote_docker\n 
     - run: ./script/enable-gcloud.sh $(pwd) > /dev/null\n      - run: echo \"export ESCAPED_GKE_CLUSTER=$(echo ${GKE_CLUSTER}-ci-${CIRCLE_BRANCH:-$CIRCLE_TAG} | sed 's/[^a-z0-9-]//g')\" >> $BASH_ENV\n      - run: ./script/start-gke-env.sh $ESCAPED_GKE_CLUSTER $ZONE $GKE_VERSION $GKE_ADMIN > /dev/null\n      - run: ./script/pull-or-build-image.sh function-controller\n      - run: ./script/integration-tests gke_${GKE_PROJECT}_${ZONE}_${ESCAPED_GKE_CLUSTER} deployment\n      - run: ./script/integration-tests gke_${GKE_PROJECT}_${ZONE}_${ESCAPED_GKE_CLUSTER} basic\n      - run: ./script/integration-tests gke_${GKE_PROJECT}_${ZONE}_${ESCAPED_GKE_CLUSTER} cronjob\n  push_latest_images:\n    <<: *defaults\n    docker:\n      - image: circleci/golang:1.15\n    steps:\n      - <<: *exports\n      - setup_remote_docker\n      - run: docker login -u=\"$DOCKER_USERNAME\" -p=\"$DOCKER_PASSWORD\"   \n      - run: |\n          images=( \n            $CONTROLLER_IMAGE_NAME\n            $BUILDER_IMAGE_NAME \n          )\n          for image in \"${images[@]}\"; do\n            echo \"Pulling ${image}:${CONTROLLER_TAG}\"\n            docker pull ${image}:${CONTROLLER_TAG}\n            docker tag ${image}:${CONTROLLER_TAG} ${image}:latest\n            docker push ${image}:latest\n          done\n  release:\n    <<: *defaults\n    docker:\n      - image: circleci/golang:1.15\n    steps:\n      - <<: *exports\n      - checkout\n      - attach_workspace:\n          at: /tmp/go\n      - <<: *restore_workspace\n      - run: make VERSION=${CIRCLE_TAG} binary-cross\n      - run: for d in bundles/kubeless_*; do zip -r9 $d.zip $d/; done\n      - run: ./script/create_release.sh ${CIRCLE_TAG} \"${MANIFESTS}\"\n"
  },
  {
    "path": ".github/PULL_REQUEST_TEMPLATE.md",
    "content": "**Issue Ref**: [Issue number related to this PR or None]\n \n**Description**: \n\n[PR Description]\n\n**TODOs**:\n - [ ] Ready to review\n - [ ] Automated Tests\n - [ ] Docs\n"
  },
  {
    "path": ".github/issue_template.md",
    "content": "**Is this a BUG REPORT or FEATURE REQUEST?**:\n\n**What happened**:\n\n**What you expected to happen**:\n\n**How to reproduce it (as minimally and precisely as possible)**:\n\n**Anything else we need to know?**:\n\n**Environment**:\n- Kubernetes version (use `kubectl version`):\n- Kubeless version (use `kubeless version`):\n- Cloud provider or physical cluster:\n"
  },
  {
    "path": ".gitignore",
    "content": "### Go ###\n# Binaries for programs and plugins\n*.exe\n*.dll\n*.so\n*.dylib\n\n# Test binary, build with `go test -c`\n*.test\n\n# Output of the go coverage tool, specifically when used with LiteIDE\n*.out\n\n# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736\n.glide/\n\n### Linux ###\n*~\n\n# temporary files which can be created if a process still has a handle open of a deleted file\n.fuse_hidden*\n\n# KDE directory preferences\n.directory\n\n# Linux trash folder which might appear on any partition or disk\n.Trash-*\n\n# .nfs files are created when an open file is removed but is still being accessed\n.nfs*\n\n### OSX ###\n*.DS_Store\n.AppleDouble\n.LSOverride\n\n### vscode ###\n.vscode/\n\n# Icon must end with two \\r\nIcon\n\n\n# Thumbnails\n._*\n\n# Files that might appear in the root of a volume\n.DocumentRevisions-V100\n.fseventsd\n.Spotlight-V100\n.TemporaryItems\n.Trashes\n.VolumeIcon.icns\n.com.apple.timemachine.donotpresent\n\n# Directories potentially created on remote AFP share\n.AppleDB\n.AppleDesktop\nNetwork Trash Folder\nTemporary Items\n.apdisk\n\n### Vim ###\n# swap\n[._]*.s[a-v][a-z]\n[._]*.sw[a-p]\n[._]s[a-v][a-z]\n[._]sw[a-p]\n# session\nSession.vim\n# temporary\n.netrwhist\n# auto-generated tag files\ntags\n\n\n## Ignore Visual Studio temporary files, build results, and\n## files generated by popular Visual Studio add-ons.\n\n# User-specific files\n*.suo\n*.user\n*.userosscache\n*.sln.docstates\n\n# Build results\n[Dd]ebug/\n[Dd]ebugPublic/\n[Rr]elease/\n[Rr]eleases/\nx64/\nx86/\nbld/\n[Bb]in/\n[Oo]bj/\n[Ll]og/\n\n# JVM\n.classpath\n.project\n.settings\n\n# Visual Studio 2015 cache/options directory\n.vs/\n\n# .NET 
Core\nproject.lock.json\nproject.fragment.lock.json\nartifacts/\n**/Properties/launchSettings.json\n\n*_i.c\n*_p.c\n*_i.h\n*.ilk\n*.meta\n*.obj\n*.pch\n*.pdb\n*.pgc\n*.pgd\n*.rsp\n*.sbr\n*.tlb\n*.tli\n*.tlh\n*.tmp\n*.tmp_proj\n*.log\n*.vspscc\n*.vssscc\n.builds\n*.pidb\n*.svclog\n*.scc\n\n# NuGet Packages\n*.nupkg\n# The packages folder can be ignored because of Package Restore\n**/packages/*\n# except build/, which is used as an MSBuild target.\n!**/packages/build/\n# Uncomment if necessary however generally it will be regenerated when needed\n#!**/packages/repositories.config\n# NuGet v3's project.json files produces more ignorable files\n*.nuget.props\n*.nuget.targets\n\n# IDEA Files\n.idea/\n*.iml\n*.ipr\n*.iws\n\n# Kubeless specific\nbats/\nbundles/\ndocker/function-controller/kubeless-function-controller\ndocker/function-image-builder/imbuilder\nksonnet-lib/\nkubeless-openshift.yaml\nkubeless-non-rbac.yaml\nkubeless.yaml\nkafka-zookeeper.yaml\nkafka-zookeeper-openshift.yaml\nnats.yaml\nkinesis.yaml\n"
  },
  {
    "path": "CODE_OF_CONDUCT.md",
    "content": "# Contributor Covenant Code of Conduct\n\n## Our Pledge\n\nIn the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.\n\n## Our Standards\n\nExamples of behavior that contributes to creating a positive environment include:\n\n* Using welcoming and inclusive language\n* Being respectful of differing viewpoints and experiences\n* Gracefully accepting constructive criticism\n* Focusing on what is best for the community\n* Showing empathy towards other community members\n\nExamples of unacceptable behavior by participants include:\n\n* The use of sexualized language or imagery and unwelcome sexual attention or advances\n* Trolling, insulting/derogatory comments, and personal or political attacks\n* Public or private harassment\n* Publishing others' private information, such as a physical or electronic address, without explicit permission\n* Other conduct which could reasonably be considered inappropriate in a professional setting\n\n## Our Responsibilities\n\nProject maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.\n\nProject maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.\n\n## Scope\n\nThis Code of Conduct applies both within project spaces and in public spaces when an individual is representing 
the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.\n\n## Enforcement\n\nInstances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at kubernetes@bitnami.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.\n\nProject maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.\n\n## Attribution\n\nThis Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]\n\n[homepage]: http://contributor-covenant.org\n[version]: http://contributor-covenant.org/version/1/4/\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# Contributing Guidelines\n\n## License and CLA\n\nThe Kubeless license is Apache Software License V2\n\nWe do not currently ask for a Contributor License Agreement to be signed.\n\n## Support Channels\n\nWhether you are a user or contributor, official support channels include:\n\n- GitHub [issues](https://github.com/kubeless/kubeless/issues/new)\n- Slack: #kubeless room in the [Kubernetes Slack](http://slack.k8s.io/)\n\nBefore opening a new issue or submitting a new pull request, it's helpful to search the project - it's likely that another user has already reported the issue you're facing, or it's a known issue that we're already aware of.\n\n## How to become a contributor and submit your own code\n\n### Setup your development environment\n\nConsult the [Developer's guide](./docs/dev-guide.md) to setup yourself up.\n\n### Contributing a patch\n\n1. Submit an issue describing your proposed change to the repo in question.\n2. The [repo owners](OWNERS) will respond to your issue promptly.\n3. If your proposed change is accepted, fork the desired repo, develop and test your code changes.\n4. Submit a pull request making sure you fill up clearly the description, point out the particular\n   issue your PR is mitigating, and ask for code review. If the PR is related to Kafka, include at least the tag [Kafka] in the title. You will be asked to add tests (either unit or e2e tests depending on the patch) and update any affected documentation.\n\n## Issues\n\nIssues are used as the primary method for tracking anything to do with the Kubeless project.\n\n### Issue Type\n\n* Question: These are support or functionality inquiries that we want to have a record of for future reference. Generally these are questions that are too complex or large to store in the Slack channel or have particular interest to the community as a whole. 
Depending on the discussion, these can turn into \"Feature\" or \"Bug\" issues.\n* Proposal: Used for items (like this one) that propose new ideas or functionality that require a larger community discussion. This allows for feedback from others in the community before a feature is actually developed. This is not needed for small additions. Final word on whether or not a feature needs a proposal is up to the core maintainers. All issues that are proposals should both have a label and an issue title of \"Proposal: [the rest of the title].\" A proposal can become a \"Feature\" and does not require a milestone.\n* Features: These track specific feature requests and ideas until they are complete. They can evolve from a \"Proposal\" or can be submitted individually depending on the size.\n* Bugs: These track bugs with the code or problems with the documentation (i.e. missing or incomplete).\n\n\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"{}\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright (c) 2016-2017 Bitnami\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "Makefile",
    "content": "GO = go\nGO_FLAGS =\nGOFMT = gofmt\nKUBECFG = kubecfg\nDOCKER = docker\nCONTROLLER_IMAGE = kubeless-function-controller:latest\nFUNCTION_IMAGE_BUILDER = kubeless-function-image-builder:latest\nOS = linux\nARCH = amd64\nBUNDLES = bundles\nGO_PACKAGES = ./cmd/... ./pkg/...\nGO_FILES := $(shell find $(shell $(GO) list -f '{{.Dir}}' $(GO_PACKAGES)) -name \\*.go)\n\nexport KUBECFG_JPATH := $(CURDIR)/ksonnet-lib\nexport PATH := $(PATH):$(CURDIR)/bats/bin\n\n.PHONY: all\n\nKUBELESS_ENVS := \\\n\t-e OS_PLATFORM_ARG \\\n\t-e OS_ARCH_ARG \\\n\ndefault: binary\n\nbinary:\n\tCGO_ENABLED=0 ./script/binary\n\nbinary-cross:\n\t./script/binary-cli\n\n\n%.yaml: %.jsonnet\n\t$(KUBECFG) show -U https://raw.githubusercontent.com/kubeless/runtimes/master -o yaml $< > $@.tmp\n\tmv $@.tmp $@\n\nall-yaml: kubeless.yaml kubeless-non-rbac.yaml kubeless-openshift.yaml\n\nkubeless.yaml: kubeless.jsonnet kubeless-non-rbac.jsonnet\n\nkubeless-non-rbac.yaml: kubeless-non-rbac.jsonnet\n\nkubeless-openshift.yaml: kubeless-openshift.jsonnet\n\ndocker/function-controller: controller-build\n\tcp $(BUNDLES)/kubeless_$(OS)-$(ARCH)/kubeless-function-controller $@\n\ncontroller-build:\n\t./script/binary-controller -os=$(OS) -arch=$(ARCH)\n\nfunction-controller: docker/function-controller\n\t$(DOCKER) build -t $(CONTROLLER_IMAGE) $<\n\ndocker/function-image-builder: function-image-builder-build\n\tcp $(BUNDLES)/kubeless_$(OS)-$(ARCH)/imbuilder $@\n\nfunction-image-builder-build:\n\t./script/binary-controller -os=$(OS) -arch=$(ARCH) imbuilder github.com/kubeless/kubeless/pkg/function-image-builder\n\nfunction-image-builder: docker/function-image-builder\n\t$(DOCKER) build -t $(FUNCTION_IMAGE_BUILDER) $<\n\nupdate:\n\t./hack/update-codegen.sh\n\ntest:\n\t$(GO) test $(GO_FLAGS) $(GO_PACKAGES)\n\nvalidation:\n\t./script/validate-lint\n\t./script/validate-gofmt\n\t./script/validate-git-marks\n\nintegration-tests:\n\t./script/integration-tests minikube deployment\n\t./script/integration-tests 
minikube basic\n\nfmt:\n\t$(GOFMT) -s -w $(GO_FILES)\n\nbats:\n\tgit clone --branch=v0.4.0 --depth=1 https://github.com/sstephenson/bats.git\n\nksonnet-lib:\n\tgit clone --depth=1 https://github.com/ksonnet/ksonnet-lib.git\n\n.PHONY: bootstrap\nbootstrap: bats ksonnet-lib\n\n\tGO111MODULE=\"off\" go get -u github.com/mitchellh/gox\n\tGO111MODULE=\"off\" go get -u golang.org/x/lint/golint\n\n\t@if ! which kubecfg >/dev/null; then \\\n\tsudo wget -q -O /usr/local/bin/kubecfg https://github.com/ksonnet/kubecfg/releases/download/v0.9.0/kubecfg-$$(go env GOOS)-$$(go env GOARCH); \\\n\tsudo chmod +x /usr/local/bin/kubecfg; \\\n\tfi\n\n\t@if ! which kubectl >/dev/null; then \\\n\tKUBECTL_VERSION=$$(wget -qO- https://storage.googleapis.com/kubernetes-release/release/stable.txt); \\\n\tsudo wget -q -O /usr/local/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/$$KUBECTL_VERSION/bin/$$(go env GOOS)/$$(go env GOARCH)/kubectl; \\\n\tsudo chmod +x /usr/local/bin/kubectl; \\\n\tfi\n"
  },
  {
    "path": "OWNERS",
    "content": "Kubeless - A Bitnami Project\n\nEngineering manager:\n  - ppbaena\n\nEmeritus maintainers:\n  - ngtuna\n  - andresmgot\n  - anguslees\n  - sebgoa\n\n"
  },
  {
    "path": "README.md",
    "content": "# <img src=\"https://cloud.githubusercontent.com/assets/4056725/25480209/1d5bf83c-2b48-11e7-8db8-bcd650f31297.png\" alt=\"Kubeless logo\" width=\"400\">\n\n[![CircleCI](https://circleci.com/gh/kubeless/kubeless.svg?style=svg)](https://circleci.com/gh/kubeless/kubeless)\n[![Slack](https://img.shields.io/badge/slack-join%20chat%20%E2%86%92-e01563.svg)](http://slack.k8s.io)\n[![Not Maintained](https://img.shields.io/badge/Maintenance%20Level-Not%20Maintained-yellow.svg)](https://gist.github.com/cheerfulstoic/d107229326a01ff0f333a1d3476e068d)\n\n## WARNING: Kubeless is no longer actively maintained by VMware.\n\nVMware has made the difficult decision to stop driving this project and therefore we will no longer actively respond to issues or pull requests. If you would like to take over maintaining this project independently from VMware, please let us know so we can add a link to your forked project here.\n\nThank You.\n\n## Overview\n\n`kubeless` is a Kubernetes-native serverless framework that lets you deploy small bits of code without having to worry about the underlying infrastructure plumbing. It leverages Kubernetes resources to provide auto-scaling, API routing, monitoring, troubleshooting and more.\n\nKubeless stands out as we use a [Custom Resource Definition](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/) to be able to create functions as custom kubernetes resources. We then run an in-cluster controller that watches these custom resources and launches _runtimes_ on-demand. The controller dynamically injects the functions code into the runtimes and makes them available over HTTP or via a PubSub mechanism.\n\nKubeless is purely open-source and non-affiliated to any commercial organization. Chime in at any time, we would love the help and feedback !\n\n## Tools\n\n- A [UI](https://github.com/kubeless/kubeless-ui) is available. 
It can run locally or in-cluster.\n- A [serverless framework plugin](https://github.com/serverless/serverless-kubeless) is available.\n\n## Quick start\n\nCheck out the instructions to quickly set up Kubeless [here](http://kubeless.io/docs/quick-start).\n\n## Building\n\nConsult the [developer's guide](docs/dev-guide.md) for a complete set of instructions\nto build kubeless.\n\n## Compatibility Matrix with Kubernetes\n\nKubeless fully supports Kubernetes versions greater than 1.9 (tested until 1.15). For other versions some of the features in Kubeless may not be available. Our CI runs tests against two different platforms: GKE (1.12) and Minikube (1.15). Other platforms are supported but full compatibility cannot be assured.\n\n## _Roadmap_\n\nWe would love to get your help, feel free to lend a hand. We are currently looking to implement the following high level features:\n\n- Add other runtimes, currently Golang, Python, NodeJS, Ruby, PHP, .NET and Ballerina are supported. We are also providing a way to use custom runtime. Please check [this doc](./docs/runtimes.md) for more details.\n- Investigate other messaging bus (e.g SQS, rabbitMQ)\n- Optimize for functions startup time\n- Add distributed tracing (maybe using istio)\n\n## Community\n\n**Issues**: If you find any issues, please [file it](https://github.com/kubeless/kubeless/issues).\n\n**Slack**: We're fairly active on [slack](http://slack.k8s.io) and you can find us in the #kubeless channel.\n"
  },
  {
    "path": "cmd/function-controller/function-controller.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Kubeless controller binary.\n//\n// See github.com/kubeless/kubeless/tree/master/pkg/controller\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os/signal\"\n\t\"syscall\"\n\n\tmonitoringv1alpha1 \"github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1\"\n\t\"github.com/kubeless/kubeless/pkg/controller\"\n\t\"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/kubeless/kubeless/pkg/version\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n)\n\nconst (\n\tglobalUsage = `` //TODO: adding explanation\n)\n\nvar rootCmd = &cobra.Command{\n\tUse:   \"kubeless-controller\",\n\tShort: \"Kubeless controller\",\n\tLong:  globalUsage,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tkubelessClient, err := utils.GetFunctionClientInCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Cannot get kubeless client: %v\", err)\n\t\t}\n\n\t\tfunctionCfg := controller.Config{\n\t\t\tKubeCli:        utils.GetClient(),\n\t\t\tFunctionClient: kubelessClient,\n\t\t}\n\n\t\trestCfg, err := utils.GetInClusterConfig()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Cannot get REST client: %v\", err)\n\t\t}\n\t\t// ServiceMonitor client is needed for handling monitoring resources\n\t\tsmclient, err := monitoringv1alpha1.NewForConfig(restCfg)\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tfunctionController := 
controller.NewFunctionController(functionCfg, smclient)\n\n\t\tstopCh := make(chan struct{})\n\t\tdefer close(stopCh)\n\n\t\tgo functionController.Run(stopCh)\n\n\t\tsigterm := make(chan os.Signal, 1)\n\t\tsignal.Notify(sigterm, syscall.SIGTERM)\n\t\tsignal.Notify(sigterm, syscall.SIGINT)\n\t\t<-sigterm\n\t},\n}\n\nfunc main() {\n\tlogrus.Infof(\"Running Kubeless controller manager version: %v\", version.Version)\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}\n"
  },
  {
    "path": "cmd/kubeless/autoscale/autoscale.go",
    "content": "/*\nCopyright 2016 Skippbox, Ltd.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n    http://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage autoscale\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com/spf13/cobra\"\n\t\"k8s.io/api/autoscaling/v2beta1\"\n\t\"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\n// AutoscaleCmd contains first-class command for autoscale\nvar AutoscaleCmd = &cobra.Command{\n\tUse:   \"autoscale SUBCOMMAND\",\n\tShort: \"manage autoscale to function on Kubeless\",\n\tLong:  `autoscale command allows user to list, create, delete autoscale rule for function on Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcmd.Help()\n\t},\n}\n\nfunc init() {\n\tcmds := []*cobra.Command{autoscaleCreateCmd, autoscaleListCmd, autoscaleDeleteCmd}\n\n\tfor _, cmd := range cmds {\n\t\tAutoscaleCmd.AddCommand(cmd)\n\t\tcmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the autoscale\")\n\n\t}\n}\n\nfunc getHorizontalAutoscaleDefinition(name, ns, metric string, min, max int32, value string, labels map[string]string) (v2beta1.HorizontalPodAutoscaler, error) {\n\tm := []v2beta1.MetricSpec{}\n\tswitch metric {\n\tcase \"cpu\":\n\t\ti, err := strconv.ParseInt(value, 10, 32)\n\t\tif err != nil {\n\t\t\treturn v2beta1.HorizontalPodAutoscaler{}, err\n\t\t}\n\t\ti32 := int32(i)\n\t\tm = []v2beta1.MetricSpec{\n\t\t\t{\n\t\t\t\tType: v2beta1.ResourceMetricSourceType,\n\t\t\t\tResource: 
&v2beta1.ResourceMetricSource{\n\t\t\t\t\tName:                     v1.ResourceCPU,\n\t\t\t\t\tTargetAverageUtilization: &i32,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\tcase \"qps\":\n\t\tq, err := resource.ParseQuantity(value)\n\t\tif err != nil {\n\t\t\treturn v2beta1.HorizontalPodAutoscaler{}, err\n\t\t}\n\t\tm = []v2beta1.MetricSpec{\n\t\t\t{\n\t\t\t\tType: v2beta1.ObjectMetricSourceType,\n\t\t\t\tObject: &v2beta1.ObjectMetricSource{\n\t\t\t\t\tMetricName:  \"function_calls\",\n\t\t\t\t\tTargetValue: q,\n\t\t\t\t\tTarget: v2beta1.CrossVersionObjectReference{\n\t\t\t\t\t\tKind: \"Service\",\n\t\t\t\t\t\tName: name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tif err != nil {\n\t\t\treturn v2beta1.HorizontalPodAutoscaler{}, err\n\t\t}\n\tdefault:\n\t\treturn v2beta1.HorizontalPodAutoscaler{}, fmt.Errorf(\"metric %s is not supported\", metric)\n\t}\n\n\treturn v2beta1.HorizontalPodAutoscaler{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"autoscaling/v2beta1\",\n\t\t\tKind:       \"HorizontalPodAutoscaler\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      name,\n\t\t\tNamespace: ns,\n\t\t\tLabels:    labels,\n\t\t},\n\t\tSpec: v2beta1.HorizontalPodAutoscalerSpec{\n\t\t\tScaleTargetRef: v2beta1.CrossVersionObjectReference{\n\t\t\t\tAPIVersion: \"apps/v1beta1\",\n\t\t\t\tKind:       \"Deployment\",\n\t\t\t\tName:       name,\n\t\t\t},\n\t\t\tMinReplicas: &min,\n\t\t\tMaxReplicas: max,\n\t\t\tMetrics:     m,\n\t\t},\n\t}, nil\n}\n"
  },
  {
    "path": "cmd/kubeless/autoscale/autoscaleCreate.go",
    "content": "package autoscale\n\nimport (\n\t\"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n)\n\nvar autoscaleCreateCmd = &cobra.Command{\n\tUse:   \"create <name> FLAG\",\n\tShort: \"automatically scale function based on monitored metrics\",\n\tLong:  `automatically scale function based on monitored metrics`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - function name\")\n\t\t}\n\t\tfuncName := args[0]\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = utils.GetDefaultNamespace()\n\t\t}\n\n\t\tfunction, err := utils.GetFunction(funcName, ns)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Unable to find the function %s. Received %s: \", funcName, err)\n\t\t}\n\n\t\tmin, err := cmd.Flags().GetInt32(\"min\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t} else if min <= 0 {\n\t\t\tlogrus.Fatalf(\"min can't be negative or zero\")\n\t\t}\n\t\tmax, err := cmd.Flags().GetInt32(\"max\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t} else if max < min {\n\t\t\tlogrus.Fatalf(\"max must be greater than or equal to min\")\n\t\t}\n\n\t\tmetric, err := cmd.Flags().GetString(\"metric\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif metric != \"cpu\" && metric != \"qps\" {\n\t\t\tlogrus.Fatalf(\"only supported metrics: cpu, qps\")\n\t\t}\n\n\t\tvalue, err := cmd.Flags().GetString(\"value\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\thpa, err := getHorizontalAutoscaleDefinition(funcName, ns, metric, min, max, value, function.ObjectMeta.Labels)\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tfunction.Spec.HorizontalPodAutoscaler = hpa\n\n\t\tkubelessClient, err := utils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tlogrus.Infof(\"Adding autoscaling rule to the 
function...\")\n\t\terr = utils.UpdateFunctionCustomResource(kubelessClient, &function)\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tlogrus.Infof(\"Autoscaling rule for %s submitted for deployment\", funcName)\n\t},\n}\n\nfunc init() {\n\tautoscaleCreateCmd.Flags().Int32(\"min\", 1, \"minimum number of replicas\")\n\tautoscaleCreateCmd.Flags().Int32(\"max\", 1, \"maximum number of replicas\")\n\tautoscaleCreateCmd.Flags().String(\"metric\", \"cpu\", \"metric to use for calculating the autoscale. Supported metrics: cpu, qps\")\n\tautoscaleCreateCmd.Flags().String(\"value\", \"\", \"value of the average of the metric across all replicas. If metric is cpu, value is a number represented as percentage. If metric is qps, value must be in format of Quantity\")\n\tautoscaleCreateCmd.MarkFlagRequired(\"value\")\n}\n"
  },
  {
    "path": "cmd/kubeless/autoscale/autoscaleDelete.go",
    "content": "/*\nCopyright 2016 Skippbox, Ltd.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n    http://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage autoscale\n\nimport (\n\t\"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\t\"k8s.io/api/autoscaling/v2beta1\"\n)\n\nvar autoscaleDeleteCmd = &cobra.Command{\n\tUse:   \"delete <name>\",\n\tShort: \"delete an autoscale from Kubeless\",\n\tLong:  `delete an autoscale from Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - autoscale name\")\n\t\t}\n\t\tfuncName := args[0]\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = utils.GetDefaultNamespace()\n\t\t}\n\n\t\tfunction, err := utils.GetFunction(funcName, ns)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Unable to find the function %s. 
Received %s: \", funcName, err)\n\t\t}\n\n\t\tif function.Spec.HorizontalPodAutoscaler.Name != \"\" {\n\t\t\tfunction.Spec.HorizontalPodAutoscaler = v2beta1.HorizontalPodAutoscaler{}\n\t\t\tkubelessClient, err := utils.GetKubelessClientOutCluster()\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatal(err)\n\t\t\t}\n\t\t\tlogrus.Infof(\"Removing autoscaling rule from the function...\")\n\t\t\terr = utils.UpdateFunctionCustomResource(kubelessClient, &function)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatal(err)\n\t\t\t}\n\t\t\tlogrus.Infof(\"Remove Autoscaling rule from %s successfully\", funcName)\n\t\t} else {\n\t\t\tlogrus.Fatalf(\"Not found an autoscale definition for %s\", funcName)\n\t\t}\n\t},\n}\n"
  },
  {
    "path": "cmd/kubeless/autoscale/autoscaleList.go",
    "content": "/*\nCopyright 2016 Skippbox, Ltd.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n    http://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage autoscale\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/ghodss/yaml\"\n\t\"github.com/gosuri/uitable\"\n\t\"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\t\"k8s.io/api/autoscaling/v2beta1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\nvar autoscaleListCmd = &cobra.Command{\n\tUse:     \"list FLAG\",\n\tAliases: []string{\"ls\"},\n\tShort:   \"list all autoscales in Kubeless\",\n\tLong:    `list all autoscales in Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\toutput, err := cmd.Flags().GetString(\"out\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = utils.GetDefaultNamespace()\n\t\t}\n\n\t\tclient := utils.GetClientOutOfCluster()\n\n\t\tif err := doAutoscaleList(cmd.OutOrStdout(), client, ns, output); err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t},\n}\n\nfunc init() {\n\tautoscaleListCmd.Flags().StringP(\"out\", \"o\", \"\", \"Output format. 
One of: json|yaml\")\n}\n\nfunc doAutoscaleList(w io.Writer, client kubernetes.Interface, ns, output string) error {\n\tasList, err := client.AutoscalingV2beta1().HorizontalPodAutoscalers(ns).List(metav1.ListOptions{\n\t\tLabelSelector: \"created-by=kubeless\",\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn printAutoscale(w, asList.Items, output)\n}\n\n// printAutoscale formats the output of autoscale list\nfunc printAutoscale(w io.Writer, ass []v2beta1.HorizontalPodAutoscaler, output string) error {\n\tif output == \"\" {\n\t\ttable := uitable.New()\n\t\ttable.MaxColWidth = 50\n\t\ttable.Wrap = true\n\t\ttable.AddRow(\"NAME\", \"NAMESPACE\", \"TARGET\", \"MIN\", \"MAX\", \"METRIC\", \"VALUE\")\n\t\tfor _, i := range ass {\n\t\t\tn := i.Name\n\t\t\tns := i.Namespace\n\t\t\tta := i.Spec.ScaleTargetRef.Name\n\t\t\tmin := i.Spec.MinReplicas\n\t\t\tmax := i.Spec.MaxReplicas\n\t\t\tm := \"\"\n\t\t\tv := \"\"\n\t\t\tif len(i.Spec.Metrics) == 0 {\n\t\t\t\tlogrus.Errorf(\"The autoscale %s has bad format. It has no metric defined.\", i.Name)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif i.Spec.Metrics[0].Object != nil {\n\t\t\t\tm = i.Spec.Metrics[0].Object.MetricName\n\t\t\t\tv = i.Spec.Metrics[0].Object.TargetValue.String()\n\t\t\t} else if i.Spec.Metrics[0].Resource != nil {\n\t\t\t\tm = string(i.Spec.Metrics[0].Resource.Name)\n\t\t\t\tv = fmt.Sprint(*i.Spec.Metrics[0].Resource.TargetAverageUtilization)\n\t\t\t}\n\n\t\t\ttable.AddRow(n, ns, ta, fmt.Sprint(*min), fmt.Sprint(max), m, v)\n\t\t}\n\t\tfmt.Fprintln(w, table)\n\t} else {\n\t\tfor _, i := range ass {\n\t\t\tswitch output {\n\t\t\tcase \"json\":\n\t\t\t\tb, err := json.MarshalIndent(i, \"\", \"  \")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfmt.Fprintln(w, string(b))\n\t\t\tcase \"yaml\":\n\t\t\t\tb, err := yaml.Marshal(i)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfmt.Fprintln(w, string(b))\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"Wrong output format. 
Only accept json|yaml file\")\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "cmd/kubeless/autoscale/autoscaleList_test.go",
    "content": "package autoscale\n\nimport (\n\t\"bytes\"\n\t\"strings\"\n\t\"testing\"\n\n\tav2alpha1 \"k8s.io/api/autoscaling/v2beta1\"\n\t\"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n)\n\nfunc listAutoscaleOutput(t *testing.T, client kubernetes.Interface, ns, output string) string {\n\tvar buf bytes.Buffer\n\n\tif err := doAutoscaleList(&buf, client, ns, output); err != nil {\n\t\tt.Fatalf(\"doList returned error: %v\", err)\n\t}\n\n\treturn buf.String()\n}\n\nfunc TestAutoscaleList(t *testing.T) {\n\treplicas := int32(1)\n\ttargetAverageUtilization := int32(50)\n\tq, _ := resource.ParseQuantity(\"10k\")\n\n\tas1 := av2alpha1.HorizontalPodAutoscaler{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"foo\",\n\t\t\tNamespace: \"myns\",\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"created-by\": \"kubeless\",\n\t\t\t},\n\t\t},\n\t\tSpec: av2alpha1.HorizontalPodAutoscalerSpec{\n\t\t\tScaleTargetRef: av2alpha1.CrossVersionObjectReference{\n\t\t\t\tKind: \"Deployment\",\n\t\t\t\tName: \"foo\",\n\t\t\t},\n\t\t\tMinReplicas: &replicas,\n\t\t\tMaxReplicas: replicas,\n\t\t\tMetrics: []av2alpha1.MetricSpec{\n\t\t\t\t{\n\t\t\t\t\tType: av2alpha1.ResourceMetricSourceType,\n\t\t\t\t\tResource: &av2alpha1.ResourceMetricSource{\n\t\t\t\t\t\tName:                     v1.ResourceCPU,\n\t\t\t\t\t\tTargetAverageUtilization: &targetAverageUtilization,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tas2 := av2alpha1.HorizontalPodAutoscaler{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"bar\",\n\t\t\tNamespace: \"myns\",\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"created-by\": \"kubeless\",\n\t\t\t},\n\t\t},\n\t\tSpec: av2alpha1.HorizontalPodAutoscalerSpec{\n\t\t\tScaleTargetRef: av2alpha1.CrossVersionObjectReference{\n\t\t\t\tKind: \"Deployment\",\n\t\t\t\tName: \"foo\",\n\t\t\t},\n\t\t\tMinReplicas: 
&replicas,\n\t\t\tMaxReplicas: replicas,\n\t\t\tMetrics: []av2alpha1.MetricSpec{\n\t\t\t\t{\n\t\t\t\t\tType: av2alpha1.ObjectMetricSourceType,\n\t\t\t\t\tObject: &av2alpha1.ObjectMetricSource{\n\t\t\t\t\t\tMetricName:  \"function_calls\",\n\t\t\t\t\t\tTargetValue: q,\n\t\t\t\t\t\tTarget: av2alpha1.CrossVersionObjectReference{\n\t\t\t\t\t\t\tKind: \"Service\",\n\t\t\t\t\t\t\tName: \"foo\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tas3 := av2alpha1.HorizontalPodAutoscaler{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"foobar\",\n\t\t\tNamespace: \"myns\",\n\t\t},\n\t}\n\n\tclient := fake.NewSimpleClientset(&as1, &as2, &as3)\n\n\toutput := listAutoscaleOutput(t, client, \"myns\", \"\")\n\tt.Log(\"output is\", output)\n\n\tif !strings.Contains(output, \"foo\") || !strings.Contains(output, \"bar\") {\n\t\tt.Errorf(\"table output didn't mention both autoscales\")\n\t}\n\n\tif strings.Contains(output, \"foobar\") {\n\t\tt.Errorf(\"table output shouldn't mention foobar autoscale as it isn't created by kubeless\")\n\t}\n\n\t// json output\n\toutput = listAutoscaleOutput(t, client, \"myns\", \"json\")\n\tt.Log(\"output is\", output)\n\n\t// yaml output\n\toutput = listAutoscaleOutput(t, client, \"myns\", \"yaml\")\n\tt.Log(\"output is\", output)\n}\n"
  },
  {
    "path": "cmd/kubeless/autoscale/autoscale_test.go",
    "content": "package autoscale\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"k8s.io/api/autoscaling/v2beta1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nfunc TestGetHorizontalAutoscaleDefinition(t *testing.T) {\n\tvar min, max int32\n\tmin = 1\n\tmax = 3\n\tfuncName := \"test-autoscale\"\n\tns := \"default\"\n\tvalue := \"10\"\n\tlabels := map[string]string{\n\t\t\"foo\": \"bar\",\n\t}\n\tmetric := \"cpu\"\n\thpa, err := getHorizontalAutoscaleDefinition(funcName, ns, metric, min, max, value, labels)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error %v\", err)\n\t}\n\texpectedMeta := metav1.ObjectMeta{\n\t\tName:      funcName,\n\t\tNamespace: ns,\n\t\tLabels:    labels,\n\t}\n\tif hpa.Spec.ScaleTargetRef.Name != funcName {\n\t\tt.Fatalf(\"Creating wrong scale target name\")\n\t}\n\tif !reflect.DeepEqual(expectedMeta, hpa.ObjectMeta) {\n\t\tt.Errorf(\"Expected \\n%v to be equal to \\n%v\", expectedMeta, hpa.ObjectMeta)\n\t}\n\tif *hpa.Spec.MinReplicas != min {\n\t\tt.Errorf(\"Unexpected min replicas. Expecting %d got %d\", min, *hpa.Spec.MinReplicas)\n\t}\n\tif hpa.Spec.MaxReplicas != max {\n\t\tt.Errorf(\"Unexpected max replicas. Expecting %d got %d\", max, hpa.Spec.MaxReplicas)\n\t}\n\tif hpa.Spec.Metrics[0].Type != v2beta1.ResourceMetricSourceType ||\n\t\t*hpa.Spec.Metrics[0].Resource.TargetAverageUtilization != int32(10) {\n\t\tt.Error(\"Unexpected metric\")\n\t}\n\n\tmetric = \"qps\"\n\thpa, err = getHorizontalAutoscaleDefinition(funcName, ns, metric, min, max, value, labels)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error %v\", err)\n\t}\n\tif hpa.Spec.Metrics[0].Type != v2beta1.ObjectMetricSourceType ||\n\t\thpa.Spec.Metrics[0].Object.TargetValue.String() != \"10\" {\n\t\tt.Error(\"Unexpected metric\")\n\t}\n}\n"
  },
  {
    "path": "cmd/kubeless/completion/completion.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage completion\n\nimport (\n\t\"github.com/spf13/cobra\"\n\t\"os\"\n)\n\n// CompletionCmd contains first-class command for completion\nvar CompletionCmd = &cobra.Command{\n\tUse:   \"completion [bash|zsh|fish|powershell]\",\n\tShort: \"Generate completion script\",\n\tLong: `To load completions:\n\nBash:\n\n$ source <(kubeless completion bash)\n\n# To load completions for each session, execute once:\nLinux:\n  $ kubeless completion bash > /etc/bash_completion.d/kubeless\nMacOS:\n  $ kubeless completion bash > /usr/local/etc/bash_completion.d/kubeless\n\nZsh:\n\n# If shell completion is not already enabled in your environment you will need\n# to enable it.  
You can execute the following once:\n\n$ echo \"autoload -U compinit; compinit\" >> ~/.zshrc\n\n# To load completions for each session, execute once:\n$ kubeless completion zsh > \"${fpath[1]}/_kubeless\"\n\n# You will need to start a new shell for this setup to take effect.\n\nFish:\n\n$ kubeless completion fish | source\n\n# To load completions for each session, execute once:\n$ kubeless completion fish > ~/.config/fish/completions/kubeless.fish\n`,\n\tDisableFlagsInUseLine: true,\n\tValidArgs:             []string{\"bash\", \"zsh\", \"fish\", \"powershell\"},\n\tArgs:                  cobra.ExactValidArgs(1),\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tswitch args[0] {\n\t\tcase \"bash\":\n\t\t\tcmd.Root().GenBashCompletion(os.Stdout)\n\t\tcase \"zsh\":\n\t\t\tcmd.Root().GenZshCompletion(os.Stdout)\n\t\tcase \"fish\":\n\t\t\tcmd.Root().GenFishCompletion(os.Stdout, true)\n\t\tcase \"powershell\":\n\t\t\tcmd.Root().GenPowerShellCompletion(os.Stdout)\n\t\t}\n\t},\n}\n"
  },
  {
    "path": "cmd/kubeless/function/call.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage function\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/rest\"\n)\n\nvar callCmd = &cobra.Command{\n\tUse:   \"call <function_name> FLAG\",\n\tShort: \"call function from cli\",\n\tLong:  `call function from cli`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tvar (\n\t\t\tstr []byte\n\t\t\tget bool = false\n\t\t)\n\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - function name\")\n\t\t}\n\t\tfuncName := args[0]\n\n\t\tdata, err := cmd.Flags().GetString(\"data\")\n\t\tif data == \"\" {\n\t\t\tget = true\n\t\t} else {\n\t\t\tstr = []byte(data)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = utils.GetDefaultNamespace()\n\t\t}\n\n\t\tclientset := utils.GetClientOutOfCluster()\n\t\tsvc, err := clientset.CoreV1().Services(ns).Get(funcName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Unable to find the service for %s\", funcName)\n\t\t}\n\n\t\tport := strconv.Itoa(int(svc.Spec.Ports[0].Port))\n\t\tif svc.Spec.Ports[0].Name != \"\" {\n\t\t\tport = 
svc.Spec.Ports[0].Name\n\t\t}\n\n\t\treq := &rest.Request{}\n\t\tif get {\n\t\t\treq = clientset.CoreV1().RESTClient().Get().Namespace(ns).Resource(\"services\").SubResource(\"proxy\").Name(funcName + \":\" + port)\n\t\t} else {\n\t\t\treq = clientset.CoreV1().RESTClient().Post().Namespace(ns).Resource(\"services\").SubResource(\"proxy\").Name(funcName + \":\" + port).Body(bytes.NewBuffer(str))\n\t\t\tif utils.IsJSON(string(str)) {\n\t\t\t\treq.SetHeader(\"Content-Type\", \"application/json\")\n\t\t\t\treq.SetHeader(\"event-type\", \"application/json\")\n\t\t\t} else {\n\t\t\t\treq.SetHeader(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\t\t\t\treq.SetHeader(\"event-type\", \"application/x-www-form-urlencoded\")\n\t\t\t}\n\t\t\t// REST package removes trailing slash when building URLs\n\t\t\t// Causing POST requests to be redirected with an empty body\n\t\t\t// So we need to manually build the URL\n\t\t\treq = req.AbsPath(req.URL().Path + \"/\")\n\t\t}\n\t\ttimestamp := time.Now().UTC()\n\t\teventID, err := utils.GetRandString(11)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Unable to generate ID %v\", err)\n\t\t}\n\t\treq.SetHeader(\"event-id\", eventID)\n\t\treq.SetHeader(\"event-time\", timestamp.Format(time.RFC3339))\n\t\treq.SetHeader(\"event-namespace\", \"cli.kubeless.io\")\n\t\tres, err := req.Do().Raw()\n\t\tif err != nil {\n\t\t\t// Properly interpret line breaks\n\t\t\tlogrus.Error(string(res))\n\t\t\tif strings.Contains(err.Error(), \"status code 408\") {\n\t\t\t\t// Give a more meaninful error for timeout errors\n\t\t\t\tlogrus.Fatal(\"Request timeout exceeded\")\n\t\t\t} else {\n\t\t\t\tlogrus.Fatal(strings.Replace(err.Error(), `\\n`, \"\\n\", -1))\n\t\t\t}\n\t\t}\n\t\tfmt.Println(string(res))\n\t},\n}\n\nfunc init() {\n\tcallCmd.Flags().StringP(\"data\", \"d\", \"\", \"Specify data for function\")\n\tcallCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the function\")\n\n}\n"
  },
  {
    "path": "cmd/kubeless/function/delete.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage function\n\nimport (\n\t\"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n)\n\nvar deleteCmd = &cobra.Command{\n\tUse:   \"delete <function_name>\",\n\tShort: \"delete a function from Kubeless\",\n\tLong:  `delete a function from Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - function name\")\n\t\t}\n\t\tfuncName := args[0]\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = utils.GetDefaultNamespace()\n\t\t}\n\n\t\tkubelessClient, err := utils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\terr = utils.DeleteFunctionCustomResource(kubelessClient, funcName, ns)\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t},\n}\n\nfunc init() {\n\tdeleteCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the function\")\n}\n"
  },
  {
    "path": "cmd/kubeless/function/deploy.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage function\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/ghodss/yaml\"\n\tcronjobApi \"github.com/kubeless/cronjob-trigger/pkg/apis/kubeless/v1beta1\"\n\tcronjobUtils \"github.com/kubeless/cronjob-trigger/pkg/utils\"\n\tkubelessApi \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\t\"github.com/kubeless/kubeless/pkg/langruntime\"\n\tkubelessutil \"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/robfig/cron\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nvar deployCmd = &cobra.Command{\n\tUse:   \"deploy <function_name> FLAG\",\n\tShort: \"deploy a function to Kubeless\",\n\tLong:  `deploy a function to Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcli := kubelessutil.GetClientOutOfCluster()\n\t\tapiExtensionsClientset := kubelessutil.GetAPIExtensionsClientOutOfCluster()\n\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - function name\")\n\t\t}\n\t\tfuncName := args[0]\n\n\t\truntime, err := cmd.Flags().GetString(\"runtime\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\t// Checking runtime parameter if allowed by RBAC, otherwide skip the check\n\t\tconfig, err := kubelessutil.GetKubelessConfig(cli, apiExtensionsClientset)\n\t\tif config == nil || err != nil {\n\t\t\tlogrus.Warnf(\"%v. 
Runtime check is disabled.\", err)\n\t\t} else {\n\t\t\tlr := langruntime.New(config)\n\t\t\tlr.ReadConfigMap()\n\n\t\t\tif runtime != \"\" && !lr.IsValidRuntime(runtime) {\n\t\t\t\tlogrus.Fatalf(\"Invalid runtime: %s. Supported runtimes are: %s\",\n\t\t\t\t\truntime, strings.Join(lr.GetRuntimes(), \", \"))\n\t\t\t}\n\t\t}\n\n\t\tschedule, err := cmd.Flags().GetString(\"schedule\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tif schedule != \"\" {\n\t\t\tif _, err := cron.ParseStandard(schedule); err != nil {\n\t\t\t\tlogrus.Fatalf(\"Invalid value for --schedule. \" + err.Error())\n\t\t\t}\n\t\t}\n\n\t\tlabels, err := cmd.Flags().GetStringSlice(\"label\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tenvs, err := cmd.Flags().GetStringSlice(\"env\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\thandler, err := cmd.Flags().GetString(\"handler\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tfile, err := cmd.Flags().GetString(\"from-file\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tvar nsArg string\n\t\tif ns == \"\" {\n\t\t\tns = kubelessutil.GetDefaultNamespace()\n\t\t} else {\n\t\t\tnsArg = fmt.Sprintf(\" -n %s\", ns)\n\t\t}\n\n\t\tdeps, err := cmd.Flags().GetString(\"dependencies\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tsecrets, err := cmd.Flags().GetStringSlice(\"secrets\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tserviceAccount, err := cmd.Flags().GetString(\"service-account\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\truntimeImage, err := cmd.Flags().GetString(\"runtime-image\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\timagePullPolicy, err := cmd.Flags().GetString(\"image-pull-policy\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tif imagePullPolicy != \"IfNotPresent\" && imagePullPolicy != 
\"Always\" && imagePullPolicy != \"Never\" {\n\t\t\terr := fmt.Errorf(\"image-pull-policy must be {IfNotPresent|Always|Never}\")\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tmem, err := cmd.Flags().GetString(\"memory\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tcpu, err := cmd.Flags().GetString(\"cpu\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\ttimeout, err := cmd.Flags().GetString(\"timeout\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\toutput, err := cmd.Flags().GetString(\"output\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\theadless, err := cmd.Flags().GetBool(\"headless\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tdryrun, err := cmd.Flags().GetBool(\"dryrun\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tport, err := cmd.Flags().GetInt32(\"port\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif port <= 0 || port > 65535 {\n\t\t\tlogrus.Fatalf(\"Invalid port number %d specified\", port)\n\t\t}\n\n\t\tservicePort, err := cmd.Flags().GetInt32(\"servicePort\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif servicePort < 0 || servicePort > 65535 {\n\t\t\tlogrus.Fatalf(\"Invalid servicePort number %d specified\", servicePort)\n\t\t}\n\n\t\tfuncDeps := \"\"\n\t\tif deps != \"\" {\n\t\t\tcontentType, err := kubelessutil.GetContentType(deps)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatal(err)\n\t\t\t}\n\t\t\tfuncDeps, _, err = kubelessutil.ParseContent(deps, contentType)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatal(err)\n\t\t\t}\n\t\t}\n\n\t\tif runtime == \"\" && runtimeImage == \"\" {\n\t\t\tlogrus.Fatal(\"Either `--runtime` or `--runtime-image` flag must be specified.\")\n\t\t}\n\n\t\tif runtime != \"\" && handler == \"\" {\n\t\t\tlogrus.Fatal(\"You must specify handler for the runtime.\")\n\t\t}\n\n\t\tnodeSelectors, err := cmd.Flags().GetStringSlice(\"node-selectors\")\n\t\tif err != nil 
{\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tdefaultFunctionSpec := kubelessApi.Function{}\n\t\tdefaultFunctionSpec.ObjectMeta.Labels = map[string]string{\n\t\t\t\"created-by\": \"kubeless\",\n\t\t\t\"function\":   funcName,\n\t\t}\n\n\t\tf, err := getFunctionDescription(funcName, ns, handler, file, funcDeps, runtime, runtimeImage, mem, cpu, timeout, imagePullPolicy, serviceAccount, port, servicePort, headless, envs, labels, secrets, nodeSelectors, defaultFunctionSpec)\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tif dryrun == true {\n\t\t\tif output == \"json\" {\n\t\t\t\tj, err := json.MarshalIndent(f, \"\", \"    \")\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Fatal(err)\n\t\t\t\t}\n\t\t\t\tfmt.Println(string(j[:]))\n\t\t\t\treturn\n\t\t\t} else if output == \"yaml\" {\n\t\t\t\ty, err := yaml.Marshal(f)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Fatal(err)\n\t\t\t\t}\n\t\t\t\tfmt.Println(string(y[:]))\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tlogrus.Infof(\"Output format needs to be yaml or json\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tkubelessClient, err := kubelessutil.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tlogrus.Infof(\"Deploying function...\")\n\t\terr = kubelessutil.CreateFunctionCustomResource(kubelessClient, f)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Failed to deploy %s. 
Received:\\n%s\", funcName, err)\n\t\t}\n\t\tlogrus.Infof(\"Function %s submitted for deployment\", funcName)\n\t\tlogrus.Infof(\"Check the deployment status executing 'kubeless function ls %s%s'\", funcName, nsArg)\n\n\t\tif schedule != \"\" {\n\t\t\tcronJobTrigger := cronjobApi.CronJobTrigger{}\n\t\t\tcronJobTrigger.TypeMeta = metav1.TypeMeta{\n\t\t\t\tKind:       \"CronJobTrigger\",\n\t\t\t\tAPIVersion: \"kubeless.io/v1beta1\",\n\t\t\t}\n\t\t\tcronJobTrigger.ObjectMeta = metav1.ObjectMeta{\n\t\t\t\tName:      funcName,\n\t\t\t\tNamespace: ns,\n\t\t\t}\n\t\t\tcronJobTrigger.ObjectMeta.Labels = map[string]string{\n\t\t\t\t\"created-by\": \"kubeless\",\n\t\t\t\t\"function\":   funcName,\n\t\t\t}\n\t\t\tcronJobTrigger.Spec.FunctionName = funcName\n\t\t\tcronJobTrigger.Spec.Schedule = schedule\n\t\t\tcronjobClient, err := cronjobUtils.GetKubelessClientOutCluster()\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatal(err)\n\t\t\t}\n\t\t\terr = cronjobUtils.CreateCronJobCustomResource(cronjobClient, &cronJobTrigger)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatalf(\"Failed to deploy cron job trigger %s. Received:\\n%s\", funcName, err)\n\t\t\t}\n\t\t}\n\t},\n}\n\nfunc init() {\n\tdeployCmd.Flags().StringP(\"runtime\", \"r\", \"\", \"Specify runtime\")\n\tdeployCmd.Flags().StringP(\"handler\", \"\", \"\", \"Specify handler\")\n\tdeployCmd.Flags().StringP(\"from-file\", \"f\", \"\", \"Specify code file or a URL to the code file\")\n\tdeployCmd.Flags().StringSliceP(\"label\", \"l\", []string{}, \"Specify labels of the function. Both separator ':' and '=' are allowed. For example: --label foo1=bar1,foo2:bar2\")\n\tdeployCmd.Flags().StringSliceP(\"secrets\", \"\", []string{}, \"Specify Secrets to be mounted to the functions container. For example: --secrets mySecret\")\n\tdeployCmd.Flags().StringSliceP(\"env\", \"e\", []string{}, \"Specify environment variable of the function. Both separator ':' and '=' are allowed. 
For example: --env foo1=bar1,foo2:bar2\")\n\tdeployCmd.Flags().StringSliceP(\"node-selectors\", \"\", []string{}, \"Specify node selectors for the function. Both separator ':' and '=' are allowed. For example: --node-selectors key1=val1,key2:val2\")\n\tdeployCmd.Flags().StringP(\"service-account\", \"\", \"\", \"Specify service account for the function. For example: --service-account controller-acct\")\n\tdeployCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the function\")\n\tdeployCmd.Flags().StringP(\"dependencies\", \"d\", \"\", \"Specify a file containing list of dependencies for the function\")\n\tdeployCmd.Flags().StringP(\"schedule\", \"\", \"\", \"Specify schedule in cron format for scheduled function\")\n\tdeployCmd.Flags().StringP(\"memory\", \"\", \"\", \"Request amount of memory, which is measured in bytes, for the function. It is expressed as a plain integer or a fixed-point integer with one of these suffixes: E, P, T, G, M, K, Ei, Pi, Ti, Gi, Mi, Ki\")\n\tdeployCmd.Flags().StringP(\"cpu\", \"\", \"\", \"Request amount of cpu for the function, which is measured in units of cores. Please see the following link for more information: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu\")\n\tdeployCmd.Flags().StringP(\"runtime-image\", \"\", \"\", \"Custom runtime image\")\n\tdeployCmd.Flags().StringP(\"image-pull-policy\", \"\", \"Always\", \"Image pull policy\")\n\tdeployCmd.Flags().StringP(\"timeout\", \"\", \"180\", \"Maximum timeout (in seconds) for the function to complete its execution\")\n\tdeployCmd.Flags().StringP(\"output\", \"o\", \"yaml\", \"Output format\")\n\tdeployCmd.Flags().Bool(\"headless\", false, \"Deploy http-based function without a single service IP and load balancing support from Kubernetes. 
See: https://kubernetes.io/docs/concepts/services-networking/service/#headless-services\")\n\tdeployCmd.Flags().Bool(\"dryrun\", false, \"Output JSON manifest of the function without creating it\")\n\tdeployCmd.Flags().Int32(\"port\", 8080, \"Deploy http-based function with a custom port\")\n\tdeployCmd.Flags().Int32(\"servicePort\", 0, \"Deploy http-based function with a custom service port. If not provided the value of 'port' will be used\")\n}\n"
  },
  {
    "path": "cmd/kubeless/function/describe.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage function\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\n\t\"github.com/ghodss/yaml\"\n\t\"github.com/gosuri/uitable\"\n\tkubelessApi \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\t\"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n)\n\nvar describeCmd = &cobra.Command{\n\tUse:     \"describe FLAG\",\n\tAliases: []string{\"ls\"},\n\tShort:   \"describe a function deployed to Kubeless\",\n\tLong:    `describe a function deployed to Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - function name\")\n\t\t}\n\t\tfuncName := args[0]\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not describe function: %v\", err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = utils.GetDefaultNamespace()\n\t\t}\n\n\t\toutput, err := cmd.Flags().GetString(\"out\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not describe function: %v\", err)\n\t\t}\n\n\t\tf, err := utils.GetFunction(funcName, ns)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not describe function: %v\", err)\n\t\t}\n\n\t\terr = print(f, funcName, output)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not describe function: %v\", err)\n\t\t}\n\t},\n}\n\nfunc init() {\n\tdescribeCmd.Flags().StringP(\"out\", \"o\", \"\", 
\"Output format. One of: json|yaml\")\n\tdescribeCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the function\")\n}\n\nfunc print(f kubelessApi.Function, name, output string) error {\n\tswitch output {\n\tcase \"\":\n\t\ttable := uitable.New()\n\t\ttable.MaxColWidth = 80\n\t\ttable.Wrap = true\n\t\tlabel, err := json.Marshal(f.ObjectMeta.Labels)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar env, memory string\n\t\tif len(f.Spec.Deployment.Spec.Template.Spec.Containers) != 0 {\n\t\t\tb, err := json.Marshal(f.Spec.Deployment.Spec.Template.Spec.Containers[0].Env)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tenv = string(b)\n\t\t\tmemory = f.Spec.Deployment.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().String()\n\t\t}\n\n\t\ttable.AddRow(\"Name:\", name)\n\t\ttable.AddRow(\"Namespace:\", f.ObjectMeta.Namespace)\n\t\ttable.AddRow(\"Handler:\", f.Spec.Handler)\n\t\ttable.AddRow(\"Runtime:\", f.Spec.Runtime)\n\t\ttable.AddRow(\"Label:\", string(label))\n\t\ttable.AddRow(\"Envvar:\", env)\n\t\ttable.AddRow(\"Memory:\", memory)\n\t\ttable.AddRow(\"Dependencies:\", f.Spec.Deps)\n\t\tfmt.Println(table)\n\tcase \"json\":\n\t\tb, err := json.MarshalIndent(f, \"\", \"  \")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(string(b))\n\tcase \"yaml\":\n\t\tb, err := yaml.Marshal(f)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(string(b))\n\tdefault:\n\t\tfmt.Println(\"Wrong output format. Please use only json|yaml\")\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "cmd/kubeless/function/function.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage function\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\tkubelessApi \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\t\"github.com/kubeless/kubeless/pkg/client/clientset/versioned\"\n\tkubelessutil \"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/spf13/cobra\"\n\tv1 \"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/util/intstr\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\n// FunctionCmd contains first-class command for function\nvar FunctionCmd = &cobra.Command{\n\tUse:   \"function SUBCOMMAND\",\n\tShort: \"function specific operations\",\n\tLong:  `function command allows user to list, deploy, edit, delete functions running on Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcmd.Help()\n\t},\n}\n\nfunc init() {\n\tFunctionCmd.AddCommand(deployCmd)\n\tFunctionCmd.AddCommand(deleteCmd)\n\tFunctionCmd.AddCommand(listCmd)\n\tFunctionCmd.AddCommand(callCmd)\n\tFunctionCmd.AddCommand(logsCmd)\n\tFunctionCmd.AddCommand(describeCmd)\n\tFunctionCmd.AddCommand(updateCmd)\n\tFunctionCmd.AddCommand(topCmd)\n}\n\nfunc getKV(input string) (string, string) {\n\tvar key, value string\n\tif pos := strings.IndexAny(input, \"=:\"); pos != -1 {\n\t\tkey = input[:pos]\n\t\tvalue = input[pos+1:]\n\t} else {\n\t\t// no separator found\n\t\tkey = input\n\t\tvalue 
= \"\"\n\t}\n\n\treturn key, value\n}\n\nfunc parseLabel(labels []string) map[string]string {\n\tfuncLabels := make(map[string]string)\n\tfor _, label := range labels {\n\t\tk, v := getKV(label)\n\t\tfuncLabels[k] = v\n\t}\n\treturn funcLabels\n}\n\nfunc parseEnv(envs []string) []v1.EnvVar {\n\tfuncEnv := []v1.EnvVar{}\n\tfor _, env := range envs {\n\t\tk, v := getKV(env)\n\t\tfuncEnv = append(funcEnv, v1.EnvVar{\n\t\t\tName:  k,\n\t\t\tValue: v,\n\t\t})\n\t}\n\treturn funcEnv\n}\n\nfunc parseResource(in string) (resource.Quantity, error) {\n\tif in == \"\" {\n\t\treturn resource.Quantity{}, nil\n\t}\n\n\tquantity, err := resource.ParseQuantity(in)\n\tif err != nil {\n\t\treturn resource.Quantity{}, err\n\t}\n\n\treturn quantity, nil\n}\n\nfunc parseNodeSelectors(nodeSelectors []string) map[string]string {\n\tfuncNodeSelectors := make(map[string]string)\n\tfor _, nodeSelector := range nodeSelectors {\n\t\tk, v := getKV(nodeSelector)\n\t\tfuncNodeSelectors[k] = v\n\t}\n\treturn funcNodeSelectors\n}\n\nfunc getFunctionDescription(funcName, ns, handler, file, deps, runtime, runtimeImage, mem, cpu, timeout string, imagePullPolicy string, serviceAccount string, port int32, servicePort int32, headless bool, envs, labels, secrets, nodeSelectors []string, defaultFunction kubelessApi.Function) (*kubelessApi.Function, error) {\n\tfunction := defaultFunction\n\tfunction.TypeMeta = metav1.TypeMeta{\n\t\tKind:       \"Function\",\n\t\tAPIVersion: \"kubeless.io/v1beta1\",\n\t}\n\tif handler != \"\" {\n\t\tfunction.Spec.Handler = handler\n\t}\n\n\tif file != \"\" {\n\t\tcontentType, err := kubelessutil.GetContentType(file)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfunctionContent, checksum, err := kubelessutil.ParseContent(file, contentType)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif strings.Contains(contentType, \"url\") {\n\t\t\t// set the function to be the URL provided on the command line\n\t\t\tfunction.Spec.Function = file\n\t\t} else 
{\n\t\t\tfunction.Spec.Function = functionContent\n\t\t}\n\t\tfunction.Spec.Checksum = checksum\n\t\tfunction.Spec.FunctionContentType = contentType\n\t}\n\n\tif deps != \"\" {\n\t\tfunction.Spec.Deps = deps\n\t}\n\n\tif runtime != \"\" {\n\t\tfunction.Spec.Runtime = runtime\n\t}\n\n\tif timeout != \"\" {\n\t\tfunction.Spec.Timeout = timeout\n\t}\n\n\tfuncEnv := parseEnv(envs)\n\tif len(funcEnv) == 0 && len(defaultFunction.Spec.Deployment.Spec.Template.Spec.Containers) != 0 {\n\t\tfuncEnv = defaultFunction.Spec.Deployment.Spec.Template.Spec.Containers[0].Env\n\t}\n\n\tfuncLabels := defaultFunction.ObjectMeta.Labels\n\tif len(funcLabels) == 0 {\n\t\tfuncLabels = make(map[string]string)\n\t}\n\tls := parseLabel(labels)\n\tfor k, v := range ls {\n\t\tfuncLabels[k] = v\n\t}\n\tfunction.ObjectMeta = metav1.ObjectMeta{\n\t\tName:      funcName,\n\t\tNamespace: ns,\n\t\tLabels:    funcLabels,\n\t}\n\n\tresources := v1.ResourceRequirements{}\n\tif mem != \"\" || cpu != \"\" {\n\t\tfuncMem, err := parseResource(mem)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Wrong format of the memory value: %v\", err)\n\t\t\treturn &kubelessApi.Function{}, err\n\t\t}\n\t\tfuncCPU, err := parseResource(cpu)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Wrong format for cpu value: %v\", err)\n\t\t\treturn &kubelessApi.Function{}, err\n\t\t}\n\t\tresource := map[v1.ResourceName]resource.Quantity{\n\t\t\tv1.ResourceMemory: funcMem,\n\t\t\tv1.ResourceCPU:    funcCPU,\n\t\t}\n\n\t\tresources = v1.ResourceRequirements{\n\t\t\tLimits:   resource,\n\t\t\tRequests: resource,\n\t\t}\n\t} else {\n\t\tif len(defaultFunction.Spec.Deployment.Spec.Template.Spec.Containers) != 0 {\n\t\t\tresources = defaultFunction.Spec.Deployment.Spec.Template.Spec.Containers[0].Resources\n\t\t}\n\t}\n\n\tif len(runtimeImage) == 0 && len(defaultFunction.Spec.Deployment.Spec.Template.Spec.Containers) != 0 {\n\t\truntimeImage = 
defaultFunction.Spec.Deployment.Spec.Template.Spec.Containers[0].Image\n\t}\n\tfunction.Spec.Deployment.Spec.Template.Spec.Containers = []v1.Container{\n\t\t{\n\t\t\tImagePullPolicy: v1.PullPolicy(imagePullPolicy),\n\t\t\tEnv:             funcEnv,\n\t\t\tResources:       resources,\n\t\t\tImage:           runtimeImage,\n\t\t},\n\t}\n\n\tif serviceAccount != \"\" {\n\t\tfunction.Spec.Deployment.Spec.Template.Spec.ServiceAccountName = serviceAccount\n\t}\n\n\tif len(defaultFunction.Spec.Deployment.Spec.Template.Spec.Containers) != 0 {\n\t\tfunction.Spec.Deployment.Spec.Template.Spec.Containers[0].VolumeMounts = defaultFunction.Spec.Deployment.Spec.Template.Spec.Containers[0].VolumeMounts\n\t}\n\n\tsvcSpec := v1.ServiceSpec{\n\t\tPorts: []v1.ServicePort{\n\t\t\t{\n\t\t\t\tName:     \"http-function-port\",\n\t\t\t\tNodePort: 0,\n\t\t\t\tProtocol: v1.ProtocolTCP,\n\t\t\t},\n\t\t},\n\t\tSelector: funcLabels,\n\t\tType:     v1.ServiceTypeClusterIP,\n\t}\n\n\tif headless {\n\t\tsvcSpec.ClusterIP = v1.ClusterIPNone\n\t}\n\n\tif port != 0 {\n\t\tsvcSpec.Ports[0].Port = port\n\t\tsvcSpec.Ports[0].TargetPort = intstr.FromInt(int(port))\n\t}\n\tif servicePort != 0 {\n\t\tsvcSpec.Ports[0].Port = servicePort\n\t}\n\tfunction.Spec.ServiceSpec = svcSpec\n\n\tfor _, secret := range secrets {\n\t\tfunction.Spec.Deployment.Spec.Template.Spec.Volumes = append(function.Spec.Deployment.Spec.Template.Spec.Volumes, v1.Volume{\n\t\t\tName: secret + \"-vol\",\n\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\tSecret: &v1.SecretVolumeSource{\n\t\t\t\t\tSecretName: secret,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tfunction.Spec.Deployment.Spec.Template.Spec.Containers[0].VolumeMounts = append(function.Spec.Deployment.Spec.Template.Spec.Containers[0].VolumeMounts, v1.VolumeMount{\n\t\t\tName:      secret + \"-vol\",\n\t\t\tMountPath: \"/\" + secret,\n\t\t})\n\n\t}\n\n\tfuncNodeSelectors := parseNodeSelectors(nodeSelectors)\n\tif len(funcNodeSelectors) == 0 && 
len(defaultFunction.Spec.Deployment.Spec.Template.Spec.NodeSelector) != 0 {\n\t\tfuncNodeSelectors = defaultFunction.Spec.Deployment.Spec.Template.Spec.NodeSelector\n\t}\n\tfunction.Spec.Deployment.Spec.Template.Spec.NodeSelector = funcNodeSelectors\n\n\treturn &function, nil\n}\n\nfunc getDeploymentStatus(cli kubernetes.Interface, funcName, ns string) (string, error) {\n\tdpm, err := cli.AppsV1().Deployments(ns).Get(funcName, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tstatus := fmt.Sprintf(\"%d/%d\", dpm.Status.ReadyReplicas, dpm.Status.Replicas)\n\tif dpm.Status.ReadyReplicas > 0 {\n\t\tstatus += \" READY\"\n\t} else {\n\t\tstatus += \" NOT READY\"\n\t}\n\treturn status, nil\n}\n\nfunc getFunctions(kubelessClient versioned.Interface, namespace, functionName string) ([]*kubelessApi.Function, error) {\n\tif functionName == \"\" {\n\t\tf, err := kubelessClient.KubelessV1beta1().Functions(namespace).List(metav1.ListOptions{})\n\t\tif err != nil {\n\t\t\treturn []*kubelessApi.Function{}, err\n\t\t}\n\t\treturn f.Items, nil\n\t}\n\n\tf, err := kubelessClient.KubelessV1beta1().Functions(namespace).Get(functionName, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn []*kubelessApi.Function{}, err\n\t}\n\treturn []*kubelessApi.Function{\n\t\tf,\n\t}, nil\n}\n"
  },
  {
    "path": "cmd/kubeless/function/function_test.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage function\n\nimport (\n\t\"archive/tar\"\n\t\"archive/zip\"\n\t\"compress/gzip\"\n\t\"crypto/sha256\"\n\t\"encoding/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"os\"\n\t\"reflect\"\n\t\"testing\"\n\n\tkubelessApi \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\tappsv1 \"k8s.io/api/apps/v1\"\n\t\"k8s.io/api/autoscaling/v2beta1\"\n\tv1 \"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/util/intstr\"\n)\n\nfunc TestParseLabel(t *testing.T) {\n\tlabels := []string{\n\t\t\"foo=bar\",\n\t\t\"bar:foo\",\n\t\t\"foobar\",\n\t}\n\texpected := map[string]string{\n\t\t\"foo\":    \"bar\",\n\t\t\"bar\":    \"foo\",\n\t\t\"foobar\": \"\",\n\t}\n\tactual := parseLabel(labels)\n\tif eq := reflect.DeepEqual(expected, actual); !eq {\n\t\tt.Errorf(\"Expect %v got %v\", expected, actual)\n\t}\n}\n\nfunc TestParseEnv(t *testing.T) {\n\tenvs := []string{\n\t\t\"foo=bar\",\n\t\t\"bar:foo\",\n\t\t\"foobar\",\n\t\t\"foo=bar=baz\",\n\t\t\"qux=bar,baz\",\n\t}\n\texpected := []v1.EnvVar{\n\t\t{\n\t\t\tName:  \"foo\",\n\t\t\tValue: \"bar\",\n\t\t},\n\t\t{\n\t\t\tName:  \"bar\",\n\t\t\tValue: \"foo\",\n\t\t},\n\t\t{\n\t\t\tName:  \"foobar\",\n\t\t\tValue: \"\",\n\t\t},\n\t\t{\n\t\t\tName:  \"foo\",\n\t\t\tValue: 
\"bar=baz\",\n\t\t},\n\t\t{\n\t\t\tName:  \"qux\",\n\t\t\tValue: \"bar,baz\",\n\t\t},\n\t}\n\tactual := parseEnv(envs)\n\tif eq := reflect.DeepEqual(expected, actual); !eq {\n\t\tt.Errorf(\"Expect %v got %v\", expected, actual)\n\t}\n}\n\nfunc TestParseNodeSelectors(t *testing.T) {\n\tnodeSelectors := []string{\n\t\t\"foo=bar\",\n\t\t\"baz:qux\",\n\t}\n\texpected := map[string]string{\n\t\t\"foo\": \"bar\",\n\t\t\"baz\": \"qux\",\n\t}\n\tactual := parseNodeSelectors(nodeSelectors)\n\tif eq := reflect.DeepEqual(expected, actual); !eq {\n\t\tt.Errorf(\"Expect %v got %v\", expected, actual)\n\t}\n}\n\nfunc TestGetFunctionDescription(t *testing.T) {\n\t// It should parse the given values\n\tfile, err := ioutil.TempFile(\"\", \"test\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\t_, err = file.WriteString(\"function\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tfile.Close()\n\tdefer os.Remove(file.Name()) // clean up\n\n\tresult, err := getFunctionDescription(\"test\", \"default\", \"file.handler\", file.Name(), \"dependencies\", \"runtime\", \"test-image\", \"128Mi\", \"\", \"10\", \"Always\", \"serviceAccount\", 8080, 0, false, []string{\"TEST=1\"}, []string{\"test=1\"}, []string{\"secretName\"}, []string{\"foo1=bar1\", \"baz1:qux1\"}, kubelessApi.Function{})\n\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tparsedMem, _ := parseResource(\"128Mi\")\n\tparsedCPU, _ := parseResource(\"\")\n\texpectedFunction := kubelessApi.Function{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind:       \"Function\",\n\t\t\tAPIVersion: \"kubeless.io/v1beta1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"test\",\n\t\t\tNamespace: \"default\",\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"test\": \"1\",\n\t\t\t},\n\t\t},\n\t\tSpec: kubelessApi.FunctionSpec{\n\t\t\tHandler:             \"file.handler\",\n\t\t\tRuntime:             \"runtime\",\n\t\t\tFunction:            \"function\",\n\t\t\tChecksum:            
\"sha256:78f9ac018e554365069108352dacabb7fbd15246edf19400677e3b54fe24e126\",\n\t\t\tFunctionContentType: \"text\",\n\t\t\tDeps:                \"dependencies\",\n\t\t\tTimeout:             \"10\",\n\t\t\tDeployment: appsv1.Deployment{\n\t\t\t\tSpec: appsv1.DeploymentSpec{\n\t\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\t\tServiceAccountName: \"serviceAccount\",\n\t\t\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tEnv: []v1.EnvVar{{\n\t\t\t\t\t\t\t\t\t\tName:  \"TEST\",\n\t\t\t\t\t\t\t\t\t\tValue: \"1\",\n\t\t\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t\t\t\tResources: v1.ResourceRequirements{\n\t\t\t\t\t\t\t\t\t\tLimits: map[v1.ResourceName]resource.Quantity{\n\t\t\t\t\t\t\t\t\t\t\tv1.ResourceMemory: parsedMem,\n\t\t\t\t\t\t\t\t\t\t\tv1.ResourceCPU:    parsedCPU,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\tRequests: map[v1.ResourceName]resource.Quantity{\n\t\t\t\t\t\t\t\t\t\t\tv1.ResourceMemory: parsedMem,\n\t\t\t\t\t\t\t\t\t\t\tv1.ResourceCPU:    parsedCPU,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tImage:           \"test-image\",\n\t\t\t\t\t\t\t\t\tImagePullPolicy: v1.PullAlways,\n\t\t\t\t\t\t\t\t\tVolumeMounts: []v1.VolumeMount{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tName:      \"secretName-vol\",\n\t\t\t\t\t\t\t\t\t\t\tMountPath: \"/secretName\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVolumes: []v1.Volume{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"secretName-vol\",\n\t\t\t\t\t\t\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\t\t\t\t\t\t\tSecret: &v1.SecretVolumeSource{\n\t\t\t\t\t\t\t\t\t\t\tSecretName: \"secretName\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tNodeSelector: map[string]string{\n\t\t\t\t\t\t\t\t\"foo1\": \"bar1\",\n\t\t\t\t\t\t\t\t\"baz1\": \"qux1\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tServiceSpec: 
v1.ServiceSpec{\n\t\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t\t{Name: \"http-function-port\", Protocol: \"TCP\", Port: 8080, TargetPort: intstr.FromInt(8080)},\n\t\t\t\t},\n\t\t\t\tSelector: map[string]string{\n\t\t\t\t\t\"test\": \"1\",\n\t\t\t\t},\n\t\t\t\tType: v1.ServiceTypeClusterIP,\n\t\t\t},\n\t\t},\n\t}\n\tif !reflect.DeepEqual(expectedFunction, *result) {\n\t\tt.Errorf(\"Unexpected result. Expecting:\\n %+v\\nReceived:\\n %+v\", expectedFunction, *result)\n\t}\n\n\t// It should take the default values\n\tresult2, err := getFunctionDescription(\"test\", \"default\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"Always\", \"\", 8080, 0, false, []string{}, []string{}, []string{}, []string{}, expectedFunction)\n\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif !reflect.DeepEqual(expectedFunction, *result2) {\n\t\tt.Errorf(\"Unexpected result. Expecting:\\n %+v\\n Received %+v\\n\", expectedFunction, *result2)\n\t}\n\n\t// Given parameters should take precedence from default values\n\tfile, err = ioutil.TempFile(\"\", \"test\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\t_, err = file.WriteString(\"function-modified\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tfile.Close()\n\tdefer os.Remove(file.Name()) // clean up\n\n\tresult3, err := getFunctionDescription(\"test\", \"default\", \"file.handler2\", file.Name(), \"dependencies2\", \"runtime2\", \"test-image2\", \"256Mi\", \"100m\", \"20\", \"Always\", \"NewServiceAccount\", 8080, 0, false, []string{\"TEST=2\"}, []string{\"test=2\"}, []string{\"secret2\"}, []string{\"foo2=bar2\", \"baz2:qux2\"}, expectedFunction)\n\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tparsedMem2, _ := parseResource(\"256Mi\")\n\tparsedCPU2, _ := parseResource(\"100m\")\n\tnewFunction := kubelessApi.Function{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind:       \"Function\",\n\t\t\tAPIVersion: \"kubeless.io/v1beta1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"test\",\n\t\t\tNamespace: \"default\",\n\t\t\tLabels: 
map[string]string{\n\t\t\t\t\"test\": \"2\",\n\t\t\t},\n\t\t},\n\t\tSpec: kubelessApi.FunctionSpec{\n\t\t\tHandler:             \"file.handler2\",\n\t\t\tRuntime:             \"runtime2\",\n\t\t\tFunction:            \"function-modified\",\n\t\t\tFunctionContentType: \"text\",\n\t\t\tChecksum:            \"sha256:1958eb96d7d3cadedd0f327f09322eb7db296afb282ed91aa66cb4ab0dcc3c9f\",\n\t\t\tDeps:                \"dependencies2\",\n\t\t\tTimeout:             \"20\",\n\t\t\tDeployment: appsv1.Deployment{\n\t\t\t\tSpec: appsv1.DeploymentSpec{\n\t\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\t\tServiceAccountName: \"NewServiceAccount\",\n\t\t\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tEnv: []v1.EnvVar{{\n\t\t\t\t\t\t\t\t\t\tName:  \"TEST\",\n\t\t\t\t\t\t\t\t\t\tValue: \"2\",\n\t\t\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t\t\t\tResources: v1.ResourceRequirements{\n\t\t\t\t\t\t\t\t\t\tLimits: map[v1.ResourceName]resource.Quantity{\n\t\t\t\t\t\t\t\t\t\t\tv1.ResourceMemory: parsedMem2,\n\t\t\t\t\t\t\t\t\t\t\tv1.ResourceCPU:    parsedCPU2,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\tRequests: map[v1.ResourceName]resource.Quantity{\n\t\t\t\t\t\t\t\t\t\t\tv1.ResourceMemory: parsedMem2,\n\t\t\t\t\t\t\t\t\t\t\tv1.ResourceCPU:    parsedCPU2,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tImage:           \"test-image2\",\n\t\t\t\t\t\t\t\t\tImagePullPolicy: v1.PullAlways,\n\t\t\t\t\t\t\t\t\tVolumeMounts: []v1.VolumeMount{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tName:      \"secretName-vol\",\n\t\t\t\t\t\t\t\t\t\t\tMountPath: \"/secretName\",\n\t\t\t\t\t\t\t\t\t\t}, {\n\t\t\t\t\t\t\t\t\t\t\tName:      \"secret2-vol\",\n\t\t\t\t\t\t\t\t\t\t\tMountPath: \"/secret2\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVolumes: []v1.Volume{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"secretName-vol\",\n\t\t\t\t\t\t\t\t\tVolumeSource: 
v1.VolumeSource{\n\t\t\t\t\t\t\t\t\t\tSecret: &v1.SecretVolumeSource{\n\t\t\t\t\t\t\t\t\t\t\tSecretName: \"secretName\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t}, {\n\t\t\t\t\t\t\t\t\tName: \"secret2-vol\",\n\t\t\t\t\t\t\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\t\t\t\t\t\t\tSecret: &v1.SecretVolumeSource{\n\t\t\t\t\t\t\t\t\t\t\tSecretName: \"secret2\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tNodeSelector: map[string]string{\n\t\t\t\t\t\t\t\t\"foo2\": \"bar2\",\n\t\t\t\t\t\t\t\t\"baz2\": \"qux2\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tServiceSpec: v1.ServiceSpec{\n\t\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t\t{Name: \"http-function-port\", Protocol: \"TCP\", Port: 8080, TargetPort: intstr.FromInt(8080)},\n\t\t\t\t},\n\t\t\t\tSelector: map[string]string{\n\t\t\t\t\t\"test\": \"2\",\n\t\t\t\t},\n\t\t\t\tType: v1.ServiceTypeClusterIP,\n\t\t\t},\n\t\t},\n\t}\n\tif !reflect.DeepEqual(newFunction, *result3) {\n\t\tt.Errorf(\"Unexpected result. 
Expecting:\\n %+v\\n Received %+v\\n\", newFunction, *result3)\n\t}\n\n\t// It should detect that it is a Zip file or a compressed tar file\n\tfile, err = os.Open(file.Name())\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tzipFile, err := os.Create(file.Name() + \".zip\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer os.Remove(zipFile.Name()) // clean up\n\n\ttarGzFile, err := os.Create(file.Name() + \".tar.gz\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tdefer os.Remove(tarGzFile.Name()) // clean up\n\n\tzipW := zip.NewWriter(zipFile)\n\tgzipW := gzip.NewWriter(tarGzFile)\n\ttarW := tar.NewWriter(gzipW)\n\n\tinfo, err := file.Stat()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tzipHeader, err := zip.FileInfoHeader(info)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\twriter, err := zipW.CreateHeader(zipHeader)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\t_, err = io.Copy(writer, file)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\ttarHeader, err := tar.FileInfoHeader(info, info.Name())\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\ttarHeader.Name = file.Name()\n\terr = tarW.WriteHeader(tarHeader)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\t_, err = io.Copy(writer, file)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tfile.Close()\n\tzipW.Close()\n\tzipFile.Close()\n\ttarW.Close()\n\tgzipW.Close()\n\ttarGzFile.Close()\n\n\tresult4A, err := getFunctionDescription(\"test\", \"default\", \"file.handler\", zipFile.Name(), \"dependencies\", \"runtime\", \"\", \"\", \"\", \"\", \"Always\", \"\", 8080, 0, false, []string{}, []string{}, []string{}, []string{}, expectedFunction)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif result4A.Spec.FunctionContentType != \"base64+zip\" {\n\t\tt.Errorf(\"Should return base64+zip, received %s\", result4A.Spec.FunctionContentType)\n\t}\n\n\tresult4B, err := getFunctionDescription(\"test\", \"default\", \"file.handler\", tarGzFile.Name(), \"dependencies\", \"runtime\", \"\", \"\", \"\", \"\", \"Always\", \"\", 8080, 0, false, 
[]string{}, []string{}, []string{}, []string{}, expectedFunction)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif result4B.Spec.FunctionContentType != \"base64+compressedtar\" {\n\t\tt.Errorf(\"Should return base64+compressedtar, received %s\", result4B.Spec.FunctionContentType)\n\t}\n\n\t// It should maintain previous HPA definition\n\tresult5, err := getFunctionDescription(\"test\", \"default\", \"file.handler\", file.Name(), \"dependencies\", \"runtime\", \"test-image\", \"128Mi\", \"\", \"10\", \"Always\", \"serviceAccount\", 8080, 0, false, []string{\"TEST=1\"}, []string{\"test=1\"}, []string{}, []string{}, kubelessApi.Function{\n\n\t\tSpec: kubelessApi.FunctionSpec{\n\t\t\tHorizontalPodAutoscaler: v2beta1.HorizontalPodAutoscaler{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"previous-hpa\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n\tif result5.Spec.HorizontalPodAutoscaler.ObjectMeta.Name != \"previous-hpa\" {\n\t\tt.Error(\"should maintain previous HPA definition\")\n\t}\n\n\t// It should set the Port, ServicePort and headless service properly\n\tresult6, err := getFunctionDescription(\"test\", \"default\", \"file.handler\", file.Name(), \"dependencies\", \"runtime\", \"test-image\", \"128Mi\", \"\", \"\", \"Always\", \"serviceAccount\", 9091, 9092, true, []string{}, []string{}, []string{}, []string{}, kubelessApi.Function{})\n\texpectedPort := v1.ServicePort{\n\t\tName:       \"http-function-port\",\n\t\tPort:       9092,\n\t\tTargetPort: intstr.FromInt(9091),\n\t\tNodePort:   0,\n\t\tProtocol:   v1.ProtocolTCP,\n\t}\n\tif !reflect.DeepEqual(result6.Spec.ServiceSpec.Ports[0], expectedPort) {\n\t\tt.Errorf(\"Unexpected port definition: %v\", result6.Spec.ServiceSpec.Ports[0])\n\t}\n\tif result6.Spec.ServiceSpec.ClusterIP != v1.ClusterIPNone {\n\t\tt.Errorf(\"Unexpected clusterIP %v\", result6.Spec.ServiceSpec.ClusterIP)\n\t}\n\n\t// it should create a function from a URL\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) {\n\t\tfmt.Fprint(w, \"function\")\n\t}))\n\tdefer ts.Close()\n\n\texpectedURLFunction := kubelessApi.Function{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind:       \"Function\",\n\t\t\tAPIVersion: \"kubeless.io/v1beta1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"test\",\n\t\t\tNamespace: \"default\",\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"test\": \"1\",\n\t\t\t},\n\t\t},\n\t\tSpec: kubelessApi.FunctionSpec{\n\t\t\tHandler:             \"file.handler\",\n\t\t\tRuntime:             \"runtime\",\n\t\t\tFunction:            ts.URL,\n\t\t\tChecksum:            \"sha256:78f9ac018e554365069108352dacabb7fbd15246edf19400677e3b54fe24e126\",\n\t\t\tFunctionContentType: \"url\",\n\t\t\tDeps:                \"dependencies\",\n\t\t\tTimeout:             \"10\",\n\t\t\tDeployment: appsv1.Deployment{\n\t\t\t\tSpec: appsv1.DeploymentSpec{\n\t\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\t\tServiceAccountName: \"serviceAccount\",\n\t\t\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tEnv: []v1.EnvVar{{\n\t\t\t\t\t\t\t\t\t\tName:  \"TEST\",\n\t\t\t\t\t\t\t\t\t\tValue: \"1\",\n\t\t\t\t\t\t\t\t\t}},\n\t\t\t\t\t\t\t\t\tResources: v1.ResourceRequirements{\n\t\t\t\t\t\t\t\t\t\tLimits: map[v1.ResourceName]resource.Quantity{\n\t\t\t\t\t\t\t\t\t\t\tv1.ResourceMemory: parsedMem,\n\t\t\t\t\t\t\t\t\t\t\tv1.ResourceCPU:    parsedCPU,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\tRequests: map[v1.ResourceName]resource.Quantity{\n\t\t\t\t\t\t\t\t\t\t\tv1.ResourceMemory: parsedMem,\n\t\t\t\t\t\t\t\t\t\t\tv1.ResourceCPU:    parsedCPU,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tImage:           \"test-image\",\n\t\t\t\t\t\t\t\t\tImagePullPolicy: v1.PullAlways,\n\t\t\t\t\t\t\t\t\tVolumeMounts: []v1.VolumeMount{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tName:      \"secretName-vol\",\n\t\t\t\t\t\t\t\t\t\t\tMountPath: 
\"/secretName\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVolumes: []v1.Volume{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"secretName-vol\",\n\t\t\t\t\t\t\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\t\t\t\t\t\t\tSecret: &v1.SecretVolumeSource{\n\t\t\t\t\t\t\t\t\t\t\tSecretName: \"secretName\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tNodeSelector: map[string]string{\n\t\t\t\t\t\t\t\t\"foo3\": \"bar3\",\n\t\t\t\t\t\t\t\t\"baz3\": \"qux3\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tServiceSpec: v1.ServiceSpec{\n\t\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t\t{Name: \"http-function-port\", Protocol: \"TCP\", Port: 8080, TargetPort: intstr.FromInt(8080)},\n\t\t\t\t},\n\t\t\t\tSelector: map[string]string{\n\t\t\t\t\t\"test\": \"1\",\n\t\t\t\t},\n\t\t\t\tType: v1.ServiceTypeClusterIP,\n\t\t\t},\n\t\t},\n\t}\n\n\tresult7, err := getFunctionDescription(\"test\", \"default\", \"file.handler\", ts.URL, \"dependencies\", \"runtime\", \"test-image\", \"128Mi\", \"\", \"10\", \"Always\", \"serviceAccount\", 8080, 0, false, []string{\"TEST=1\"}, []string{\"test=1\"}, []string{\"secretName\"}, []string{\"foo3=bar3\", \"baz3:qux3\"}, kubelessApi.Function{})\n\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif !reflect.DeepEqual(expectedURLFunction, *result7) {\n\t\tt.Errorf(\"Unexpected result. 
Expecting:\\n %+v\\nReceived:\\n %+v\", expectedURLFunction, *result7)\n\t}\n\n\t// It should handle zip files and compressed tar files from a URL and detect url+zip and url+compressedtar encoding respectively\n\tzipBytes, err := ioutil.ReadFile(zipFile.Name())\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tts2A := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Write(zipBytes)\n\t}))\n\tdefer ts2A.Close()\n\n\texpectedURLFunction.Spec.FunctionContentType = \"url+zip\"\n\texpectedURLFunction.Spec.Function = ts2A.URL + \"/test.zip\"\n\texpectedURLFunction.Spec.Checksum, err = getSha256(zipBytes)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tresult8A, err := getFunctionDescription(\"test\", \"default\", \"file.handler\", ts2A.URL+\"/test.zip\", \"dependencies\", \"runtime\", \"test-image\", \"128Mi\", \"\", \"10\", \"Always\", \"serviceAccount\", 8080, 0, false, []string{\"TEST=1\"}, []string{\"test=1\"}, []string{\"secretName\"}, []string{\"foo3=bar3\", \"baz3:qux3\"}, kubelessApi.Function{})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif !reflect.DeepEqual(expectedURLFunction, *result8A) {\n\t\tt.Errorf(\"Unexpected result. 
Expecting:\\n %+v\\nReceived:\\n %+v\", expectedURLFunction, *result8A)\n\t}\n\n\ttarGzBytes, err := ioutil.ReadFile(tarGzFile.Name())\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tts2B := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Write(tarGzBytes)\n\t}))\n\tdefer ts2B.Close()\n\n\texpectedURLFunction.Spec.FunctionContentType = \"url+compressedtar\"\n\texpectedURLFunction.Spec.Function = ts2B.URL + \"/test.tar.gz\"\n\texpectedURLFunction.Spec.Checksum, err = getSha256(tarGzBytes)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tresult8B, err := getFunctionDescription(\"test\", \"default\", \"file.handler\", ts2B.URL+\"/test.tar.gz\", \"dependencies\", \"runtime\", \"test-image\", \"128Mi\", \"\", \"10\", \"Always\", \"serviceAccount\", 8080, 0, false, []string{\"TEST=1\"}, []string{\"test=1\"}, []string{\"secretName\"}, []string{\"foo3=bar3\", \"baz3:qux3\"}, kubelessApi.Function{})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif !reflect.DeepEqual(expectedURLFunction, *result8B) {\n\t\tt.Errorf(\"Unexpected result. Expecting:\\n %+v\\nReceived:\\n %+v\", expectedURLFunction, *result8B)\n\t}\n\t// end test\n}\n\nfunc getSha256(bytes []byte) (string, error) {\n\th := sha256.New()\n\t_, err := h.Write(bytes)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tchecksum := hex.EncodeToString(h.Sum(nil))\n\treturn \"sha256:\" + checksum, nil\n}\n"
  },
  {
    "path": "cmd/kubeless/function/list.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage function\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\t\"github.com/ghodss/yaml\"\n\t\"github.com/gosuri/uitable\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\tk8sErrors \"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n\n\tkubelessApi \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\t\"github.com/kubeless/kubeless/pkg/client/clientset/versioned\"\n\t\"github.com/kubeless/kubeless/pkg/utils\"\n)\n\nvar listCmd = &cobra.Command{\n\tUse:     \"list FLAG\",\n\tAliases: []string{\"ls\"},\n\tShort:   \"list all functions deployed to Kubeless\",\n\tLong:    `list all functions deployed to Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\toutput, err := cmd.Flags().GetString(\"out\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = utils.GetDefaultNamespace()\n\t\t}\n\n\t\tkubelessClient, err := utils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not list functions: %v\", err)\n\t\t}\n\n\t\tapiV1Client := utils.GetClientOutOfCluster()\n\n\t\tif err := doList(cmd.OutOrStdout(), kubelessClient, apiV1Client, ns, output, args); 
err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t},\n}\n\nfunc init() {\n\tlistCmd.Flags().StringP(\"out\", \"o\", \"\", \"Output format. One of: json|yaml\")\n\tlistCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the function\")\n}\n\nfunc doList(w io.Writer, kubelessClient versioned.Interface, apiV1Client kubernetes.Interface, ns, output string, args []string) error {\n\tvar list []*kubelessApi.Function\n\tif len(args) == 0 {\n\t\tfuncList, err := kubelessClient.KubelessV1beta1().Functions(ns).List(metav1.ListOptions{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlist = funcList.Items\n\t} else {\n\t\tlist = make([]*kubelessApi.Function, 0, len(args))\n\t\tfor _, arg := range args {\n\t\t\tf, err := kubelessClient.KubelessV1beta1().Functions(ns).Get(arg, metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error listing function %s: %v\", arg, err)\n\t\t\t}\n\t\t\tlist = append(list, f)\n\t\t}\n\t}\n\n\treturn printFunctions(w, list, apiV1Client, output)\n}\n\nfunc parseDeps(deps, runtime string) (res string, err error) {\n\tif deps != \"\" {\n\t\tif strings.Contains(runtime, \"nodejs\") {\n\t\t\tpkgjson := make(map[string]interface{})\n\t\t\terr = json.Unmarshal([]byte(deps), &pkgjson)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif pkgjson[\"dependencies\"] != nil {\n\t\t\t\tdependencies := []string{}\n\t\t\t\tfor pkg, ver := range pkgjson[\"dependencies\"].(map[string]interface{}) {\n\t\t\t\t\tdependencies = append(dependencies, pkg+\": \"+ver.(string))\n\t\t\t\t}\n\t\t\t\tres = strings.Join(dependencies, \"\\n\")\n\t\t\t}\n\t\t} else {\n\t\t\tres = deps\n\t\t}\n\t}\n\treturn\n}\n\n// printFunctions formats the output of function list\nfunc printFunctions(w io.Writer, functions []*kubelessApi.Function, cli kubernetes.Interface, output string) error {\n\tif output == \"\" {\n\t\ttable := uitable.New()\n\t\ttable.MaxColWidth = 50\n\t\ttable.Wrap = true\n\t\ttable.AddRow(\"NAME\", \"NAMESPACE\", 
\"HANDLER\", \"RUNTIME\", \"DEPENDENCIES\", \"STATUS\")\n\t\tfor _, f := range functions {\n\t\t\tn := f.ObjectMeta.Name\n\t\t\th := f.Spec.Handler\n\t\t\tr := f.Spec.Runtime\n\t\t\tns := f.ObjectMeta.Namespace\n\t\t\tstatus, err := getDeploymentStatus(cli, f.ObjectMeta.Name, f.ObjectMeta.Namespace)\n\t\t\tif err != nil && k8sErrors.IsNotFound(err) {\n\t\t\t\tstatus = \"MISSING: Check controller logs\"\n\t\t\t} else if err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdeps, err := parseDeps(f.Spec.Deps, r)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttable.AddRow(n, ns, h, r, deps, status)\n\t\t}\n\t\tfmt.Fprintln(w, table)\n\t} else if output == \"wide\" {\n\t\ttable := uitable.New()\n\t\ttable.MaxColWidth = 50\n\t\ttable.Wrap = true\n\t\t// Header must match the 9 values passed to AddRow below; the stale\n\t\t// TYPE/TOPIC/SCHEDULE columns shifted every value under the wrong header.\n\t\ttable.AddRow(\"NAME\", \"NAMESPACE\", \"HANDLER\", \"RUNTIME\", \"DEPENDENCIES\", \"STATUS\", \"MEMORY\", \"ENV\", \"LABEL\")\n\t\tfor _, f := range functions {\n\t\t\tn := f.ObjectMeta.Name\n\t\t\th := f.Spec.Handler\n\t\t\tr := f.Spec.Runtime\n\t\t\tdeps, err := parseDeps(f.Spec.Deps, r)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tns := f.ObjectMeta.Namespace\n\t\t\tstatus, err := getDeploymentStatus(cli, f.ObjectMeta.Name, f.ObjectMeta.Namespace)\n\t\t\tif err != nil && k8sErrors.IsNotFound(err) {\n\t\t\t\tstatus = \"MISSING: Check controller logs\"\n\t\t\t} else if err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tmem := \"\"\n\t\t\tenv := \"\"\n\t\t\tif len(f.Spec.Deployment.Spec.Template.Spec.Containers[0].Resources.Requests) != 0 {\n\t\t\t\tmem = f.Spec.Deployment.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().String()\n\t\t\t}\n\t\t\tif len(f.Spec.Deployment.Spec.Template.Spec.Containers[0].Env) != 0 {\n\t\t\t\tvar buffer bytes.Buffer\n\t\t\t\tfor _, e := range f.Spec.Deployment.Spec.Template.Spec.Containers[0].Env {\n\t\t\t\t\tbuffer.WriteString(e.Name + \" = \" + e.Value + \"\\n\")\n\t\t\t\t}\n\t\t\t\tenv = buffer.String()\n\t\t\t}\n\t\t\tlabel 
:= \"\"\n\t\t\tif len(f.ObjectMeta.Labels) > 0 {\n\t\t\t\tvar buffer bytes.Buffer\n\t\t\t\tfor k, v := range f.ObjectMeta.Labels {\n\t\t\t\t\tbuffer.WriteString(k + \" : \" + v + \"\\n\")\n\t\t\t\t}\n\t\t\t\tlabel = buffer.String()\n\t\t\t}\n\t\t\ttable.AddRow(n, ns, h, r, deps, status, mem, env, label)\n\t\t}\n\t\tfmt.Fprintln(w, table)\n\t} else {\n\t\tswitch output {\n\t\tcase \"json\":\n\t\t\tb, err := json.MarshalIndent(functions, \"\", \"  \")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfmt.Fprintln(w, string(b))\n\t\tcase \"yaml\":\n\t\t\tb, err := yaml.Marshal(functions)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfmt.Fprintln(w, string(b))\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"Wrong output format. Please use only json|yaml\")\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "cmd/kubeless/function/list_test.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage function\n\nimport (\n\t\"bytes\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tv1 \"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n\n\tkubelessApi \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\t\"github.com/kubeless/kubeless/pkg/client/clientset/versioned\"\n\tfFake \"github.com/kubeless/kubeless/pkg/client/clientset/versioned/fake\"\n)\n\nfunc listOutput(t *testing.T, client versioned.Interface, apiV1Client kubernetes.Interface, ns, output string, args []string) string {\n\tvar buf bytes.Buffer\n\n\tif err := doList(&buf, client, apiV1Client, ns, output, args); err != nil {\n\t\tt.Fatalf(\"doList returned error: %v\", err)\n\t}\n\n\treturn buf.String()\n}\n\nfunc TestList(t *testing.T) {\n\tfuncMem, _ := parseResource(\"128Mi\")\n\tlistObj := kubelessApi.FunctionList{\n\t\tItems: []*kubelessApi.Function{\n\t\t\t{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"foo\",\n\t\t\t\t\tNamespace: \"myns\",\n\t\t\t\t},\n\t\t\t\tSpec: kubelessApi.FunctionSpec{\n\t\t\t\t\tHandler:  \"fhandler\",\n\t\t\t\t\tFunction: \"ffunction\",\n\t\t\t\t\tRuntime:  \"fruntime\",\n\t\t\t\t\tDeps:     \"fdeps\",\n\t\t\t\t\tDeployment: appsv1.Deployment{\n\t\t\t\t\t\tSpec: 
appsv1.DeploymentSpec{\n\t\t\t\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\t\t\t\tContainers: []v1.Container{{}},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"bar\",\n\t\t\t\t\tNamespace: \"myns\",\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"foo\": \"bar\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: kubelessApi.FunctionSpec{\n\t\t\t\t\tHandler:  \"bhandler\",\n\t\t\t\t\tFunction: \"bfunction\",\n\t\t\t\t\tRuntime:  \"nodejs6\",\n\t\t\t\t\tDeps:     \"{\\\"dependencies\\\": {\\\"test\\\": \\\"^1.0.0\\\"}}\",\n\t\t\t\t\tDeployment: appsv1.Deployment{\n\t\t\t\t\t\tSpec: appsv1.DeploymentSpec{\n\t\t\t\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tEnv: []v1.EnvVar{\n\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\tName:  \"foo\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tValue: \"bar\",\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\tName:  \"foo2\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tValue: \"bar2\",\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\tResources: v1.ResourceRequirements{\n\t\t\t\t\t\t\t\t\t\t\t\tLimits: map[v1.ResourceName]resource.Quantity{\n\t\t\t\t\t\t\t\t\t\t\t\t\tv1.ResourceMemory: funcMem,\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\tRequests: map[v1.ResourceName]resource.Quantity{\n\t\t\t\t\t\t\t\t\t\t\t\t\tv1.ResourceMemory: funcMem,\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"wrong\",\n\t\t\t\t\tNamespace: \"myns\",\n\t\t\t\t},\n\t\t\t\tSpec: kubelessApi.FunctionSpec{\n\t\t\t\t\tHandler:  
\"fhandler\",\n\t\t\t\t\tFunction: \"ffunction\",\n\t\t\t\t\tRuntime:  \"fruntime\",\n\t\t\t\t\tDeps:     \"fdeps\",\n\t\t\t\t\tDeployment: appsv1.Deployment{\n\t\t\t\t\t\tSpec: appsv1.DeploymentSpec{\n\t\t\t\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\t\t\t\tContainers: []v1.Container{{}},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tclient := fFake.NewSimpleClientset(listObj.Items[0], listObj.Items[1], listObj.Items[2])\n\n\tdeploymentFoo := appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"foo\",\n\t\t\tNamespace: \"myns\",\n\t\t},\n\t\tStatus: appsv1.DeploymentStatus{\n\t\t\tReplicas:      int32(1),\n\t\t\tReadyReplicas: int32(1),\n\t\t},\n\t}\n\tdeploymentBar := appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"bar\",\n\t\t\tNamespace: \"myns\",\n\t\t},\n\t\tStatus: appsv1.DeploymentStatus{\n\t\t\tReplicas:      int32(2),\n\t\t\tReadyReplicas: int32(0),\n\t\t},\n\t}\n\tapiV1Client := fake.NewSimpleClientset(&deploymentFoo, &deploymentBar)\n\n\t// No arg -> list everything in namespace\n\toutput := listOutput(t, client, apiV1Client, \"myns\", \"\", []string{})\n\tt.Log(\"output is\", output)\n\n\tif !strings.Contains(output, \"foo\") || !strings.Contains(output, \"bar\") {\n\t\tt.Errorf(\"table output didn't mention both functions\")\n\t}\n\t// Status\n\tm, err := regexp.MatchString(\"foo.*1/1 READY\", output)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !m {\n\t\tt.Errorf(\"table output didn't mention deployment status\")\n\t}\n\tm, err = regexp.MatchString(\"bar.*0/2 NOT READY\", output)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !m {\n\t\tt.Errorf(\"table output didn't mention deployment status\")\n\t}\n\tm, err = regexp.MatchString(\"wrong.*MISSING\", output)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !m {\n\t\tt.Errorf(\"table output didn't mention deployment status\")\n\t}\n\n\t// Explicit 
arg(s)\n\toutput = listOutput(t, client, apiV1Client, \"myns\", \"\", []string{\"foo\"})\n\tt.Log(\"output is\", output)\n\n\tif !strings.Contains(output, \"foo\") {\n\t\tt.Errorf(\"table output didn't mention explicit function foo\")\n\t}\n\tif strings.Contains(output, \"bar\") {\n\t\tt.Errorf(\"table output mentions unrequested function bar\")\n\t}\n\n\tif strings.Contains(output, \"test: ^1.0.0\") {\n\t\tt.Errorf(\"table output doesn't show parsed dependencies\")\n\t}\n\n\t// TODO: Actually validate the output of the following.\n\t// Probably need to fix output framing first.\n\n\t// json output\n\toutput = listOutput(t, client, apiV1Client, \"myns\", \"json\", []string{})\n\tt.Log(\"output is\", output)\n\tif !strings.Contains(output, \"foo\") || !strings.Contains(output, \"bar\") {\n\t\tt.Errorf(\"table output didn't mention both functions\")\n\t}\n\n\t// yaml output\n\toutput = listOutput(t, client, apiV1Client, \"myns\", \"yaml\", []string{})\n\tt.Log(\"output is\", output)\n\tif !strings.Contains(output, \"128Mi\") {\n\t\tt.Errorf(\"table output didn't mention proper memory of function\")\n\t}\n\n\t// wide output\n\toutput = listOutput(t, client, apiV1Client, \"myns\", \"wide\", []string{})\n\tt.Log(\"output is\", output)\n\tif !strings.Contains(output, \"foo = bar\") {\n\t\tt.Errorf(\"table output didn't mention proper env of function\")\n\t}\n}\n"
  },
  {
    "path": "cmd/kubeless/function/logs.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage function\n\nimport (\n\t\"io\"\n\t\"os\"\n\n\t\"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\t\"k8s.io/api/core/v1\"\n)\n\nvar logsCmd = &cobra.Command{\n\tUse:   \"logs <function_name> FLAG\",\n\tShort: \"get logs from a running function\",\n\tLong:  `get logs from a running function`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - function name\")\n\t\t}\n\t\tfuncName := args[0]\n\t\tfollow, err := cmd.Flags().GetBool(\"follow\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = utils.GetDefaultNamespace()\n\t\t}\n\n\t\tk8sClient := utils.GetClientOutOfCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Getting log failed: %v\", err)\n\t\t}\n\t\tpods, err := utils.GetPodsByLabel(k8sClient, ns, \"function\", funcName)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can't find the function pod: %v\", err)\n\t\t}\n\t\treadyPod, err := utils.GetReadyPod(pods)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"No function pod is running: %v\", err)\n\t\t}\n\t\tpodLog := &v1.PodLogOptions{\n\t\t\tContainer: funcName,\n\t\t\tFollow:    follow,\n\t\t}\n\t\treq := 
k8sClient.Core().Pods(ns).GetLogs(readyPod.Name, podLog)\n\n\t\treadCloser, err := req.Stream()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Getting log failed: %v\", err)\n\t\t}\n\t\tdefer readCloser.Close()\n\t\tio.Copy(os.Stdout, readCloser)\n\t},\n}\n\nfunc init() {\n\tlogsCmd.Flags().BoolP(\"follow\", \"f\", false, \"Specify if the logs should be streamed.\")\n\tlogsCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the function\")\n}\n"
  },
  {
    "path": "cmd/kubeless/function/top.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage function\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com/ghodss/yaml\"\n\t\"github.com/gosuri/uitable\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\t\"k8s.io/client-go/kubernetes\"\n\n\tkubelessApi \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\t\"github.com/kubeless/kubeless/pkg/client/clientset/versioned\"\n\t\"github.com/kubeless/kubeless/pkg/utils\"\n)\n\nvar topCmd = &cobra.Command{\n\tUse:     \"top\",\n\tAliases: []string{\"stats\"},\n\tShort:   \"display function metrics\",\n\tLong:    `display function metrics`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tfunctionName, err := cmd.Flags().GetString(\"function\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = utils.GetDefaultNamespace()\n\t\t}\n\t\toutput, err := cmd.Flags().GetString(\"out\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\n\t\tapiV1Client := utils.GetClientOutOfCluster()\n\t\tkubelessClient, err := utils.GetKubelessClientOutCluster()\n\t\thandler := &utils.PrometheusMetricsHandler{}\n\n\t\terr = doTop(cmd.OutOrStdout(), kubelessClient, apiV1Client, handler, ns, functionName, output)\n\t\tif err != nil 
{\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t},\n}\n\nfunc init() {\n\ttopCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the function\")\n\ttopCmd.Flags().StringP(\"function\", \"f\", \"\", \"Specify the function\")\n\ttopCmd.Flags().StringP(\"out\", \"o\", \"\", \"Output format. One of: json|yaml\")\n}\n\nfunc doTop(w io.Writer, kubelessClient versioned.Interface, apiV1Client kubernetes.Interface, handler utils.MetricsRetriever, ns, functionName, output string) error {\n\tfunctions, err := getFunctions(kubelessClient, ns, functionName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error listing functions: %v\", err)\n\t}\n\n\tch := make(chan []*utils.Metric, len(functions))\n\tfor _, f := range functions {\n\t\tgo func(f *kubelessApi.Function) {\n\t\t\tch <- utils.GetFunctionMetrics(apiV1Client, handler, ns, f.ObjectMeta.Name)\n\t\t}(f)\n\t}\n\n\tvar metrics []*utils.Metric\n\n\ti := 0\n\tfor i < len(functions) {\n\t\tselect {\n\t\tcase r := <-ch:\n\t\t\tmetrics = append(metrics, r...)\n\t\t\ti++\n\t\t// timeout all go routines after 5 seconds to avoid hanging at the cmd line\n\t\tcase <-time.After(5 * time.Second):\n\t\t\ti = len(functions)\n\t\t}\n\t}\n\n\t// sort the results - useful when using 'watch kubeless function top'\n\tsort.Slice(metrics, func(i, j int) bool {\n\t\treturn metrics[i].FunctionName < metrics[j].FunctionName\n\t})\n\treturn printTop(w, metrics, apiV1Client, output)\n}\n\nfunc printTop(w io.Writer, metrics []*utils.Metric, cli kubernetes.Interface, output string) error {\n\tif output == \"\" {\n\t\ttable := uitable.New()\n\t\ttable.MaxColWidth = 50\n\t\ttable.Wrap = true\n\t\ttable.AddRow(\"NAME\", \"NAMESPACE\", \"METHOD\", \"TOTAL_CALLS\", \"TOTAL_FAILURES\", \"TOTAL_DURATION_SECONDS\", \"AVG_DURATION_SECONDS\", \"MESSAGE\")\n\t\tfor _, f := range metrics {\n\t\t\tif f.Message != \"\" {\n\t\t\t\ttable.AddRow(f.FunctionName, f.Namespace, \"\", \"\", \"\", \"\", \"\", f.Message)\n\t\t\t} else 
{\n\t\t\t\ttable.AddRow(f.FunctionName, f.Namespace, f.Method, f.TotalCalls, f.TotalFailures, f.TotalDurationSeconds, f.AvgDurationSeconds, \"\")\n\t\t\t}\n\t\t}\n\t\tfmt.Fprintln(w, table)\n\t} else {\n\t\tswitch output {\n\t\tcase \"json\":\n\t\t\tb, err := json.MarshalIndent(metrics, \"\", \"  \")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfmt.Fprintln(w, string(b))\n\t\tcase \"yaml\":\n\t\t\tb, err := yaml.Marshal(metrics)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfmt.Fprintln(w, string(b))\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"Wrong output format. Please use only json|yaml\")\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "cmd/kubeless/function/top_test.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage function\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"strings\"\n\t\"testing\"\n\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tv1 \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/util/intstr\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n\n\tkubelessApi \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\t\"github.com/kubeless/kubeless/pkg/client/clientset/versioned\"\n\tfFake \"github.com/kubeless/kubeless/pkg/client/clientset/versioned/fake\"\n\t\"github.com/kubeless/kubeless/pkg/utils\"\n)\n\ntype testMetricsHandler struct{}\n\n// handler used for testing purposes only\n// satisfies the MetricsRetriever interface, gets metrics from the test http server (URL to test http server stored in svc.SelfLink field)\nfunc (h *testMetricsHandler) GetRawMetrics(apiClient kubernetes.Interface, namespace, functionName string) ([]byte, error) {\n\tsvc, err := apiClient.CoreV1().Services(namespace).Get(functionName, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\tb, err := http.Get(svc.SelfLink)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer b.Body.Close()\n\treturn ioutil.ReadAll(b.Body)\n}\n\nfunc topOutput(t *testing.T, client versioned.Interface, apiV1Client kubernetes.Interface, h utils.MetricsRetriever, ns, 
functionName, output string) string {\n\tvar buf bytes.Buffer\n\n\tif err := doTop(&buf, client, apiV1Client, h, ns, functionName, output); err != nil {\n\t\tt.Fatalf(\"doTop returned error: %v\", err)\n\t}\n\n\treturn buf.String()\n}\n\nfunc TestTop(t *testing.T) {\n\n\t// setup test server to serve the /metrics endpoint\n\tts2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w,\n\t\t\t`# HELP go_gc_duration_seconds A summary of the GC invocation durations.\n\t\t\t# TYPE go_gc_duration_seconds summary\n\t\t\tgo_gc_duration_seconds{quantile=\"0\"} 1.6846e-05\n\t\t\tgo_gc_duration_seconds{quantile=\"0.25\"} 3.9124e-05\n\t\t\tgo_gc_duration_seconds{quantile=\"0.5\"} 0.000147183\n\t\t\tgo_gc_duration_seconds{quantile=\"0.75\"} 0.000958419\n\t\t\tgo_gc_duration_seconds{quantile=\"1\"} 0.00796035\n\t\t\tgo_gc_duration_seconds_sum 2.50781303\n\t\t\tgo_gc_duration_seconds_count 3424\n\t\t\t# HELP go_goroutines Number of goroutines that currently exist.\n\t\t\t# TYPE go_goroutines gauge\n\t\t\tgo_goroutines 7\n\t\t\t# HELP go_info Information about the Go environment.\n\t\t\t# TYPE go_info gauge\n\t\t\tgo_info{version=\"go1.10.2\"} 1\n\t\t\t# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.\n\t\t\t# TYPE go_memstats_alloc_bytes gauge\n\t\t\tgo_memstats_alloc_bytes 2.28336e+06\n\t\t\t# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.\n\t\t\t# TYPE go_memstats_alloc_bytes_total counter\n\t\t\tgo_memstats_alloc_bytes_total 9.9682544e+09\n\t\t\t# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.\n\t\t\t# TYPE go_memstats_buck_hash_sys_bytes gauge\n\t\t\tgo_memstats_buck_hash_sys_bytes 1.500081e+06\n\t\t\t# HELP go_memstats_frees_total Total number of frees.\n\t\t\t# TYPE go_memstats_frees_total counter\n\t\t\tgo_memstats_frees_total 1.2698678e+07\n\t\t\t# HELP go_memstats_gc_cpu_fraction The fraction of this program's 
available CPU time used by the GC since the program started.\n\t\t\t# TYPE go_memstats_gc_cpu_fraction gauge\n\t\t\tgo_memstats_gc_cpu_fraction 0.0001214506861340198\n\t\t\t# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.\n\t\t\t# TYPE go_memstats_gc_sys_bytes gauge\n\t\t\tgo_memstats_gc_sys_bytes 405504\n\t\t\t# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.\n\t\t\t# TYPE go_memstats_heap_alloc_bytes gauge\n\t\t\tgo_memstats_heap_alloc_bytes 2.28336e+06\n\t\t\t# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.\n\t\t\t# TYPE go_memstats_heap_idle_bytes gauge\n\t\t\tgo_memstats_heap_idle_bytes 2.6624e+06\n\t\t\t# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.\n\t\t\t# TYPE go_memstats_heap_inuse_bytes gauge\n\t\t\tgo_memstats_heap_inuse_bytes 3.072e+06\n\t\t\t# HELP go_memstats_heap_objects Number of allocated objects.\n\t\t\t# TYPE go_memstats_heap_objects gauge\n\t\t\tgo_memstats_heap_objects 6280\n\t\t\t# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.\n\t\t\t# TYPE go_memstats_heap_released_bytes gauge\n\t\t\tgo_memstats_heap_released_bytes 0\n\t\t\t# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.\n\t\t\t# TYPE go_memstats_heap_sys_bytes gauge\n\t\t\tgo_memstats_heap_sys_bytes 5.7344e+06\n\t\t\t# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.\n\t\t\t# TYPE go_memstats_last_gc_time_seconds gauge\n\t\t\tgo_memstats_last_gc_time_seconds 1.528573398809276e+09\n\t\t\t# HELP go_memstats_lookups_total Total number of pointer lookups.\n\t\t\t# TYPE go_memstats_lookups_total counter\n\t\t\tgo_memstats_lookups_total 88701\n\t\t\t# HELP go_memstats_mallocs_total Total number of mallocs.\n\t\t\t# TYPE go_memstats_mallocs_total counter\n\t\t\tgo_memstats_mallocs_total 1.2704958e+07\n\t\t\t# HELP go_memstats_mcache_inuse_bytes Number of bytes in 
use by mcache structures.\n\t\t\t# TYPE go_memstats_mcache_inuse_bytes gauge\n\t\t\tgo_memstats_mcache_inuse_bytes 3472\n\t\t\t# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.\n\t\t\t# TYPE go_memstats_mcache_sys_bytes gauge\n\t\t\tgo_memstats_mcache_sys_bytes 16384\n\t\t\t# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.\n\t\t\t# TYPE go_memstats_mspan_inuse_bytes gauge\n\t\t\tgo_memstats_mspan_inuse_bytes 25688\n\t\t\t# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.\n\t\t\t# TYPE go_memstats_mspan_sys_bytes gauge\n\t\t\tgo_memstats_mspan_sys_bytes 32768\n\t\t\t# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.\n\t\t\t# TYPE go_memstats_next_gc_bytes gauge\n\t\t\tgo_memstats_next_gc_bytes 4.194304e+06\n\t\t\t# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.\n\t\t\t# TYPE go_memstats_other_sys_bytes gauge\n\t\t\tgo_memstats_other_sys_bytes 738631\n\t\t\t# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.\n\t\t\t# TYPE go_memstats_stack_inuse_bytes gauge\n\t\t\tgo_memstats_stack_inuse_bytes 557056\n\t\t\t# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.\n\t\t\t# TYPE go_memstats_stack_sys_bytes gauge\n\t\t\tgo_memstats_stack_sys_bytes 557056\n\t\t\t# HELP go_memstats_sys_bytes Number of bytes obtained from system.\n\t\t\t# TYPE go_memstats_sys_bytes gauge\n\t\t\tgo_memstats_sys_bytes 8.984824e+06\n\t\t\t# HELP go_threads Number of OS threads created.\n\t\t\t# TYPE go_threads gauge\n\t\t\tgo_threads 10\n\t\t\t# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.\n\t\t\t# TYPE process_cpu_seconds_total counter\n\t\t\tprocess_cpu_seconds_total 25.88\n\t\t\t# HELP process_max_fds Maximum number of open file descriptors.\n\t\t\t# TYPE process_max_fds 
gauge\n\t\t\tprocess_max_fds 1.048576e+06\n\t\t\t# HELP process_open_fds Number of open file descriptors.\n\t\t\t# TYPE process_open_fds gauge\n\t\t\tprocess_open_fds 8\n\t\t\t# HELP process_resident_memory_bytes Resident memory size in bytes.\n\t\t\t# TYPE process_resident_memory_bytes gauge\n\t\t\tprocess_resident_memory_bytes 1.3942784e+07\n\t\t\t# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.\n\t\t\t# TYPE process_start_time_seconds gauge\n\t\t\tprocess_start_time_seconds 1.52853941225e+09\n\t\t\t# HELP process_virtual_memory_bytes Virtual memory size in bytes.\n\t\t\t# TYPE process_virtual_memory_bytes gauge\n\t\t\tprocess_virtual_memory_bytes 1.57294592e+08\n\t\t\t# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.\n\t\t\t# TYPE promhttp_metric_handler_requests_in_flight gauge\n\t\t\tpromhttp_metric_handler_requests_in_flight 1\n\t\t\t# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.\n\t\t\t# TYPE promhttp_metric_handler_requests_total counter\n\t\t\tpromhttp_metric_handler_requests_total{code=\"200\"} 10798\n\t\t\tpromhttp_metric_handler_requests_total{code=\"500\"} 0\n\t\t\tpromhttp_metric_handler_requests_total{code=\"503\"} 0\n\n`)\n\t}))\n\tdefer ts2.Close()\n\n\t// setup test server to serve the /metrics endpoint\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Fprint(w,\n\t\t\t`# HELP process_virtual_memory_bytes Virtual memory size in bytes.\n\t\t\t# TYPE process_virtual_memory_bytes gauge\n\t\t\tprocess_virtual_memory_bytes 815255552.0\n\t\t\t# HELP process_resident_memory_bytes Resident memory size in bytes.\n\t\t\t# TYPE process_resident_memory_bytes gauge\n\t\t\tprocess_resident_memory_bytes 25001984.0\n\t\t\t# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.\n\t\t\t# TYPE process_start_time_seconds gauge\n\t\t\tprocess_start_time_seconds 
1528507334.03\n\t\t\t# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.\n\t\t\t# TYPE process_cpu_seconds_total counter\n\t\t\tprocess_cpu_seconds_total 54.72\n\t\t\t# HELP process_open_fds Number of open file descriptors.\n\t\t\t# TYPE process_open_fds gauge\n\t\t\tprocess_open_fds 8.0\n\t\t\t# HELP process_max_fds Maximum number of open file descriptors.\n\t\t\t# TYPE process_max_fds gauge\n\t\t\tprocess_max_fds 1048576.0\n\t\t\t# HELP python_info Python platform information\n\t\t\t# TYPE python_info gauge\n\t\t\tpython_info{implementation=\"CPython\",major=\"2\",minor=\"7\",patchlevel=\"9\",version=\"2.7.9\"} 1.0\n\t\t\t# HELP function_failures_total Number of exceptions in user function\n\t\t\t# TYPE function_failures_total counter\n\t\t\tfunction_failures_total{method=\"GET\"} 0.0\n\t\t\tfunction_failures_total{method=\"POST\"} 0.0\n\t\t\t# HELP function_calls_total Number of calls to user function\n\t\t\t# TYPE function_calls_total counter\n\t\t\tfunction_calls_total{method=\"GET\"} 254.0\n\t\t\tfunction_calls_total{method=\"POST\"} 296.0\n\t\t\t# HELP function_duration_seconds Duration of user function in seconds\n\t\t\t# TYPE function_duration_seconds histogram\n\t\t\tfunction_duration_seconds_bucket{le=\"0.005\",method=\"GET\"} 8.0\n\t\t\tfunction_duration_seconds_bucket{le=\"0.01\",method=\"GET\"} 191.0\n\t\t\tfunction_duration_seconds_bucket{le=\"0.025\",method=\"GET\"} 248.0\n\t\t\tfunction_duration_seconds_bucket{le=\"0.05\",method=\"GET\"} 253.0\n\t\t\tfunction_duration_seconds_bucket{le=\"0.075\",method=\"GET\"} 253.0\n\t\t\tfunction_duration_seconds_bucket{le=\"0.1\",method=\"GET\"} 253.0\n\t\t\tfunction_duration_seconds_bucket{le=\"0.25\",method=\"GET\"} 254.0\n\t\t\tfunction_duration_seconds_bucket{le=\"0.5\",method=\"GET\"} 254.0\n\t\t\tfunction_duration_seconds_bucket{le=\"0.75\",method=\"GET\"} 254.0\n\t\t\tfunction_duration_seconds_bucket{le=\"1.0\",method=\"GET\"} 
254.0\n\t\t\tfunction_duration_seconds_bucket{le=\"2.5\",method=\"GET\"} 254.0\n\t\t\tfunction_duration_seconds_bucket{le=\"5.0\",method=\"GET\"} 254.0\n\t\t\tfunction_duration_seconds_bucket{le=\"7.5\",method=\"GET\"} 254.0\n\t\t\tfunction_duration_seconds_bucket{le=\"10.0\",method=\"GET\"} 254.0\n\t\t\tfunction_duration_seconds_bucket{le=\"+Inf\",method=\"GET\"} 254.0\n\t\t\tfunction_duration_seconds_count{method=\"GET\"} 254.0\n\t\t\tfunction_duration_seconds_sum{method=\"GET\"} 2.863368272781372\n\t\t\tfunction_duration_seconds_bucket{le=\"0.005\",method=\"POST\"} 1.0\n\t\t\tfunction_duration_seconds_bucket{le=\"0.01\",method=\"POST\"} 157.0\n\t\t\tfunction_duration_seconds_bucket{le=\"0.025\",method=\"POST\"} 296.0\n\t\t\tfunction_duration_seconds_bucket{le=\"0.05\",method=\"POST\"} 296.0\n\t\t\tfunction_duration_seconds_bucket{le=\"0.075\",method=\"POST\"} 296.0\n\t\t\tfunction_duration_seconds_bucket{le=\"0.1\",method=\"POST\"} 296.0\n\t\t\tfunction_duration_seconds_bucket{le=\"0.25\",method=\"POST\"} 296.0\n\t\t\tfunction_duration_seconds_bucket{le=\"0.5\",method=\"POST\"} 296.0\n\t\t\tfunction_duration_seconds_bucket{le=\"0.75\",method=\"POST\"} 296.0\n\t\t\tfunction_duration_seconds_bucket{le=\"1.0\",method=\"POST\"} 296.0\n\t\t\tfunction_duration_seconds_bucket{le=\"2.5\",method=\"POST\"} 296.0\n\t\t\tfunction_duration_seconds_bucket{le=\"5.0\",method=\"POST\"} 296.0\n\t\t\tfunction_duration_seconds_bucket{le=\"7.5\",method=\"POST\"} 296.0\n\t\t\tfunction_duration_seconds_bucket{le=\"10.0\",method=\"POST\"} 296.0\n\t\t\tfunction_duration_seconds_bucket{le=\"+Inf\",method=\"POST\"} 296.0\n\t\t\tfunction_duration_seconds_count{method=\"POST\"} 296.0\n\t\t\tfunction_duration_seconds_sum{method=\"POST\"} 3.4116291999816895\n\n`)\n\t}))\n\tdefer ts.Close()\n\n\tfunction1Name := \"pyFunc\"\n\tfunction2Name := \"goFunc\"\n\tnamespace := \"myns\"\n\n\tlistObj := kubelessApi.FunctionList{\n\t\tItems: []*kubelessApi.Function{\n\t\t\t{\n\t\t\t\tObjectMeta: 
metav1.ObjectMeta{\n\t\t\t\t\tName:      function1Name,\n\t\t\t\t\tNamespace: namespace,\n\t\t\t\t},\n\t\t\t\tSpec: kubelessApi.FunctionSpec{\n\t\t\t\t\tHandler:  \"fhandler\",\n\t\t\t\t\tFunction: function1Name,\n\t\t\t\t\tRuntime:  \"pyruntime\",\n\t\t\t\t\tDeps:     \"pydeps\",\n\t\t\t\t\tDeployment: appsv1.Deployment{\n\t\t\t\t\t\tSpec: appsv1.DeploymentSpec{\n\t\t\t\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\t\t\t\tContainers: []v1.Container{{}},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      function2Name,\n\t\t\t\t\tNamespace: namespace,\n\t\t\t\t},\n\t\t\t\tSpec: kubelessApi.FunctionSpec{\n\t\t\t\t\tHandler:  \"gohandler\",\n\t\t\t\t\tFunction: function2Name,\n\t\t\t\t\tRuntime:  \"goruntime\",\n\t\t\t\t\tDeps:     \"godeps\",\n\t\t\t\t\tDeployment: appsv1.Deployment{\n\t\t\t\t\t\tSpec: appsv1.DeploymentSpec{\n\t\t\t\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\t\t\t\tContainers: []v1.Container{{}},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tclient := fFake.NewSimpleClientset(listObj.Items[0], listObj.Items[1])\n\n\tdeploymentPy := appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      function1Name,\n\t\t\tNamespace: namespace,\n\t\t},\n\t\tStatus: appsv1.DeploymentStatus{\n\t\t\tReplicas:      int32(1),\n\t\t\tReadyReplicas: int32(1),\n\t\t},\n\t}\n\tdeploymentGo := appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      function2Name,\n\t\t\tNamespace: namespace,\n\t\t},\n\t\tStatus: appsv1.DeploymentStatus{\n\t\t\tReplicas:      int32(1),\n\t\t\tReadyReplicas: int32(1),\n\t\t},\n\t}\n\tserviceGo := v1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      function2Name,\n\t\t\tNamespace: namespace,\n\t\t\tSelfLink:  ts2.URL,\n\t\t},\n\t\tSpec: 
v1.ServiceSpec{\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tName:       \"p1\",\n\t\t\t\t\tPort:       int32(8080),\n\t\t\t\t\tTargetPort: intstr.FromInt(8080),\n\t\t\t\t\tNodePort:   0,\n\t\t\t\t\tProtocol:   v1.ProtocolTCP,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tservicePy := v1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      function1Name,\n\t\t\tNamespace: namespace,\n\t\t\tSelfLink:  ts.URL,\n\t\t},\n\t\tSpec: v1.ServiceSpec{\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tName:       \"p1\",\n\t\t\t\t\tPort:       int32(8080),\n\t\t\t\t\tTargetPort: intstr.FromInt(8080),\n\t\t\t\t\tNodePort:   0,\n\t\t\t\t\tProtocol:   v1.ProtocolTCP,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tapiV1Client := fake.NewSimpleClientset(&deploymentPy, &servicePy, &deploymentGo, &serviceGo)\n\n\thandler := &testMetricsHandler{}\n\n\t// List multiple functions\n\toutput := topOutput(t, client, apiV1Client, handler, namespace, \"\", \"\")\n\tt.Log(\"output is\", output)\n\n\tif !strings.Contains(output, function1Name) || !strings.Contains(output, function2Name) || !strings.Contains(output, namespace) {\n\t\tt.Errorf(\"table output didn't match FUNCTION or NAMESPACE\")\n\t}\n\tif !strings.Contains(output, \"GET\") || !strings.Contains(output, \"POST\") {\n\t\tt.Errorf(\"table output didn't match on METHOD\")\n\t}\n\tif !strings.Contains(output, \"2.86336\") || !strings.Contains(output, \"3.41162\") {\n\t\tt.Errorf(\"table output didn't match on TOTAL_DURATION_SECONDS\")\n\t}\n\t// verify calculated fields\n\tif !strings.Contains(output, \"0.0112731\") || !strings.Contains(output, \"0.0115257\") {\n\t\tt.Errorf(\"table output didn't match on AVG_DURATION_SECONDS\")\n\t}\n\n\t// Get single function\n\toutput = topOutput(t, client, apiV1Client, handler, namespace, function2Name, \"\")\n\tt.Log(\"output is\", output)\n\n\tif strings.Contains(output, function1Name) || !strings.Contains(output, function2Name) || !strings.Contains(output, namespace) 
{\n\t\tt.Errorf(\"table output didn't match FUNCTION or NAMESPACE\")\n\t}\n\n\t// json output\n\toutput = topOutput(t, client, apiV1Client, handler, namespace, \"\", \"json\")\n\tt.Log(\"output is\", output)\n\n\tif !strings.Contains(output, function1Name) || !strings.Contains(output, function2Name) || !strings.Contains(output, namespace) {\n\t\tt.Errorf(\"table output didn't match FUNCTION or NAMESPACE\")\n\t}\n\tif !strings.Contains(output, \"GET\") || !strings.Contains(output, \"POST\") {\n\t\tt.Errorf(\"table output didn't match on METHOD\")\n\t}\n\tif !strings.Contains(output, \"2.86336\") || !strings.Contains(output, \"3.41162\") {\n\t\tt.Errorf(\"table output didn't match on TOTAL_DURATION_SECONDS\")\n\t}\n\t// verify calculated fields\n\tif !strings.Contains(output, \"0.0112731\") || !strings.Contains(output, \"0.0115257\") {\n\t\tt.Errorf(\"table output didn't match on AVG_DURATION_SECONDS\")\n\t}\n\n\t// yaml output\n\toutput = topOutput(t, client, apiV1Client, handler, namespace, \"\", \"yaml\")\n\tt.Log(\"output is\", output)\n\n\tif !strings.Contains(output, function1Name) || !strings.Contains(output, function2Name) || !strings.Contains(output, namespace) {\n\t\tt.Errorf(\"table output didn't match FUNCTION or NAMESPACE\")\n\t}\n\tif !strings.Contains(output, \"GET\") || !strings.Contains(output, \"POST\") {\n\t\tt.Errorf(\"table output didn't match on METHOD\")\n\t}\n\tif !strings.Contains(output, \"2.86336\") || !strings.Contains(output, \"3.41162\") {\n\t\tt.Errorf(\"table output didn't match on TOTAL_DURATION_SECONDS\")\n\t}\n\t// verify calculated fields\n\tif !strings.Contains(output, \"0.0112731\") || !strings.Contains(output, \"0.0115257\") {\n\t\tt.Errorf(\"table output didn't match on AVG_DURATION_SECONDS\")\n\t}\n\n}\n"
  },
  {
    "path": "cmd/kubeless/function/update.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage function\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/ghodss/yaml\"\n\t\"github.com/kubeless/kubeless/pkg/langruntime\"\n\t\"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n)\n\nvar updateCmd = &cobra.Command{\n\tUse:   \"update <function_name> FLAG\",\n\tShort: \"update a function on Kubeless\",\n\tLong:  `update a function on Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcli := utils.GetClientOutOfCluster()\n\t\tapiExtensionsClientset := utils.GetAPIExtensionsClientOutOfCluster()\n\t\tconfig, err := utils.GetKubelessConfig(cli, apiExtensionsClientset)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Unable to read the configmap: %v\", err)\n\t\t}\n\n\t\tvar lr = langruntime.New(config)\n\t\tlr.ReadConfigMap()\n\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - function name\")\n\t\t}\n\t\tfuncName := args[0]\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tvar nsArg string\n\t\tif ns == \"\" {\n\t\t\tns = utils.GetDefaultNamespace()\n\t\t} else {\n\t\t\tnsArg = fmt.Sprintf(\" -n %s\", ns)\n\t\t}\n\n\t\thandler, err := cmd.Flags().GetString(\"handler\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tfile, err := cmd.Flags().GetString(\"from-file\")\n\t\tif err != 
nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tsecrets, err := cmd.Flags().GetStringSlice(\"secrets\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tserviceAccount, err := cmd.Flags().GetString(\"service-account\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\truntime, err := cmd.Flags().GetString(\"runtime\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tif runtime != \"\" && !lr.IsValidRuntime(runtime) {\n\t\t\tlogrus.Fatalf(\"Invalid runtime: %s. Supported runtimes are: %s\",\n\t\t\t\truntime, strings.Join(lr.GetRuntimes(), \", \"))\n\t\t}\n\n\t\tlabels, err := cmd.Flags().GetStringSlice(\"label\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tenvs, err := cmd.Flags().GetStringSlice(\"env\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\truntimeImage, err := cmd.Flags().GetString(\"runtime-image\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\timagePullPolicy, err := cmd.Flags().GetString(\"image-pull-policy\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tif imagePullPolicy != \"IfNotPresent\" && imagePullPolicy != \"Always\" && imagePullPolicy != \"Never\" {\n\t\t\terr := fmt.Errorf(\"image-pull-policy must be {IfNotPresent|Always|Never}\")\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tmem, err := cmd.Flags().GetString(\"memory\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tcpu, err := cmd.Flags().GetString(\"cpu\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\ttimeout, err := cmd.Flags().GetString(\"timeout\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tdeps, err := cmd.Flags().GetString(\"dependencies\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tfuncDeps := \"\"\n\t\tif deps != \"\" {\n\t\t\tcontentType, err := utils.GetContentType(deps)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatal(err)\n\t\t\t}\n\t\t\tfuncDeps, _, err = utils.ParseContent(deps, contentType)\n\t\t\tif err != nil 
{\n\t\t\t\tlogrus.Fatal(err)\n\t\t\t}\n\t\t}\n\t\theadless, err := cmd.Flags().GetBool(\"headless\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tport, err := cmd.Flags().GetInt32(\"port\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif port <= 0 || port > 65535 {\n\t\t\tlogrus.Fatalf(\"Invalid port number %d specified\", port)\n\t\t}\n\t\tservicePort, err := cmd.Flags().GetInt32(\"servicePort\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif servicePort < 0 || servicePort > 65535 {\n\t\t\tlogrus.Fatalf(\"Invalid servicePort number %d specified\", servicePort)\n\t\t}\n\n\t\toutput, err := cmd.Flags().GetString(\"output\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tdryrun, err := cmd.Flags().GetBool(\"dryrun\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tnodeSelectors, err := cmd.Flags().GetStringSlice(\"node-selectors\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tpreviousFunction, err := utils.GetFunction(funcName, ns)\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tf, err := getFunctionDescription(funcName, ns, handler, file, funcDeps, runtime, runtimeImage, mem, cpu, timeout, imagePullPolicy, serviceAccount, port, servicePort, headless, envs, labels, secrets, nodeSelectors, previousFunction)\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tif dryrun == true {\n\t\t\tif output == \"json\" {\n\t\t\t\tj, err := json.MarshalIndent(f, \"\", \"    \")\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Fatal(err)\n\t\t\t\t}\n\t\t\t\tfmt.Println(string(j[:]))\n\t\t\t\treturn\n\t\t\t} else if output == \"yaml\" {\n\t\t\t\ty, err := yaml.Marshal(f)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Fatal(err)\n\t\t\t\t}\n\t\t\t\tfmt.Println(string(y[:]))\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tlogrus.Infof(\"Output format needs to be yaml or json\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tkubelessClient, err := utils.GetKubelessClientOutCluster()\n\t\tif err != nil 
{\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tlogrus.Infof(\"Redeploying function...\")\n\t\terr = utils.PatchFunctionCustomResource(kubelessClient, f)\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tlogrus.Infof(\"Function %s submitted for deployment\", funcName)\n\t\tlogrus.Infof(\"Check the deployment status executing 'kubeless function ls %s%s'\", funcName, nsArg)\n\t},\n}\n\nfunc init() {\n\tupdateCmd.Flags().StringP(\"runtime\", \"r\", \"\", \"Specify runtime\")\n\tupdateCmd.Flags().StringP(\"handler\", \"\", \"\", \"Specify handler\")\n\tupdateCmd.Flags().StringP(\"from-file\", \"f\", \"\", \"Specify code file or a URL to the code file\")\n\tupdateCmd.Flags().StringP(\"memory\", \"\", \"\", \"Request amount of memory for the function\")\n\tupdateCmd.Flags().StringP(\"cpu\", \"\", \"\", \"Request amount of cpu for the function.\")\n\tupdateCmd.Flags().StringSliceP(\"label\", \"l\", []string{}, \"Specify labels of the function\")\n\tupdateCmd.Flags().StringSliceP(\"secrets\", \"\", []string{}, \"Specify Secrets to be mounted to the functions container. For example: --secrets mySecret\")\n\tupdateCmd.Flags().StringSliceP(\"env\", \"e\", []string{}, \"Specify environment variable of the function\")\n\tupdateCmd.Flags().StringSliceP(\"node-selectors\", \"\", []string{}, \"Specify node selectors for the function\")\n\tupdateCmd.Flags().StringP(\"service-account\", \"\", \"\", \"Specify service account for the function. 
For example: --service-account controller-acct\")\n\tupdateCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the function\")\n\tupdateCmd.Flags().StringP(\"dependencies\", \"d\", \"\", \"Specify a file containing list of dependencies for the function\")\n\tupdateCmd.Flags().StringP(\"runtime-image\", \"\", \"\", \"Custom runtime image\")\n\tupdateCmd.Flags().StringP(\"image-pull-policy\", \"\", \"Always\", \"Image pull policy\")\n\tupdateCmd.Flags().StringP(\"timeout\", \"\", \"180\", \"Maximum timeout (in seconds) for the function to complete its execution\")\n\tupdateCmd.Flags().Bool(\"headless\", false, \"Deploy http-based function without a single service IP and load balancing support from Kubernetes. See: https://kubernetes.io/docs/concepts/services-networking/service/#headless-services\")\n\tupdateCmd.Flags().Int32(\"port\", 8080, \"Deploy http-based function with a custom port\")\n\tupdateCmd.Flags().Int32(\"servicePort\", 0, \"Deploy http-based function with a custom service port\")\n\tupdateCmd.Flags().Bool(\"dryrun\", false, \"Output JSON manifest of the function without creating it\")\n\tupdateCmd.Flags().StringP(\"output\", \"o\", \"yaml\", \"Output format\")\n}\n"
  },
  {
    "path": "cmd/kubeless/getserverconfig/getServerConfig.go",
    "content": "package getserverconfig\n\nimport (\n\t\"strings\"\n\n\t\"github.com/kubeless/kubeless/pkg/langruntime\"\n\t\"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n)\n\n// GetServerConfigCmd contains first-class command for displaying the current server config\nvar GetServerConfigCmd = &cobra.Command{\n\tUse:   \"get-server-config\",\n\tShort: \"Print the current configuration of the controller\",\n\tLong:  ``,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcli := utils.GetClientOutOfCluster()\n\t\tapiExtensionsClientset := utils.GetAPIExtensionsClientOutOfCluster()\n\t\tconfig, err := utils.GetKubelessConfig(cli, apiExtensionsClientset)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Unable to read the configmap: %v\", err)\n\t\t}\n\n\t\tvar lr = langruntime.New(config)\n\t\tlr.ReadConfigMap()\n\n\t\tlogrus.Info(\"Current Server Config:\")\n\t\tlogrus.Infof(\"Supported Runtimes are: %s\",\n\t\t\tstrings.Join(lr.GetRuntimes(), \", \"))\n\t},\n}\n"
  },
  {
    "path": "cmd/kubeless/kubeless.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Serverless framework for Kubernetes.\npackage main\n\nimport (\n\t\"os\"\n\n\t\"github.com/kubeless/kubeless/cmd/kubeless/autoscale\"\n\t\"github.com/kubeless/kubeless/cmd/kubeless/completion\"\n\t\"github.com/kubeless/kubeless/cmd/kubeless/function\"\n\t\"github.com/kubeless/kubeless/cmd/kubeless/getserverconfig\"\n\t\"github.com/kubeless/kubeless/cmd/kubeless/topic\"\n\t\"github.com/kubeless/kubeless/cmd/kubeless/trigger\"\n\t\"github.com/kubeless/kubeless/cmd/kubeless/version\"\n\t\"github.com/spf13/cobra\"\n)\n\nvar globalUsage = `` //TODO: add explanation\n\nfunc newRootCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:   \"kubeless\",\n\t\tShort: \"Serverless framework for Kubernetes\",\n\t\tLong:  globalUsage,\n\t}\n\n\tcmd.AddCommand(function.FunctionCmd, topic.TopicCmd, version.VersionCmd, autoscale.AutoscaleCmd, getserverconfig.GetServerConfigCmd, trigger.TriggerCmd, completion.CompletionCmd)\n\treturn cmd\n}\n\nfunc main() {\n\tcmd := newRootCmd()\n\tif err := cmd.Execute(); err != nil {\n\t\tos.Exit(1)\n\t}\n}\n"
  },
  {
    "path": "cmd/kubeless/topic/topic.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage topic\n\nimport (\n\t\"github.com/spf13/cobra\"\n)\n\n// TopicCmd contains first-class command for topic\nvar TopicCmd = &cobra.Command{\n\tUse:   \"topic SUBCOMMAND\",\n\tShort: \"manage message topics in Kubeless\",\n\tLong:  `topic command allows user to list, create, delete topics on Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcmd.Help()\n\t},\n}\n\nfunc init() {\n\tcmds := []*cobra.Command{topicCreateCmd, topicDeleteCmd, topicListCmd, topicPublishCmd}\n\n\tfor _, cmd := range cmds {\n\t\tTopicCmd.AddCommand(cmd)\n\t\tcmd.Flags().StringP(\"kafka-namespace\", \"\", \"kubeless\", \"Namespace where kafka-controller is deployed. It will default to 'kubeless'\")\n\t}\n}\n"
  },
  {
    "path": "cmd/kubeless/topic/topicCreate.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage topic\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\t\"k8s.io/api/core/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/rest\"\n)\n\nvar topicCreateCmd = &cobra.Command{\n\tUse:   \"create <topic_name> FLAG\",\n\tShort: \"create a topic to Kubeless\",\n\tLong:  `create a topic to Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - topic name\")\n\t\t}\n\t\tctlNamespace, err := cmd.Flags().GetString(\"kafka-namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\ttopicName := args[0]\n\n\t\tconf, err := utils.BuildOutOfClusterConfig()\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tk8sClientSet := utils.GetClientOutOfCluster()\n\n\t\terr = createTopic(conf, k8sClientSet, ctlNamespace, topicName, cmd.OutOrStdout())\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t},\n}\n\nfunc createTopic(conf *rest.Config, clientset kubernetes.Interface, ctlNamespace, topicName string, out io.Writer) error {\n\tcommand := []string{\n\t\t\"bash\", \"/opt/bitnami/kafka/bin/kafka-topics.sh\",\n\t\t\"--zookeeper\", \"zookeeper.\" + ctlNamespace + \":2181\",\n\t\t\"--replication-factor\", \"1\",\n\t\t\"--partitions\", \"1\",\n\t\t\"--create\",\n\t\t\"--topic\", 
topicName,\n\t}\n\treturn execCommand(conf, clientset, ctlNamespace, command, out)\n}\n\n// wrapper of kubectl exec\n// execCommand executes a command to kafka pod\nfunc execCommand(conf *rest.Config, k8sClientSet kubernetes.Interface, ctlNamespace string, command []string, out io.Writer) error {\n\tpods, err := utils.GetPodsByLabel(k8sClientSet, ctlNamespace, \"kubeless\", \"kafka\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Can't find the kafka pod: %v\", err)\n\t} else if len(pods.Items) == 0 {\n\t\treturn fmt.Errorf(\"Can't find any kafka pod\")\n\t}\n\n\tcmd := utils.Cmd{\n\t\tStdout: out,\n\t\tStderr: out,\n\t}\n\trt, err := utils.ExecRoundTripper(conf, cmd.RoundTripCallback)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\topts := v1.PodExecOptions{\n\t\tStdin:     false,\n\t\tStdout:    true,\n\t\tStderr:    true,\n\t\tContainer: \"broker\",\n\t\tCommand:   command,\n\t}\n\n\treq, err := utils.Exec(k8sClientSet.Core(), pods.Items[0].Name, ctlNamespace, opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = rt.RoundTrip(req)\n\treturn err\n}\n"
  },
  {
    "path": "cmd/kubeless/topic/topicDelete.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage topic\n\nimport (\n\t\"io\"\n\n\t\"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/rest\"\n)\n\nvar topicDeleteCmd = &cobra.Command{\n\tUse:   \"delete <topic_name>\",\n\tShort: \"delete a topic from Kubeless\",\n\tLong:  `delete a topic from Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - topic name\")\n\t\t}\n\t\tctlNamespace, err := cmd.Flags().GetString(\"kafka-namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\ttopicName := args[0]\n\n\t\tconf, err := utils.BuildOutOfClusterConfig()\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tk8sClientSet := utils.GetClientOutOfCluster()\n\n\t\terr = deleteTopic(conf, k8sClientSet, ctlNamespace, topicName, cmd.OutOrStdout())\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t},\n}\n\nfunc deleteTopic(conf *rest.Config, clientset kubernetes.Interface, ctlNamespace, topicName string, out io.Writer) error {\n\tcommand := []string{\n\t\t\"bash\", \"/opt/bitnami/kafka/bin/kafka-topics.sh\",\n\t\t\"--zookeeper\", \"zookeeper.\" + ctlNamespace + \":2181\",\n\t\t\"--delete\",\n\t\t\"--topic\", topicName,\n\t}\n\n\treturn execCommand(conf, clientset, ctlNamespace, command, out)\n}\n"
  },
  {
    "path": "cmd/kubeless/topic/topicList.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage topic\n\nimport (\n\t\"io\"\n\n\t\"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/rest\"\n)\n\nvar topicListCmd = &cobra.Command{\n\tUse:     \"list FLAG\",\n\tAliases: []string{\"ls\"},\n\tShort:   \"list all topics created in Kubeless\",\n\tLong:    `list all topics created in Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tctlNamespace, err := cmd.Flags().GetString(\"kafka-namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tconf, err := utils.BuildOutOfClusterConfig()\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tk8sClientSet := utils.GetClientOutOfCluster()\n\n\t\terr = listTopic(conf, k8sClientSet, ctlNamespace, cmd.OutOrStdout())\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t},\n}\n\nfunc listTopic(conf *rest.Config, clientset kubernetes.Interface, ctlNamespace string, out io.Writer) error {\n\tcommand := []string{\n\t\t\"bash\", \"/opt/bitnami/kafka/bin/kafka-topics.sh\",\n\t\t\"--zookeeper\", \"zookeeper.\" + ctlNamespace + \":2181\",\n\t\t\"--list\",\n\t}\n\treturn execCommand(conf, clientset, ctlNamespace, command, out)\n}\n"
  },
  {
    "path": "cmd/kubeless/topic/topicPublish.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage topic\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\t\"k8s.io/api/core/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/rest\"\n)\n\nvar topicPublishCmd = &cobra.Command{\n\tUse:   \"publish FLAG\",\n\tShort: \"publish message to a topic\",\n\tLong:  `publish message to a topic`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tdata, err := cmd.Flags().GetString(\"data\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\ttopic, err := cmd.Flags().GetString(\"topic\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tctlNamespace, err := cmd.Flags().GetString(\"kafka-namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tconf, err := utils.BuildOutOfClusterConfig()\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tk8sClientSet := utils.GetClientOutOfCluster()\n\n\t\terr = publishTopic(conf, k8sClientSet, ctlNamespace, topic, data, cmd.OutOrStdout())\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t},\n}\n\nfunc publishTopic(conf *rest.Config, clientset kubernetes.Interface, namespace, topic, data string, out io.Writer) error {\n\tcommand := []string{\n\t\t\"bash\", \"/opt/bitnami/kafka/bin/kafka-console-producer.sh\",\n\t\t\"--broker-list\", \"localhost:9092\",\n\t\t\"--topic\", topic,\n\t}\n\n\t// 
Can't just use `execCommand` since we want to specify stdin\n\t// TODO(gus): refactor better.\n\n\tpods, err := utils.GetPodsByLabel(clientset, namespace, \"kubeless\", \"kafka\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Can't find the kafka pod: %v\", err)\n\t} else if len(pods.Items) == 0 {\n\t\treturn fmt.Errorf(\"Can't find any kafka pod\")\n\t}\n\n\tpRead, pWrite := io.Pipe()\n\tcmd := utils.Cmd{\n\t\tStdin:  pRead,\n\t\tStdout: out,\n\t\tStderr: out,\n\t}\n\trt, err := utils.ExecRoundTripper(conf, cmd.RoundTripCallback)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo func() {\n\t\tio.WriteString(pWrite, data+\"\\n\")\n\t\tpWrite.Close()\n\t}()\n\n\topts := v1.PodExecOptions{\n\t\tStdin:     true,\n\t\tStdout:    true,\n\t\tStderr:    true,\n\t\tTTY:       true,\n\t\tContainer: \"broker\",\n\t\tCommand:   command,\n\t}\n\n\treq, err := utils.Exec(clientset.Core(), pods.Items[0].Name, namespace, opts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = rt.RoundTrip(req)\n\treturn err\n}\n\nfunc init() {\n\ttopicPublishCmd.Flags().StringP(\"data\", \"\", \"\", \"Specify data for function\")\n\ttopicPublishCmd.Flags().StringP(\"topic\", \"\", \"kubeless\", \"Specify topic name\")\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/cronjob/create.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage cronjob\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/robfig/cron\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\n\tcronjobApi \"github.com/kubeless/cronjob-trigger/pkg/apis/kubeless/v1beta1\"\n\tcronjobUtils \"github.com/kubeless/cronjob-trigger/pkg/utils\"\n\tkubelessUtils \"github.com/kubeless/kubeless/pkg/utils\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nvar createCmd = &cobra.Command{\n\tUse:   \"create <cronjob_trigger_name> FLAG\",\n\tShort: \"Create a cron job trigger\",\n\tLong:  `Create a cron job trigger`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - cronjob trigger name\")\n\t\t}\n\t\ttriggerName := args[0]\n\n\t\tschedule, err := cmd.Flags().GetString(\"schedule\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tif _, err := cron.ParseStandard(schedule); err != nil {\n\t\t\tlogrus.Fatalf(\"Invalid value for --schedule. 
\" + err.Error())\n\t\t}\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = kubelessUtils.GetDefaultNamespace()\n\t\t}\n\n\t\tfunctionName, err := cmd.Flags().GetString(\"function\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tdryrun, err := cmd.Flags().GetBool(\"dryrun\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\toutput, err := cmd.Flags().GetString(\"output\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tpayload, err := cmd.Flags().GetString(\"payload\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tpayloadFromFile, err := cmd.Flags().GetString(\"payload-from-file\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tif len(payload) > 0 && len(payloadFromFile) > 0 {\n\t\t\terr := \"You can't provide both raw payload and a payload file\"\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tkubelessClient, err := kubelessUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\tcronJobClient, err := cronjobUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\t_, err = kubelessUtils.GetFunctionCustomResource(kubelessClient, functionName, ns)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Unable to find Function %s in namespace %s. Error %s\", functionName, ns, err)\n\t\t}\n\n\t\tparsedPayload, err := parsePayload(payload, payloadFromFile)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Unable to parse the payload of Function %s in namespace %s. 
Error %s\", functionName, ns, err)\n\t\t}\n\n\t\tcronJobTrigger := cronjobApi.CronJobTrigger{}\n\t\tcronJobTrigger.TypeMeta = metav1.TypeMeta{\n\t\t\tKind:       \"CronJobTrigger\",\n\t\t\tAPIVersion: \"kubeless.io/v1beta1\",\n\t\t}\n\t\tcronJobTrigger.ObjectMeta = metav1.ObjectMeta{\n\t\t\tName:      triggerName,\n\t\t\tNamespace: ns,\n\t\t}\n\t\tcronJobTrigger.ObjectMeta.Labels = map[string]string{\n\t\t\t\"created-by\": \"kubeless\",\n\t\t}\n\t\tcronJobTrigger.Spec.FunctionName = functionName\n\t\tcronJobTrigger.Spec.Schedule = schedule\n\t\tcronJobTrigger.Spec.Payload = parsedPayload\n\n\t\tif dryrun == true {\n\t\t\tres, err := kubelessUtils.DryRunFmt(output, cronJobTrigger)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatal(err)\n\t\t\t}\n\t\t\tfmt.Println(res)\n\t\t\treturn\n\t\t}\n\n\t\terr = cronjobUtils.CreateCronJobCustomResource(cronJobClient, &cronJobTrigger)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Failed to create cronjob trigger object %s in namespace %s. Error: %s\", triggerName, ns, err)\n\t\t}\n\t\tlogrus.Infof(\"Cronjob trigger %s created in namespace %s successfully!\", triggerName, ns)\n\t},\n}\n\nfunc init() {\n\tcreateCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the cronjob trigger\")\n\tcreateCmd.Flags().StringP(\"schedule\", \"\", \"\", \"Specify schedule in cron format for scheduled function\")\n\tcreateCmd.Flags().StringP(\"function\", \"\", \"\", \"Name of the function to be associated with trigger\")\n\tcreateCmd.MarkFlagRequired(\"function\")\n\tcreateCmd.MarkFlagRequired(\"schedule\")\n\tcreateCmd.Flags().Bool(\"dryrun\", false, \"Output JSON manifest of the function without creating it\")\n\tcreateCmd.Flags().StringP(\"output\", \"o\", \"yaml\", \"Output format\")\n\tcreateCmd.Flags().StringP(\"payload\", \"p\", \"\", \"Specify a stringified JSON data to pass to function upon execution\")\n\tcreateCmd.Flags().StringP(\"payload-from-file\", \"f\", \"\", \"Specify a payload file to use. 
It must be a JSON file\")\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/cronjob/cronjob_trigger.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage cronjob\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"path/filepath\"\n\n\tkubelessutil \"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/spf13/cobra\"\n)\n\n// CronjobTriggerCmd command for CronJob trigger commands\nvar CronjobTriggerCmd = &cobra.Command{\n\tUse:   \"cronjob SUBCOMMAND\",\n\tShort: \"cronjob trigger specific operations\",\n\tLong:  `cronjob trigger command allows user to create, list, update, delete cronjob triggers running on Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcmd.Help()\n\t},\n}\n\nfunc init() {\n\tCronjobTriggerCmd.AddCommand(createCmd)\n\tCronjobTriggerCmd.AddCommand(deleteCmd)\n\tCronjobTriggerCmd.AddCommand(listCmd)\n\tCronjobTriggerCmd.AddCommand(updateCmd)\n}\n\nfunc parsePayload(content string, file string) (interface{}, error) {\n\tif len(file) > 0 {\n\t\tcontent, err := getPayloadRawContent(file)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn parsePayloadContent(content), nil\n\t}\n\n\treturn parsePayloadContent(content), nil\n}\n\nfunc getPayloadRawContent(file string) (string, error) {\n\tcontentType, err := kubelessutil.GetContentType(file)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcontent, _, err := kubelessutil.ParseContent(file, contentType)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\text := filepath.Ext(file)\n\tif ext != \".json\" {\n\t\treturn \"\", 
fmt.Errorf(\"Sorry, we can't parse %s files yet\", ext)\n\t}\n\n\treturn content, nil\n}\n\nfunc parsePayloadContent(raw string) interface{} {\n\tvar payload map[string]interface{}\n\n\terr := json.Unmarshal([]byte(raw), &payload)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Found an error during JSON parsing on your payload: %s\", err)\n\t}\n\n\treturn payload\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/cronjob/delete.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage cronjob\n\nimport (\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\n\tcronjobUtils \"github.com/kubeless/cronjob-trigger/pkg/utils\"\n\tkubelessUtils \"github.com/kubeless/kubeless/pkg/utils\"\n)\n\nvar deleteCmd = &cobra.Command{\n\n\tUse:   \"delete <cronjob_trigger_name>\",\n\tShort: \"delete a cronjob trigger from Kubeless\",\n\tLong:  `delete a cronjob trigger from Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - cronjob trigger name\")\n\t\t}\n\t\ttriggerName := args[0]\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = kubelessUtils.GetDefaultNamespace()\n\t\t}\n\n\t\tkubelessClient, err := cronjobUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\terr = cronjobUtils.DeleteCronJobCustomResource(kubelessClient, triggerName, ns)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Failed to delete Cronjob trigger object %s in namespace %s. Error: %s\", triggerName, ns, err)\n\t\t}\n\t\tlogrus.Infof(\"Cronjob trigger %s deleted from namespace %s successfully!\", triggerName, ns)\n\t},\n}\n\nfunc init() {\n\tdeleteCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace of the Cronjob trigger\")\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/cronjob/list.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage cronjob\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/gosuri/uitable\"\n\t\"github.com/kubeless/cronjob-trigger/pkg/client/clientset/versioned\"\n\tcronjobUtils \"github.com/kubeless/cronjob-trigger/pkg/utils\"\n\tkubelessUtils \"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nvar listCmd = &cobra.Command{\n\tUse:     \"list FLAG\",\n\tAliases: []string{\"ls\"},\n\tShort:   \"list all Cronjob triggers deployed to Kubeless\",\n\tLong:    `list all Cronjob triggers deployed to Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = kubelessUtils.GetDefaultNamespace()\n\t\t}\n\n\t\tkubelessClient, err := cronjobUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\tif err := doList(cmd.OutOrStdout(), kubelessClient, ns); err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t},\n}\n\nfunc init() {\n\tlistCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the function\")\n}\n\nfunc doList(w io.Writer, kubelessClient versioned.Interface, ns string) error {\n\ttriggersList, err := 
kubelessClient.KubelessV1beta1().CronJobTriggers(ns).List(metav1.ListOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\ttable := uitable.New()\n\ttable.MaxColWidth = 50\n\ttable.Wrap = true\n\ttable.AddRow(\"NAME\", \"NAMESPACE\", \"SCHEDULE\", \"FUNCTION NAME\")\n\tfor _, trigger := range triggersList.Items {\n\t\ttable.AddRow(trigger.Name, trigger.Namespace, trigger.Spec.Schedule, trigger.Spec.FunctionName)\n\t}\n\tfmt.Fprintln(w, table)\n\treturn nil\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/cronjob/update.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage cronjob\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/robfig/cron\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\n\tcronjobUtils \"github.com/kubeless/cronjob-trigger/pkg/utils\"\n\tkubelessUtils \"github.com/kubeless/kubeless/pkg/utils\"\n)\n\nvar updateCmd = &cobra.Command{\n\tUse:   \"update <cronjob_trigger_name> FLAG\",\n\tShort: \"Update a cron job trigger\",\n\tLong:  `Update a cron job trigger`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - cronjob trigger name\")\n\t\t}\n\t\ttriggerName := args[0]\n\n\t\tschedule, err := cmd.Flags().GetString(\"schedule\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tif schedule != \"\" {\n\t\t\tif _, err := cron.ParseStandard(schedule); err != nil {\n\t\t\t\tlogrus.Fatalf(\"Invalid value for --schedule. 
\" + err.Error())\n\t\t\t}\n\t\t}\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = kubelessUtils.GetDefaultNamespace()\n\t\t}\n\n\t\tfunctionName, err := cmd.Flags().GetString(\"function\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tdryrun, err := cmd.Flags().GetBool(\"dryrun\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\toutput, err := cmd.Flags().GetString(\"output\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tpayload, err := cmd.Flags().GetString(\"payload\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tpayloadFromFile, err := cmd.Flags().GetString(\"payload-from-file\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tif len(payload) > 0 && len(payloadFromFile) > 0 {\n\t\t\terr := \"You can't provide both raw payload and a payload file\"\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tkubelessClient, err := kubelessUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\tcronJobClient, err := cronjobUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\t_, err = kubelessUtils.GetFunctionCustomResource(kubelessClient, functionName, ns)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Unable to find Function %s in namespace %s. Error %s\", triggerName, ns, err)\n\t\t}\n\n\t\tparsedPayload, err := parsePayload(payload, payloadFromFile)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Unable to parse the payload of Function %s in namespace %s. Error %s\", functionName, ns, err)\n\t\t}\n\n\t\tcronJobTrigger, err := cronjobUtils.GetCronJobCustomResource(cronJobClient, triggerName, ns)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Unable to find Cronjob trigger %s in namespace %s. 
Error %s\", triggerName, ns, err)\n\t\t}\n\t\tcronJobTrigger.Spec.FunctionName = functionName\n\t\tcronJobTrigger.Spec.Schedule = schedule\n\t\tcronJobTrigger.Spec.Payload = parsedPayload\n\n\t\tif dryrun == true {\n\t\t\tres, err := kubelessUtils.DryRunFmt(output, cronJobTrigger)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatal(err)\n\t\t\t}\n\t\t\tfmt.Println(res)\n\t\t\treturn\n\t\t}\n\n\t\terr = cronjobUtils.UpdateCronJobCustomResource(cronJobClient, cronJobTrigger)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Failed to update cronjob trigger object %s in namespace %s. Error: %s\", triggerName, ns, err)\n\t\t}\n\t\tlogrus.Infof(\"Cronjob trigger %s updated in namespace %s successfully!\", triggerName, ns)\n\t},\n}\n\nfunc init() {\n\tupdateCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace of the cronjob trigger\")\n\tupdateCmd.Flags().StringP(\"schedule\", \"\", \"\", \"Specify schedule in cron format for scheduled function\")\n\tupdateCmd.Flags().StringP(\"function\", \"\", \"\", \"Name of the function to be associated with trigger\")\n\tupdateCmd.Flags().Bool(\"dryrun\", false, \"Output JSON manifest of the function without creating it\")\n\tupdateCmd.Flags().StringP(\"output\", \"o\", \"yaml\", \"Output format\")\n\tupdateCmd.Flags().StringP(\"payload\", \"p\", \"\", \"Specify a stringified JSON data to pass to function upon execution\")\n\tupdateCmd.Flags().StringP(\"payload-from-file\", \"f\", \"\", \"Specify a payload file to use. It must be a JSON file\")\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/http/create.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage http\n\nimport (\n\t\"fmt\"\n\thttpApi \"github.com/kubeless/http-trigger/pkg/apis/kubeless/v1beta1\"\n\thttpUtils \"github.com/kubeless/http-trigger/pkg/utils\"\n\tkubelessUtils \"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nvar createCmd = &cobra.Command{\n\tUse:   \"create <http_trigger_name> FLAG\",\n\tShort: \"Create a http trigger\",\n\tLong:  `Create a http trigger`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - http trigger name\")\n\t\t}\n\t\ttriggerName := args[0]\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = kubelessUtils.GetDefaultNamespace()\n\t\t}\n\n\t\tpath, err := cmd.Flags().GetString(\"path\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tfunctionName, err := cmd.Flags().GetString(\"function-name\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tdryrun, err := cmd.Flags().GetBool(\"dryrun\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\toutput, err := cmd.Flags().GetString(\"output\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tkubelessClient, err := kubelessUtils.GetKubelessClientOutCluster()\n\t\tif err 
!= nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\t_, err = kubelessUtils.GetFunctionCustomResource(kubelessClient, functionName, ns)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Unable to find Function %s in namespace %s. Error %s\", functionName, ns, err)\n\t\t}\n\n\t\thttpTrigger := httpApi.HTTPTrigger{}\n\t\thttpTrigger.TypeMeta = metav1.TypeMeta{\n\t\t\tKind:       \"HTTPTrigger\",\n\t\t\tAPIVersion: \"kubeless.io/v1beta1\",\n\t\t}\n\t\thttpTrigger.ObjectMeta = metav1.ObjectMeta{\n\t\t\tName:      triggerName,\n\t\t\tNamespace: ns,\n\t\t}\n\t\thttpTrigger.ObjectMeta.Labels = map[string]string{\n\t\t\t\"created-by\": \"kubeless\",\n\t\t}\n\t\thttpTrigger.Spec.FunctionName = functionName\n\n\t\tif len(path) != 0 {\n\t\t\thttpTrigger.Spec.Path = path\n\t\t}\n\n\t\tenableTLSAcme, err := cmd.Flags().GetBool(\"enableTLSAcme\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\thttpTrigger.Spec.TLSAcme = enableTLSAcme\n\n\t\tcorsEnabled, err := cmd.Flags().GetBool(\"cors-enable\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\thttpTrigger.Spec.CorsEnable = corsEnabled\n\n\t\ttlsSecret, err := cmd.Flags().GetString(\"tls-secret\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif enableTLSAcme && len(tlsSecret) > 0 {\n\t\t\tlogrus.Fatalf(\"Cannot specify both --enableTLSAcme and --tls-secret\")\n\t\t}\n\t\thttpTrigger.Spec.TLSSecret = tlsSecret\n\n\t\tgateway, err := cmd.Flags().GetString(\"gateway\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif gateway != \"nginx\" && gateway != \"traefik\" && gateway != \"kong\" {\n\t\t\tlogrus.Fatalf(\"Unsupported gateway %s\", gateway)\n\t\t}\n\t\thttpTrigger.Spec.Gateway = gateway\n\n\t\thostName, err := cmd.Flags().GetString(\"hostname\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif hostName == \"\" && gateway == \"nginx\" {\n\t\t\t// We assume that Nginx will be listening on port 80\n\t\t\t// of the cluster public 
IP\n\t\t\tconfig, err := kubelessUtils.BuildOutOfClusterConfig()\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatal(err)\n\t\t\t}\n\t\t\thostName, err = httpUtils.GetLocalHostname(config, functionName)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatal(err)\n\t\t\t}\n\t\t}\n\t\tif hostName == \"\" {\n\t\t\tlogrus.Fatalf(\"The --hostname flag is required\")\n\t\t}\n\t\thttpTrigger.Spec.HostName = hostName\n\n\t\tbasicAuthSecret, err := cmd.Flags().GetString(\"basic-auth-secret\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\thttpTrigger.Spec.BasicAuthSecret = basicAuthSecret\n\n\t\tif dryrun == true {\n\t\t\tres, err := kubelessUtils.DryRunFmt(output, httpTrigger)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatal(err)\n\t\t\t}\n\t\t\tfmt.Println(res)\n\t\t\treturn\n\t\t}\n\n\t\thttpClient, err := httpUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\terr = httpUtils.CreateHTTPTriggerCustomResource(httpClient, &httpTrigger)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Failed to deploy HTTP trigger %s in namespace %s. Error: %s\", triggerName, ns, err)\n\t\t}\n\t\tlogrus.Infof(\"HTTP trigger %s created in namespace %s successfully!\", triggerName, ns)\n\t},\n}\n\nfunc init() {\n\tcreateCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the HTTP trigger\")\n\tcreateCmd.Flags().StringP(\"function-name\", \"\", \"\", \"Name of the function to be associated with trigger\")\n\tcreateCmd.Flags().StringP(\"path\", \"\", \"\", \"Ingress path for the function\")\n\tcreateCmd.Flags().StringP(\"hostname\", \"\", \"\", \"Specify a valid hostname for the function\")\n\tcreateCmd.Flags().BoolP(\"enableTLSAcme\", \"\", false, \"If true, routing rule will be configured for use with kube-lego\")\n\tcreateCmd.Flags().StringP(\"gateway\", \"\", \"nginx\", \"Specify a valid gateway for the Ingress. 
Supported: nginx, traefik, kong\")\n\tcreateCmd.Flags().StringP(\"basic-auth-secret\", \"\", \"\", \"Specify an existing secret name for basic authentication\")\n\tcreateCmd.Flags().StringP(\"tls-secret\", \"\", \"\", \"Specify an existing secret that contains a TLS private key and certificate to secure ingress\")\n\tcreateCmd.Flags().Bool(\"dryrun\", false, \"Output JSON manifest of the function without creating it\")\n\tcreateCmd.Flags().StringP(\"output\", \"o\", \"yaml\", \"Output format\")\n\tcreateCmd.Flags().BoolP(\"cors-enable\", \"\", false, \"If true then cors will be enabled on Http Trigger\")\n\tcreateCmd.MarkFlagRequired(\"function-name\")\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/http/delete.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage http\n\nimport (\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\n\thttpUtils \"github.com/kubeless/http-trigger/pkg/utils\"\n\tkubelessUtils \"github.com/kubeless/kubeless/pkg/utils\"\n)\n\nvar deleteCmd = &cobra.Command{\n\n\tUse:   \"delete <http_trigger_name>\",\n\tShort: \"Delete a HTTP trigger\",\n\tLong:  `Delete a HTTP trigger`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - HTTP trigger name\")\n\t\t}\n\t\ttriggerName := args[0]\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = kubelessUtils.GetDefaultNamespace()\n\t\t}\n\n\t\thttpClient, err := httpUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\terr = httpUtils.DeleteHTTPTriggerCustomResource(httpClient, triggerName, ns)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Failed to delete HTTP trigger object %s in namespace %s. Error: %s\", triggerName, ns, err)\n\t\t}\n\t\tlogrus.Infof(\"HTTP trigger %s deleted from namespace %s successfully!\", triggerName, ns)\n\t},\n}\n\nfunc init() {\n\tdeleteCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the function\")\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/http/http_trigger.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage http\n\nimport (\n\t\"github.com/spf13/cobra\"\n)\n\n// HTTPTriggerCmd command for http trigger commands\nvar HTTPTriggerCmd = &cobra.Command{\n\tUse:   \"http SUBCOMMAND\",\n\tShort: \"http trigger specific operations\",\n\tLong:  `http trigger command allows user to create, list, update, delete http triggers running on Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcmd.Help()\n\t},\n}\n\nfunc init() {\n\tHTTPTriggerCmd.AddCommand(createCmd)\n\tHTTPTriggerCmd.AddCommand(deleteCmd)\n\tHTTPTriggerCmd.AddCommand(listCmd)\n\tHTTPTriggerCmd.AddCommand(updateCmd)\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/http/list.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage http\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/gosuri/uitable\"\n\t\"github.com/kubeless/http-trigger/pkg/client/clientset/versioned\"\n\thttpUtils \"github.com/kubeless/http-trigger/pkg/utils\"\n\tkubelessUtils \"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nvar listCmd = &cobra.Command{\n\tUse:     \"list FLAG\",\n\tAliases: []string{\"ls\"},\n\tShort:   \"list all HTTP triggers deployed to Kubeless\",\n\tLong:    `list all HTTP triggers deployed to Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = kubelessUtils.GetDefaultNamespace()\n\t\t}\n\n\t\thttpClient, err := httpUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\tif err := doList(cmd.OutOrStdout(), httpClient, ns); err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t},\n}\n\nfunc init() {\n\tlistCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the function\")\n}\n\nfunc doList(w io.Writer, kubelessClient versioned.Interface, ns string) error {\n\ttriggersList, err := 
kubelessClient.KubelessV1beta1().HTTPTriggers(ns).List(metav1.ListOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\ttable := uitable.New()\n\ttable.MaxColWidth = 50\n\ttable.Wrap = true\n\ttable.AddRow(\"NAME\", \"NAMESPACE\", \"FUNCTION NAME\")\n\tfor _, trigger := range triggersList.Items {\n\t\ttable.AddRow(trigger.Name, trigger.Namespace, trigger.Spec.FunctionName)\n\t}\n\tfmt.Fprintln(w, table)\n\treturn nil\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/http/update.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage http\n\nimport (\n\t\"fmt\"\n\n\thttpUtils \"github.com/kubeless/http-trigger/pkg/utils\"\n\tkubelessUtils \"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n)\n\nvar updateCmd = &cobra.Command{\n\tUse:   \"update <http_trigger_name> FLAG\",\n\tShort: \"Update a http trigger\",\n\tLong:  `Update a http trigger`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - http trigger name\")\n\t\t}\n\t\ttriggerName := args[0]\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = kubelessUtils.GetDefaultNamespace()\n\t\t}\n\n\t\tkubelessClient, err := kubelessUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\thttpClient, err := httpUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\thttpTrigger, err := httpUtils.GetHTTPTriggerCustomResource(httpClient, triggerName, ns)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Unable to find HTTP trigger %s in namespace %s. 
Error %s\", triggerName, ns, err)\n\t\t}\n\n\t\tfunctionName, err := cmd.Flags().GetString(\"function-name\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tif functionName != \"\" {\n\t\t\t_, err = kubelessUtils.GetFunctionCustomResource(kubelessClient, functionName, ns)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatalf(\"Unable to find Function %s in namespace %s. Error %s\", functionName, ns, err)\n\t\t\t}\n\t\t\thttpTrigger.Spec.FunctionName = functionName\n\t\t}\n\n\t\tpath, err := cmd.Flags().GetString(\"path\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif path != \"\" {\n\t\t\thttpTrigger.Spec.Path = path\n\t\t}\n\n\t\thostName, err := cmd.Flags().GetString(\"hostname\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif hostName != \"\" {\n\t\t\thttpTrigger.Spec.HostName = hostName\n\t\t}\n\t\tenableTLSAcme, err := cmd.Flags().GetBool(\"enableTLSAcme\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\thttpTrigger.Spec.TLSAcme = enableTLSAcme\n\n\t\ttlsSecret, err := cmd.Flags().GetString(\"tls-secret\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif enableTLSAcme && len(tlsSecret) > 0 {\n\t\t\tlogrus.Fatalf(\"Cannot specify both --enableTLSAcme and --tls-secret\")\n\t\t}\n\t\tif tlsSecret != \"\" {\n\t\t\thttpTrigger.Spec.TLSSecret = tlsSecret\n\t\t}\n\n\t\tgateway, err := cmd.Flags().GetString(\"gateway\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif gateway != \"\" {\n\t\t\thttpTrigger.Spec.Gateway = gateway\n\t\t}\n\n\t\tbasicAuthSecret, err := cmd.Flags().GetString(\"basic-auth-secret\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif basicAuthSecret != \"\" {\n\t\t\thttpTrigger.Spec.BasicAuthSecret = basicAuthSecret\n\t\t}\n\n\t\tdryrun, err := cmd.Flags().GetBool(\"dryrun\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\toutput, err := cmd.Flags().GetString(\"output\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tif dryrun == true 
{\n\t\t\tres, err := kubelessUtils.DryRunFmt(output, httpTrigger)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatal(err)\n\t\t\t}\n\t\t\tfmt.Println(res)\n\t\t\treturn\n\t\t}\n\n\t\terr = httpUtils.UpdateHTTPTriggerCustomResource(httpClient, httpTrigger)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Failed to deploy HTTP trigger %s in namespace %s. Error: %s\", triggerName, ns, err)\n\t\t}\n\t\tlogrus.Infof(\"HTTP trigger %s updated in namespace %s successfully!\", triggerName, ns)\n\t},\n}\n\nfunc init() {\n\tupdateCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the HTTP trigger\")\n\tupdateCmd.Flags().StringP(\"function-name\", \"\", \"\", \"Name of the function to be associated with trigger\")\n\tupdateCmd.Flags().StringP(\"path\", \"\", \"\", \"Ingress path for the function\")\n\tupdateCmd.Flags().StringP(\"hostname\", \"\", \"\", \"Specify a valid hostname for the function\")\n\tupdateCmd.Flags().BoolP(\"enableTLSAcme\", \"\", false, \"If true, routing rule will be configured for use with kube-lego\")\n\tupdateCmd.Flags().StringP(\"gateway\", \"\", \"\", \"Specify a valid gateway for the Ingress\")\n\tupdateCmd.Flags().StringP(\"basic-auth-secret\", \"\", \"\", \"Specify an existing secret name for basic authentication\")\n\tupdateCmd.Flags().StringP(\"tls-secret\", \"\", \"\", \"Specify an existing secret that contains a TLS private key and certificate to secure ingress\")\n\tupdateCmd.Flags().Bool(\"dryrun\", false, \"Output JSON manifest of the function without creating it\")\n\tupdateCmd.Flags().StringP(\"output\", \"o\", \"yaml\", \"Output format\")\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/kafka/create.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage kafka\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\n\tkafkaApi \"github.com/kubeless/kafka-trigger/pkg/apis/kubeless/v1beta1\"\n\tkafkaUtils \"github.com/kubeless/kafka-trigger/pkg/utils\"\n\tkubelessUtils \"github.com/kubeless/kubeless/pkg/utils\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nvar createCmd = &cobra.Command{\n\n\tUse:   \"create <kafka_trigger_name> FLAG\",\n\tShort: \"Create a Kafka trigger\",\n\tLong:  `Create a Kafka trigger`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - kafka trigger name\")\n\t\t}\n\t\ttriggerName := args[0]\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = kubelessUtils.GetDefaultNamespace()\n\t\t}\n\n\t\ttopic, err := cmd.Flags().GetString(\"trigger-topic\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tfunctionSelector, err := cmd.Flags().GetString(\"function-selector\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tdryrun, err := cmd.Flags().GetBool(\"dryrun\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\toutput, err := cmd.Flags().GetString(\"output\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tlabelSelector, err := 
metav1.ParseToLabelSelector(functionSelector)\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(\"Invalid label selector specified \" + err.Error())\n\t\t}\n\n\t\tkafkaTrigger := kafkaApi.KafkaTrigger{}\n\t\tkafkaTrigger.TypeMeta = metav1.TypeMeta{\n\t\t\tKind:       \"KafkaTrigger\",\n\t\t\tAPIVersion: \"kubeless.io/v1beta1\",\n\t\t}\n\t\tkafkaTrigger.ObjectMeta = metav1.ObjectMeta{\n\t\t\tName:      triggerName,\n\t\t\tNamespace: ns,\n\t\t}\n\t\tkafkaTrigger.ObjectMeta.Labels = map[string]string{\n\t\t\t\"created-by\": \"kubeless\",\n\t\t}\n\t\tkafkaTrigger.Spec.FunctionSelector.MatchLabels = labelSelector.MatchLabels\n\t\tkafkaTrigger.Spec.Topic = topic\n\n\t\tif dryrun == true {\n\t\t\tres, err := kubelessUtils.DryRunFmt(output, kafkaTrigger)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatal(err)\n\t\t\t}\n\t\t\tfmt.Println(res)\n\t\t\treturn\n\t\t}\n\n\t\tkafkaClient, err := kafkaUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\t\terr = kafkaUtils.CreateKafkaTriggerCustomResource(kafkaClient, &kafkaTrigger)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Failed to create Kafka trigger object %s in namespace %s. Error: %s\", triggerName, ns, err)\n\t\t}\n\t\tlogrus.Infof(\"Kafka trigger %s created in namespace %s successfully!\", triggerName, ns)\n\n\t},\n}\n\nfunc init() {\n\tcreateCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the kafka trigger\")\n\tcreateCmd.Flags().StringP(\"trigger-topic\", \"\", \"\", \"Specify topic to listen to in Kafka broker\")\n\tcreateCmd.Flags().StringP(\"function-selector\", \"\", \"\", \"Selector (label query) to select function on (e.g. 
--function-selector key1=value1,key2=value2)\")\n\tcreateCmd.MarkFlagRequired(\"trigger-topic\")\n\tcreateCmd.MarkFlagRequired(\"function-selector\")\n\tcreateCmd.Flags().Bool(\"dryrun\", false, \"Output JSON manifest of the function without creating it\")\n\tcreateCmd.Flags().StringP(\"output\", \"o\", \"yaml\", \"Output format\")\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/kafka/delete.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage kafka\n\nimport (\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\n\tkafkaUtils \"github.com/kubeless/kafka-trigger/pkg/utils\"\n\tkubelessUtils \"github.com/kubeless/kubeless/pkg/utils\"\n)\n\nvar deleteCmd = &cobra.Command{\n\n\tUse:   \"delete <kafka_trigger_name>\",\n\tShort: \"Delete a Kafka trigger\",\n\tLong:  `Delete a Kafka trigger`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - Kafka trigger name\")\n\t\t}\n\t\ttriggerName := args[0]\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = kubelessUtils.GetDefaultNamespace()\n\t\t}\n\n\t\tkafkaClient, err := kafkaUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\t\terr = kafkaUtils.DeleteKafkaTriggerCustomResource(kafkaClient, triggerName, ns)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Failed to delete Kafka trigger object %s in namespace %s. Error: %s\", triggerName, ns, err)\n\t\t}\n\t\tlogrus.Infof(\"Kafka trigger %s deleted from namespace %s successfully!\", triggerName, ns)\n\t},\n}\n\nfunc init() {\n\tdeleteCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace of the Kafka trigger\")\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/kafka/kafka_trigger.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage kafka\n\nimport (\n\t\"github.com/spf13/cobra\"\n)\n\n// KafkaTriggerCmd command for Kafka trigger commands\nvar KafkaTriggerCmd = &cobra.Command{\n\tUse:   \"kafka SUBCOMMAND\",\n\tShort: \"kafka trigger specific operations\",\n\tLong:  `kafka trigger command allows user to create, list, update, delete kafka triggers running on Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcmd.Help()\n\t},\n}\n\nfunc init() {\n\tKafkaTriggerCmd.AddCommand(createCmd)\n\tKafkaTriggerCmd.AddCommand(deleteCmd)\n\tKafkaTriggerCmd.AddCommand(listCmd)\n\tKafkaTriggerCmd.AddCommand(updateCmd)\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/kafka/list.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage kafka\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/gosuri/uitable\"\n\t\"github.com/kubeless/kafka-trigger/pkg/client/clientset/versioned\"\n\tkafkaUtils \"github.com/kubeless/kafka-trigger/pkg/utils\"\n\tkubelessUtils \"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nvar listCmd = &cobra.Command{\n\n\tUse:     \"list FLAG\",\n\tAliases: []string{\"ls\"},\n\tShort:   \"list all Kafka triggers deployed to Kubeless\",\n\tLong:    `list all Kafka triggers deployed to Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = kubelessUtils.GetDefaultNamespace()\n\t\t}\n\n\t\tkafkaClient, err := kafkaUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\tif err := doList(cmd.OutOrStdout(), kafkaClient, ns); err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t},\n}\n\nfunc init() {\n\tlistCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the function\")\n}\n\nfunc doList(w io.Writer, kubelessClient versioned.Interface, ns string) error {\n\ttriggersList, err := 
kubelessClient.KubelessV1beta1().KafkaTriggers(ns).List(metav1.ListOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\ttable := uitable.New()\n\ttable.MaxColWidth = 50\n\ttable.Wrap = true\n\ttable.AddRow(\"NAME\", \"NAMESPACE\", \"TOPIC\", \"FUNCTION SELECTOR\")\n\tfor _, trigger := range triggersList.Items {\n\t\ttable.AddRow(trigger.Name, trigger.Namespace, trigger.Spec.Topic, metav1.FormatLabelSelector(&trigger.Spec.FunctionSelector))\n\t}\n\tfmt.Fprintln(w, table)\n\treturn nil\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/kafka/update.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage kafka\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\n\tkafkaUtils \"github.com/kubeless/kafka-trigger/pkg/utils\"\n\tkubelessUtils \"github.com/kubeless/kubeless/pkg/utils\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nvar updateCmd = &cobra.Command{\n\tUse:   \"update <kafka_trigger_name> FLAG\",\n\tShort: \"Update a Kafka trigger\",\n\tLong:  `Update a Kafka trigger`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - kafka trigger name\")\n\t\t}\n\t\ttriggerName := args[0]\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = kubelessUtils.GetDefaultNamespace()\n\t\t}\n\n\t\tkafkaClient, err := kafkaUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\tkafkaTrigger, err := kafkaUtils.GetKafkaTriggerCustomResource(kafkaClient, triggerName, ns)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Unable to find Kafka trigger %s in namespace %s. 
Error %s\", triggerName, ns, err)\n\t\t}\n\n\t\ttopic, err := cmd.Flags().GetString(\"trigger-topic\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tif topic != \"\" {\n\t\t\tkafkaTrigger.Spec.Topic = topic\n\t\t}\n\n\t\tfunctionSelector, err := cmd.Flags().GetString(\"function-selector\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tdryrun, err := cmd.Flags().GetBool(\"dryrun\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\toutput, err := cmd.Flags().GetString(\"output\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tif functionSelector != \"\" {\n\t\t\tlabelSelector, err := metav1.ParseToLabelSelector(functionSelector)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatal(\"Invalid label selector specified \" + err.Error())\n\t\t\t}\n\t\t\tkafkaTrigger.Spec.FunctionSelector.MatchLabels = labelSelector.MatchLabels\n\t\t}\n\n\t\tif dryrun == true {\n\t\t\tres, err := kubelessUtils.DryRunFmt(output, kafkaTrigger)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatal(err)\n\t\t\t}\n\t\t\tfmt.Println(res)\n\t\t\treturn\n\t\t}\n\n\t\terr = kafkaUtils.UpdateKafkaTriggerCustomResource(kafkaClient, kafkaTrigger)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Failed to update Kafka trigger object %s in namespace %s. Error: %s\", triggerName, ns, err)\n\t\t}\n\t\tlogrus.Infof(\"Kafka trigger %s updated in namespace %s successfully!\", triggerName, ns)\n\t},\n}\n\nfunc init() {\n\tupdateCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the function\")\n\tupdateCmd.Flags().StringP(\"trigger-topic\", \"\", \"\", \"Specify topic to listen to in Kafka broker\")\n\tupdateCmd.Flags().StringP(\"function-selector\", \"\", \"\", \"Selector (label query) to select function on (e.g. --function-selector key1=value1,key2=value2)\")\n\tupdateCmd.Flags().Bool(\"dryrun\", false, \"Output JSON manifest of the function without creating it\")\n\tupdateCmd.Flags().StringP(\"output\", \"o\", \"yaml\", \"Output format\")\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/kinesis/create.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage kinesis\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\t\"net/url\"\n\n\tkinesisApi \"github.com/kubeless/kinesis-trigger/pkg/apis/kubeless/v1beta1\"\n\tkinesisUtils \"github.com/kubeless/kinesis-trigger/pkg/utils\"\n\tkubelessUtils \"github.com/kubeless/kubeless/pkg/utils\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nvar createCmd = &cobra.Command{\n\n\tUse:   \"create <kinesis_trigger_name> FLAG\",\n\tShort: \"Create a Kinesis trigger\",\n\tLong:  `Create a Kinesis trigger`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - Kinesis trigger name\")\n\t\t}\n\t\ttriggerName := args[0]\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = kubelessUtils.GetDefaultNamespace()\n\t\t}\n\n\t\tfunctionName, err := cmd.Flags().GetString(\"function-name\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tkubelessClient, err := kubelessUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\t_, err = kubelessUtils.GetFunctionCustomResource(kubelessClient, functionName, ns)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Unable to find Function %s in namespace %s. 
Error %s\", functionName, ns, err)\n\t\t}\n\n\t\tstreamName, err := cmd.Flags().GetString(\"stream\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tregionName, err := cmd.Flags().GetString(\"aws-region\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tshardID, err := cmd.Flags().GetString(\"shard-id\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tsecretName, err := cmd.Flags().GetString(\"secret\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tendpointURL, err := cmd.Flags().GetString(\"endpoint\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif len(endpointURL) > 0 {\n\t\t\t_, err = url.ParseRequestURI(endpointURL)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\n\t\tdryrun, err := cmd.Flags().GetBool(\"dryrun\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\toutput, err := cmd.Flags().GetString(\"output\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tcli := kubelessUtils.GetClientOutOfCluster()\n\t\t_, err = cli.Core().Secrets(ns).Get(secretName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tkinesisTrigger := kinesisApi.KinesisTrigger{}\n\t\tkinesisTrigger.TypeMeta = metav1.TypeMeta{\n\t\t\tKind:       \"KinesisTrigger\",\n\t\t\tAPIVersion: \"kubeless.io/v1beta1\",\n\t\t}\n\t\tkinesisTrigger.ObjectMeta = metav1.ObjectMeta{\n\t\t\tName:      triggerName,\n\t\t\tNamespace: ns,\n\t\t}\n\t\tkinesisTrigger.ObjectMeta.Labels = map[string]string{\n\t\t\t\"created-by\": \"kubeless\",\n\t\t}\n\t\tkinesisTrigger.Spec.FunctionName = functionName\n\t\tkinesisTrigger.Spec.Region = regionName\n\t\tkinesisTrigger.Spec.Stream = streamName\n\t\tkinesisTrigger.Spec.ShardID = shardID\n\t\tkinesisTrigger.Spec.Secret = secretName\n\t\tkinesisTrigger.Spec.Endpoint = endpointURL\n\n\t\tif dryrun == true {\n\t\t\tres, err := kubelessUtils.DryRunFmt(output, kinesisTrigger)\n\t\t\tif err != nil 
{\n\t\t\t\tlogrus.Fatal(err)\n\t\t\t}\n\t\t\tfmt.Println(res)\n\t\t\treturn\n\t\t}\n\n\t\tkinesisClient, err := kinesisUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\t\terr = kinesisUtils.CreateKinesisTriggerCustomResource(kinesisClient, &kinesisTrigger)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Failed to create Kinesis trigger object %s in namespace %s. Error: %s\", triggerName, ns, err)\n\t\t}\n\t\tlogrus.Infof(\"Kinesis trigger %s created in namespace %s successfully!\", triggerName, ns)\n\n\t},\n}\n\nfunc init() {\n\tcreateCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the Kinesis trigger\")\n\tcreateCmd.Flags().StringP(\"stream\", \"\", \"\", \"Name of the AWS Kinesis stream\")\n\tcreateCmd.Flags().StringP(\"aws-region\", \"\", \"\", \"AWS region in which stream is available\")\n\tcreateCmd.Flags().StringP(\"shard-id\", \"\", \"\", \"Shard-ID of the AWS kinesis stream\")\n\tcreateCmd.Flags().StringP(\"function-name\", \"\", \"\", \"Name of the Kubeless function to be associated with AWS Kinesis stream\")\n\tcreateCmd.Flags().StringP(\"secret\", \"\", \"\", \"Kubernetes secret that has AWS access key and secret key\")\n\tcreateCmd.Flags().StringP(\"endpoint\", \"\", \"\", \"Override AWS's default service URL with the given URL\")\n\tcreateCmd.MarkFlagRequired(\"stream\")\n\tcreateCmd.MarkFlagRequired(\"aws-region\")\n\tcreateCmd.MarkFlagRequired(\"shard-id\")\n\tcreateCmd.MarkFlagRequired(\"function-name\")\n\tcreateCmd.MarkFlagRequired(\"secret\")\n\tcreateCmd.Flags().Bool(\"dryrun\", false, \"Output JSON manifest of the function without creating it\")\n\tcreateCmd.Flags().StringP(\"output\", \"o\", \"yaml\", \"Output format\")\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/kinesis/delete.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage kinesis\n\nimport (\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\n\tkinesisUtils \"github.com/kubeless/kinesis-trigger/pkg/utils\"\n\tkubelessUtils \"github.com/kubeless/kubeless/pkg/utils\"\n)\n\nvar deleteCmd = &cobra.Command{\n\n\tUse:   \"delete <kinesis_trigger_name>\",\n\tShort: \"Delete a Kinesis trigger\",\n\tLong:  `Delete a Kinesis trigger`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - Kinesis trigger name\")\n\t\t}\n\t\ttriggerName := args[0]\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = kubelessUtils.GetDefaultNamespace()\n\t\t}\n\n\t\tkinesisClient, err := kinesisUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\terr = kinesisUtils.DeleteKinesisTriggerCustomResource(kinesisClient, triggerName, ns)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Failed to delete Kinesis trigger object %s in namespace %s. Error: %s\", triggerName, ns, err)\n\t\t}\n\t\tlogrus.Infof(\"Kinesis trigger %s deleted from namespace %s successfully!\", triggerName, ns)\n\t},\n}\n\nfunc init() {\n\tdeleteCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace of the Kinesis trigger\")\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/kinesis/kinesis_trigger.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage kinesis\n\nimport (\n\t\"github.com/spf13/cobra\"\n)\n\n// KinesisTriggerCmd command for Kinesis trigger commands\nvar KinesisTriggerCmd = &cobra.Command{\n\tUse:   \"kinesis SUBCOMMAND\",\n\tShort: \"kinesis trigger specific operations\",\n\tLong:  `kinesis trigger command allows users to create, list, update, delete Kinesis triggers running on Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcmd.Help()\n\t},\n}\n\nfunc init() {\n\tKinesisTriggerCmd.AddCommand(createCmd)\n\tKinesisTriggerCmd.AddCommand(deleteCmd)\n\tKinesisTriggerCmd.AddCommand(listCmd)\n\tKinesisTriggerCmd.AddCommand(updateCmd)\n\tKinesisTriggerCmd.AddCommand(publishCmd)\n\tKinesisTriggerCmd.AddCommand(createStreamCmd)\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/kinesis/list.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage kinesis\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/gosuri/uitable\"\n\t\"github.com/kubeless/kinesis-trigger/pkg/client/clientset/versioned\"\n\tkinesisUtils \"github.com/kubeless/kinesis-trigger/pkg/utils\"\n\tkubelessUtils \"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nvar listCmd = &cobra.Command{\n\n\tUse:     \"list FLAG\",\n\tAliases: []string{\"ls\"},\n\tShort:   \"list all Kinesis triggers deployed to Kubeless\",\n\tLong:    `list all Kinesis triggers deployed to Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = kubelessUtils.GetDefaultNamespace()\n\t\t}\n\n\t\tkinesisClient, err := kinesisUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\tif err := doList(cmd.OutOrStdout(), kinesisClient, ns); err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t},\n}\n\nfunc init() {\n\tlistCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the NATS trigger\")\n}\n\nfunc doList(w io.Writer, kubelessClient versioned.Interface, ns string) error {\n\ttriggersList, err := 
kubelessClient.KubelessV1beta1().KinesisTriggers(ns).List(metav1.ListOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\ttable := uitable.New()\n\ttable.MaxColWidth = 50\n\ttable.Wrap = true\n\ttable.AddRow(\"NAME\", \"NAMESPACE\", \"REGION\", \"STREAM\", \"SHARD\", \"FUNCTION NAME\")\n\tfor _, trigger := range triggersList.Items {\n\t\ttable.AddRow(trigger.Name, trigger.Namespace, trigger.Spec.Region, trigger.Spec.Stream, trigger.Spec.ShardID, trigger.Spec.FunctionName)\n\t}\n\tfmt.Fprintln(w, table)\n\treturn nil\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/kinesis/publish.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage kinesis\n\nimport (\n\t\"net/url\"\n\n\t\"github.com/aws/aws-sdk-go/aws\"\n\t\"github.com/aws/aws-sdk-go/aws/credentials\"\n\t\"github.com/aws/aws-sdk-go/aws/session\"\n\t\"github.com/aws/aws-sdk-go/service/kinesis\"\n\t\"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nvar publishCmd = &cobra.Command{\n\tUse:   \"publish FLAG\",\n\tShort: \"publish message to a Kinesis stream\",\n\tLong:  `publish message to a Kinesis stream`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\trecords, err := cmd.Flags().GetStringArray(\"records\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tstreamName, err := cmd.Flags().GetString(\"stream\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tregion, err := cmd.Flags().GetString(\"aws-region\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tkey, err := cmd.Flags().GetString(\"partition-key\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = utils.GetDefaultNamespace()\n\t\t}\n\t\tsecretName, err := cmd.Flags().GetString(\"secret\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tclient := 
utils.GetClientOutOfCluster()\n\t\tsecret, err := client.Core().Secrets(ns).Get(secretName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"Error getting secret: %s necessary to connect to AWS services. Erro: %v\", secretName, err)\n\t\t}\n\t\tif _, ok := secret.Data[\"aws_access_key_id\"]; !ok {\n\t\t\tlogrus.Fatalf(\"Error getting aws_access_key_id from the secret: %s necessary to connect to AWS Kinesis service. Error: %v\", secretName, err)\n\t\t}\n\t\tif _, ok := secret.Data[\"aws_secret_access_key\"]; !ok {\n\t\t\tlogrus.Fatalf(\"Error getting aws_aaws_secret_access_keyccess_key_id from the secret: %s necessary to connect to AWS Kinesis service. Error: %v\", secretName, err)\n\t\t}\n\t\tawsAccessKey := string(secret.Data[\"aws_access_key_id\"][:])\n\t\tawsSecretAccessKey := string(secret.Data[\"aws_secret_access_key\"][:])\n\n\t\tendpointURL, err := cmd.Flags().GetString(\"endpoint\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif len(endpointURL) > 0 {\n\t\t\t_, err = url.ParseRequestURI(endpointURL)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t\tcustomCreds := credentials.NewStaticCredentials(awsAccessKey, awsSecretAccessKey, \"\")\n\t\tvar s *session.Session\n\t\tif len(endpointURL) > 0 {\n\t\t\ts = session.New(&aws.Config{Region: aws.String(region), Endpoint: aws.String(endpointURL), Credentials: customCreds})\n\t\t} else {\n\t\t\ts = session.New(&aws.Config{Region: aws.String(region), Credentials: customCreds})\n\t\t}\n\t\tkc := kinesis.New(s)\n\t\tentries := make([]*kinesis.PutRecordsRequestEntry, len(records))\n\t\tfor i, record := range records {\n\t\t\tentries[i] = &kinesis.PutRecordsRequestEntry{\n\t\t\t\tData:         []byte(record),\n\t\t\t\tPartitionKey: aws.String(key),\n\t\t\t}\n\t\t}\n\t\t_, err = kc.PutRecords(&kinesis.PutRecordsInput{\n\t\t\tRecords:    entries,\n\t\t\tStreamName: aws.String(streamName),\n\t\t})\n\t\tif err != nil {\n\t\t\tpanic(\"Failed to put to record in the stream. 
Error: \" + err.Error())\n\t\t}\n\t},\n}\n\nfunc init() {\n\tvar records []string\n\tpublishCmd.Flags().StringP(\"stream\", \"\", \"\", \"Name of the AWS Kinesis stream\")\n\tpublishCmd.Flags().StringP(\"aws-region\", \"\", \"\", \"AWS region in which stream is available\")\n\tpublishCmd.Flags().StringP(\"partition-key\", \"\", \"\", \"partition key to use to put message in AWS Kinesis stream\")\n\tpublishCmd.Flags().StringArray(\"records\", records, \"Specify list of records to be published to the stream\")\n\tpublishCmd.Flags().StringP(\"endpoint\", \"\", \"\", \"Override AWS's default service URL with the given URL\")\n\tpublishCmd.Flags().StringP(\"secret\", \"\", \"\", \"Kubernetes secret that has AWS access key and secret key\")\n\tpublishCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the Kinesis trigger\")\n\tpublishCmd.MarkFlagRequired(\"stream\")\n\tpublishCmd.MarkFlagRequired(\"aws-region\")\n\tpublishCmd.MarkFlagRequired(\"partition-key\")\n\tpublishCmd.MarkFlagRequired(\"message\")\n\tpublishCmd.MarkFlagRequired(\"aws_access_key_id\")\n\tpublishCmd.MarkFlagRequired(\"aws_secret_access_key\")\n\tpublishCmd.MarkFlagRequired(\"records\")\n\tpublishCmd.MarkFlagRequired(\"secret\")\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/kinesis/stream_create.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage kinesis\n\nimport (\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\t\"net/url\"\n\n\t\"github.com/aws/aws-sdk-go/aws\"\n\t\"github.com/aws/aws-sdk-go/aws/credentials\"\n\t\"github.com/aws/aws-sdk-go/aws/session\"\n\t\"github.com/aws/aws-sdk-go/service/kinesis\"\n\t\"github.com/kubeless/kubeless/pkg/utils\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nvar createStreamCmd = &cobra.Command{\n\n\tUse:   \"create-stream <kinesis_streamr_name> FLAG\",\n\tShort: \"Create a Kinesis stream\",\n\tLong:  `Create a Kinesis stream. 
Provided only for convenience/quick testing in Kubeless cli`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\tregionName, err := cmd.Flags().GetString(\"aws-region\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tshardCount, err := cmd.Flags().GetInt64(\"shard-count\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tendpointURL, err := cmd.Flags().GetString(\"endpoint\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\t_, err = url.ParseRequestURI(endpointURL)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = utils.GetDefaultNamespace()\n\t\t}\n\t\tsecretName, err := cmd.Flags().GetString(\"secret\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tclient := utils.GetClientOutOfCluster()\n\t\tsecret, err := client.Core().Secrets(ns).Get(secretName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"Error getting secret: %s necessary to connect to AWS services. Error: %v\", secretName, err)\n\t\t}\n\t\tif _, ok := secret.Data[\"aws_access_key_id\"]; !ok {\n\t\t\tlogrus.Fatalf(\"Error getting aws_access_key_id from the secret: %s necessary to connect to AWS Kinesis service. Error: %v\", secretName, err)\n\t\t}\n\t\tif _, ok := secret.Data[\"aws_secret_access_key\"]; !ok {\n\t\t\tlogrus.Fatalf(\"Error getting aws_secret_access_key from the secret: %s necessary to connect to AWS Kinesis service. 
Error: %v\", secretName, err)\n\t\t}\n\t\tawsAccessKey := string(secret.Data[\"aws_access_key_id\"][:])\n\t\tawsSecretAccessKey := string(secret.Data[\"aws_secret_access_key\"][:])\n\n\t\tstreamName, err := cmd.Flags().GetString(\"stream-name\")\n\t\tcustomCreds := credentials.NewStaticCredentials(awsAccessKey, awsSecretAccessKey, \"\")\n\t\tvar s *session.Session\n\t\tif len(endpointURL) > 0 {\n\t\t\ts = session.New(&aws.Config{Region: aws.String(regionName), Endpoint: aws.String(endpointURL), Credentials: customCreds})\n\t\t} else {\n\t\t\ts = session.New(&aws.Config{Region: aws.String(regionName), Credentials: customCreds})\n\t\t}\n\n\t\tkc := kinesis.New(s)\n\t\t_, err = kc.CreateStream(&kinesis.CreateStreamInput{ShardCount: &shardCount, StreamName: &streamName})\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Failed to create Kinesis stream. Error: %v\", err)\n\t\t}\n\t},\n}\n\nfunc init() {\n\tcreateStreamCmd.Flags().StringP(\"stream-name\", \"\", \"\", \"A  name to identify the stream.\")\n\tcreateStreamCmd.Flags().StringP(\"aws-region\", \"\", \"\", \"AWS region in which stream is to be created.\")\n\tcreateStreamCmd.Flags().Int64(\"shard-count\", 1, \"The number of shards that the stream will use.\")\n\tcreateStreamCmd.Flags().StringP(\"endpoint\", \"\", \"\", \"Override AWS's default service URL with the given URL\")\n\tcreateStreamCmd.Flags().StringP(\"secret\", \"\", \"\", \"Kubernetes secret that has AWS access key and secret key\")\n\tcreateStreamCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the Kinesis trigger\")\n\tcreateStreamCmd.MarkFlagRequired(\"stream-name\")\n\tcreateStreamCmd.MarkFlagRequired(\"aws-region\")\n\tcreateStreamCmd.MarkFlagRequired(\"aws_access_key_id\")\n\tcreateStreamCmd.MarkFlagRequired(\"aws_secret_access_key\")\n\tcreateStreamCmd.MarkFlagRequired(\"secret\")\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/kinesis/update.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage kinesis\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\n\tkinesisUtils \"github.com/kubeless/kinesis-trigger/pkg/utils\"\n\tkubelessUtils \"github.com/kubeless/kubeless/pkg/utils\"\n)\n\nvar updateCmd = &cobra.Command{\n\tUse:   \"update <kinesis_trigger_name> FLAG\",\n\tShort: \"Update a Kinesis trigger\",\n\tLong:  `Update a Kinesis trigger`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - Kinesis trigger name\")\n\t\t}\n\t\ttriggerName := args[0]\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = kubelessUtils.GetDefaultNamespace()\n\t\t}\n\n\t\tkubelessClient, err := kubelessUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\t\tkinesisClient, err := kinesisUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\tkinesisTrigger, err := kinesisUtils.GetKinesisTriggerCustomResource(kinesisClient, triggerName, ns)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Unable to find Kinesis trigger %s in namespace %s. 
Error %s\", triggerName, ns, err)\n\t\t}\n\n\t\tstreamName, err := cmd.Flags().GetString(\"stream\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tregionName, err := cmd.Flags().GetString(\"aws-region\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tshardID, err := cmd.Flags().GetString(\"shard-id\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tsecretName, err := cmd.Flags().GetString(\"secret\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tfunctionName, err := cmd.Flags().GetString(\"function-name\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif functionName != \"\" {\n\t\t\t_, err = kubelessUtils.GetFunctionCustomResource(kubelessClient, functionName, ns)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatalf(\"Unable to find Function %s in namespace %s. Error %s\", functionName, ns, err)\n\t\t\t}\n\t\t}\n\n\t\tdryrun, err := cmd.Flags().GetBool(\"dryrun\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\toutput, err := cmd.Flags().GetString(\"output\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tif regionName != \"\" {\n\t\t\tkinesisTrigger.Spec.Region = regionName\n\t\t}\n\t\tif secretName != \"\" {\n\t\t\tkinesisTrigger.Spec.Secret = secretName\n\t\t}\n\t\tif shardID != \"\" {\n\t\t\tkinesisTrigger.Spec.ShardID = shardID\n\t\t}\n\t\tif streamName != \"\" {\n\t\t\tkinesisTrigger.Spec.Stream = streamName\n\t\t}\n\n\t\tif dryrun == true {\n\t\t\tres, err := kubelessUtils.DryRunFmt(output, kinesisTrigger)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatal(err)\n\t\t\t}\n\t\t\tfmt.Println(res)\n\t\t\treturn\n\t\t}\n\n\t\terr = kinesisUtils.UpdateKinesisTriggerCustomResource(kinesisClient, kinesisTrigger)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Failed to update Kinesis trigger object %s in namespace %s. 
Error: %s\", triggerName, ns, err)\n\t\t}\n\t\tlogrus.Infof(\"Kinesis trigger %s updated in namespace %s successfully!\", triggerName, ns)\n\t},\n}\n\nfunc init() {\n\tupdateCmd.Flags().StringP(\"stream\", \"\", \"\", \"Name of the AWS Kinesis stream\")\n\tupdateCmd.Flags().StringP(\"aws-region\", \"\", \"\", \"AWS region in which stream is available\")\n\tupdateCmd.Flags().StringP(\"shard-id\", \"\", \"\", \"Shard-ID of the AWS kinesis stream\")\n\tupdateCmd.Flags().StringP(\"function-name\", \"\", \"\", \"Name of the Kubeless function to be associated with AWS Kinesis stream\")\n\tupdateCmd.Flags().StringP(\"secret\", \"\", \"\", \"Kubernetes secret that has AWS access key and secret key\")\n\tupdateCmd.Flags().Bool(\"dryrun\", false, \"Output JSON manifest of the function without creating it\")\n\tupdateCmd.Flags().StringP(\"output\", \"o\", \"yaml\", \"Output format\")\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/nats/create.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage nats\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\n\tkubelessUtils \"github.com/kubeless/kubeless/pkg/utils\"\n\tnatsApi \"github.com/kubeless/nats-trigger/pkg/apis/kubeless/v1beta1\"\n\tnatsUtils \"github.com/kubeless/nats-trigger/pkg/utils\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nvar createCmd = &cobra.Command{\n\n\tUse:   \"create <nats_trigger_name> FLAG\",\n\tShort: \"Create a NATS trigger\",\n\tLong:  `Create a NATS trigger`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - NATS trigger name\")\n\t\t}\n\t\ttriggerName := args[0]\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = kubelessUtils.GetDefaultNamespace()\n\t\t}\n\n\t\ttopic, err := cmd.Flags().GetString(\"trigger-topic\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tfunctionSelector, err := cmd.Flags().GetString(\"function-selector\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tdryrun, err := cmd.Flags().GetBool(\"dryrun\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\toutput, err := cmd.Flags().GetString(\"output\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tlabelSelector, err := 
metav1.ParseToLabelSelector(functionSelector)\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(\"Invalid label selector specified \" + err.Error())\n\t\t}\n\n\t\tnatsClient, err := natsUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\tnatsTrigger := natsApi.NATSTrigger{}\n\t\tnatsTrigger.TypeMeta = metav1.TypeMeta{\n\t\t\tKind:       \"NATSTrigger\",\n\t\t\tAPIVersion: \"kubeless.io/v1beta1\",\n\t\t}\n\t\tnatsTrigger.ObjectMeta = metav1.ObjectMeta{\n\t\t\tName:      triggerName,\n\t\t\tNamespace: ns,\n\t\t}\n\t\tnatsTrigger.ObjectMeta.Labels = map[string]string{\n\t\t\t\"created-by\": \"kubeless\",\n\t\t}\n\t\tnatsTrigger.Spec.FunctionSelector.MatchLabels = labelSelector.MatchLabels\n\t\tnatsTrigger.Spec.Topic = topic\n\n\t\tif dryrun == true {\n\t\t\tres, err := kubelessUtils.DryRunFmt(output, natsTrigger)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatal(err)\n\t\t\t}\n\t\t\tfmt.Println(res)\n\t\t\treturn\n\t\t}\n\n\t\terr = natsUtils.CreateNatsTriggerCustomResource(natsClient, &natsTrigger)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Failed to create NATS trigger object %s in namespace %s. Error: %s\", triggerName, ns, err)\n\t\t}\n\t\tlogrus.Infof(\"NATS trigger %s created in namespace %s successfully!\", triggerName, ns)\n\n\t},\n}\n\nfunc init() {\n\tcreateCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the NATS trigger\")\n\tcreateCmd.Flags().StringP(\"trigger-topic\", \"\", \"\", \"Specify topic to listen to in NATS\")\n\tcreateCmd.Flags().StringP(\"function-selector\", \"\", \"\", \"Selector (label query) to select function on (e.g. 
--function-selector key1=value1,key2=value2)\")\n\tcreateCmd.MarkFlagRequired(\"trigger-topic\")\n\tcreateCmd.MarkFlagRequired(\"function-selector\")\n\tcreateCmd.Flags().Bool(\"dryrun\", false, \"Output JSON manifest of the function without creating it\")\n\tcreateCmd.Flags().StringP(\"output\", \"o\", \"yaml\", \"Output format\")\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/nats/delete.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage nats\n\nimport (\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\n\tkubelessUtils \"github.com/kubeless/kubeless/pkg/utils\"\n\tnatsUtils \"github.com/kubeless/nats-trigger/pkg/utils\"\n)\n\nvar deleteCmd = &cobra.Command{\n\n\tUse:   \"delete <nats_trigger_name>\",\n\tShort: \"Delete a NATS trigger\",\n\tLong:  `Delete a NATS trigger`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - NATS trigger name\")\n\t\t}\n\t\ttriggerName := args[0]\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = kubelessUtils.GetDefaultNamespace()\n\t\t}\n\n\t\tnatsClient, err := natsUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\terr = natsUtils.DeleteNatsTriggerCustomResource(natsClient, triggerName, ns)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Failed to delete NATS trigger object %s in namespace %s. Error: %s\", triggerName, ns, err)\n\t\t}\n\t\tlogrus.Infof(\"NATS trigger %s deleted from namespace %s successfully!\", triggerName, ns)\n\t},\n}\n\nfunc init() {\n\tdeleteCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace of the NATS trigger\")\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/nats/list.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage nats\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/gosuri/uitable\"\n\tkubelessUtils \"github.com/kubeless/kubeless/pkg/utils\"\n\t\"github.com/kubeless/nats-trigger/pkg/client/clientset/versioned\"\n\tnatsUtils \"github.com/kubeless/nats-trigger/pkg/utils\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nvar listCmd = &cobra.Command{\n\n\tUse:     \"list FLAG\",\n\tAliases: []string{\"ls\"},\n\tShort:   \"list all NATS triggers deployed to Kubeless\",\n\tLong:    `list all NATS triggers deployed to Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = kubelessUtils.GetDefaultNamespace()\n\t\t}\n\n\t\tnatsClient, err := natsUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\tif err := doList(cmd.OutOrStdout(), natsClient, ns); err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t},\n}\n\nfunc init() {\n\tlistCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the NATS trigger\")\n}\n\nfunc doList(w io.Writer, kubelessClient versioned.Interface, ns string) error {\n\ttriggersList, err := 
kubelessClient.KubelessV1beta1().NATSTriggers(ns).List(metav1.ListOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\ttable := uitable.New()\n\ttable.MaxColWidth = 50\n\ttable.Wrap = true\n\ttable.AddRow(\"NAME\", \"NAMESPACE\", \"TOPIC\", \"FUNCTION SELECTOR\")\n\tfor _, trigger := range triggersList.Items {\n\t\ttable.AddRow(trigger.Name, trigger.Namespace, trigger.Spec.Topic, metav1.FormatLabelSelector(&trigger.Spec.FunctionSelector))\n\t}\n\tfmt.Fprintln(w, table)\n\treturn nil\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/nats/nats_trigger.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage nats\n\nimport (\n\t\"github.com/spf13/cobra\"\n)\n\n// NATSTriggerCmd command for NATS trigger commands\nvar NATSTriggerCmd = &cobra.Command{\n\tUse:   \"nats SUBCOMMAND\",\n\tShort: \"nats trigger specific operations\",\n\tLong:  `nats trigger command allows user to create, list, update, delete NATS triggers running on Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcmd.Help()\n\t},\n}\n\nfunc init() {\n\tNATSTriggerCmd.AddCommand(createCmd)\n\tNATSTriggerCmd.AddCommand(deleteCmd)\n\tNATSTriggerCmd.AddCommand(listCmd)\n\tNATSTriggerCmd.AddCommand(updateCmd)\n\tNATSTriggerCmd.AddCommand(publishCmd)\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/nats/publish.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage nats\n\nimport (\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\n\t\"github.com/nats-io/go-nats\"\n)\n\nvar publishCmd = &cobra.Command{\n\tUse:   \"publish FLAG\",\n\tShort: \"publish message to a topic\",\n\tLong:  `publish message to a topic`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tdata, err := cmd.Flags().GetString(\"message\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\ttopic, err := cmd.Flags().GetString(\"topic\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\turl, err := cmd.Flags().GetString(\"url\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\terr = publishTopic(topic, data, url)\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(\"Failed to publish message to topic: \", err)\n\t\t}\n\t},\n}\n\nfunc publishTopic(topic, message, url string) error {\n\tnc, err := nats.Connect(url)\n\tif err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n\tdefer nc.Close()\n\tnc.Publish(topic, []byte(message))\n\tnc.Flush()\n\tif err := nc.LastError(); err != nil {\n\t\treturn err\n\t}\n\tlogrus.Infof(\"Published [%s] : '%s'\\n\", topic, message)\n\treturn nil\n}\n\nfunc init() {\n\tpublishCmd.Flags().StringP(\"message\", \"\", \"\", \"Specify message to be published\")\n\tpublishCmd.Flags().StringP(\"topic\", \"\", \"kubeless\", \"Specify topic name\")\n\tpublishCmd.Flags().StringP(\"url\", \"\", \"\", \"Specify NATS 
server details (e.g. nats://localhost:4222)\")\n\tpublishCmd.MarkFlagRequired(\"url\")\n\tpublishCmd.MarkFlagRequired(\"topic\")\n\tpublishCmd.MarkFlagRequired(\"message\")\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/nats/update.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage nats\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/spf13/cobra\"\n\n\tkubelessUtils \"github.com/kubeless/kubeless/pkg/utils\"\n\tnatsUtils \"github.com/kubeless/nats-trigger/pkg/utils\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nvar updateCmd = &cobra.Command{\n\tUse:   \"update <nats_trigger_name> FLAG\",\n\tShort: \"Update a NATS trigger\",\n\tLong:  `Update a NATS trigger`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) != 1 {\n\t\t\tlogrus.Fatal(\"Need exactly one argument - NATS trigger name\")\n\t\t}\n\t\ttriggerName := args[0]\n\n\t\tns, err := cmd.Flags().GetString(\"namespace\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\t\tif ns == \"\" {\n\t\t\tns = kubelessUtils.GetDefaultNamespace()\n\t\t}\n\n\t\tnatsClient, err := natsUtils.GetKubelessClientOutCluster()\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Can not create out-of-cluster client: %v\", err)\n\t\t}\n\n\t\tnatsTrigger, err := natsUtils.GetNatsTriggerCustomResource(natsClient, triggerName, ns)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Unable to find NATS trigger %s in namespace %s. 
Error %s\", triggerName, ns, err)\n\t\t}\n\n\t\ttopic, err := cmd.Flags().GetString(\"trigger-topic\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tif topic != \"\" {\n\t\t\tnatsTrigger.Spec.Topic = topic\n\t\t}\n\n\t\tfunctionSelector, err := cmd.Flags().GetString(\"function-selector\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tif functionSelector != \"\" {\n\t\t\tlabelSelector, err := metav1.ParseToLabelSelector(functionSelector)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatal(\"Invalid label selector specified \" + err.Error())\n\t\t\t}\n\t\t\tnatsTrigger.Spec.FunctionSelector.MatchLabels = labelSelector.MatchLabels\n\t\t}\n\n\t\tdryrun, err := cmd.Flags().GetBool(\"dryrun\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\toutput, err := cmd.Flags().GetString(\"output\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err)\n\t\t}\n\n\t\tif dryrun == true {\n\t\t\tres, err := kubelessUtils.DryRunFmt(output, natsTrigger)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Fatal(err)\n\t\t\t}\n\t\t\tfmt.Println(res)\n\t\t\treturn\n\t\t}\n\n\t\terr = natsUtils.UpdateNatsTriggerCustomResource(natsClient, natsTrigger)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Failed to update NATS trigger object %s in namespace %s. Error: %s\", triggerName, ns, err)\n\t\t}\n\t\tlogrus.Infof(\"NATS trigger %s updated in namespace %s successfully!\", triggerName, ns)\n\t},\n}\n\nfunc init() {\n\tupdateCmd.Flags().StringP(\"namespace\", \"n\", \"\", \"Specify namespace for the NATS trigger\")\n\tupdateCmd.Flags().StringP(\"trigger-topic\", \"\", \"\", \"Specify topic to listen to in NATS\")\n\tupdateCmd.Flags().StringP(\"function-selector\", \"\", \"\", \"Selector (label query) to select function on (e.g. --function-selector key1=value1,key2=value2)\")\n\tupdateCmd.Flags().Bool(\"dryrun\", false, \"Output JSON manifest of the function without creating it\")\n\tupdateCmd.Flags().StringP(\"output\", \"o\", \"yaml\", \"Output format\")\n}\n"
  },
  {
    "path": "cmd/kubeless/trigger/trigger.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage trigger\n\nimport (\n\t\"github.com/kubeless/kubeless/cmd/kubeless/trigger/cronjob\"\n\t\"github.com/kubeless/kubeless/cmd/kubeless/trigger/http\"\n\t\"github.com/kubeless/kubeless/cmd/kubeless/trigger/kafka\"\n\t\"github.com/kubeless/kubeless/cmd/kubeless/trigger/kinesis\"\n\t\"github.com/kubeless/kubeless/cmd/kubeless/trigger/nats\"\n\t\"github.com/spf13/cobra\"\n)\n\n// TriggerCmd contains first-class command for trigger\nvar TriggerCmd = &cobra.Command{\n\tUse:   \"trigger SUBCOMMAND\",\n\tShort: \"trigger specific operations\",\n\tLong:  `trigger command allows user to create, list, update, delete triggers running on Kubeless`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcmd.Help()\n\t},\n}\n\nfunc init() {\n\tTriggerCmd.AddCommand(cronjob.CronjobTriggerCmd)\n\tTriggerCmd.AddCommand(kafka.KafkaTriggerCmd)\n\tTriggerCmd.AddCommand(http.HTTPTriggerCmd)\n\tTriggerCmd.AddCommand(nats.NATSTriggerCmd)\n\tTriggerCmd.AddCommand(kinesis.KinesisTriggerCmd)\n}\n"
  },
  {
    "path": "cmd/kubeless/version/version.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage version\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/kubeless/kubeless/pkg/version\"\n\t\"github.com/spf13/cobra\"\n)\n\n// VersionCmd contains first-class command for version\nvar VersionCmd = &cobra.Command{\n\tUse:   \"version\",\n\tShort: \"Print the version of Kubeless\",\n\tLong:  ``,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tfmt.Println(\"Kubeless version: \" + version.Version)\n\t},\n}\n"
  },
  {
    "path": "docker/dev-environment/Dockerfile",
    "content": "FROM docker:17.11.0-ce-dind\n\nENV GOPATH=/go\nENV PATH=$GOPATH/bin:/usr/local/go/bin:/usr/local/bats/bin:$PATH \\\n    CGO_ENABLED=0\n\n# Install packages that requires persistence\nRUN set -eux; \\\n    apk add --no-cache \\\n    bash \\\n    git \\\n    make \\\n    sudo \\\n    gcc \\\n    musl-dev \\\n    openssl \\\n    ca-certificates \\\n    zip \\\n    curl \\\n    go && \\\n    # Install kubectl\n    KUBECTL_VERSION=$(wget -qO- https://storage.googleapis.com/kubernetes-release/release/stable.txt) && \\\n    wget \"https://storage.googleapis.com/kubernetes-release/release/$KUBECTL_VERSION/bin/linux/amd64/kubectl\" -O \"/usr/local/bin/kubectl\" && \\\n    chmod +x /usr/local/bin/kubectl && \\\n    # Install gox and golint\n    go get github.com/mitchellh/gox github.com/golang/lint/golint && \\\n    # Install bats\n    git clone --depth 1 https://github.com/sstephenson/bats /usr/local/bats && \\\n    # Install kubecfg\n    wget \"https://github.com/ksonnet/kubecfg/releases/download/v0.5.0/kubecfg-linux-amd64\" -O \"/usr/local/bin/kubecfg\" && chmod +x \"/usr/local/bin/kubecfg\"\n\nWORKDIR $GOPATH\n\nADD ./entry-point.sh /\nENTRYPOINT [ \"/entry-point.sh\" ]\n"
  },
  {
    "path": "docker/dev-environment/entry-point.sh",
    "content": "#!/bin/bash\n\nif [ ! -d \"$GOPATH/src/github.com/kubeless/kubeless\" ]; then\n    echo \"Kubeless directory not found\"\n    exit 1\nfi    \n\nif [ ! -d \"$GOPATH/src/github.com/kubeless/kubeless/ksonnet-lib\" ]; then\n    # Ksonnet-lib is required in the same folder than Kubeless\n    git clone --depth=1 https://github.com/ksonnet/ksonnet-lib.git \"$GOPATH/src/github.com/kubeless/kubeless/ksonnet-lib\"\nfi\nexport KUBECFG_JPATH=\"$GOPATH/src/github.com/kubeless/kubeless/ksonnet-lib\"\n\ndockerd > /dev/null 2>&1 &\n\ncd \"$GOPATH/src/github.com/kubeless/kubeless\"\n\n\"$@\"\n"
  },
  {
    "path": "docker/event-sources/kubernetes/Dockerfile",
    "content": "FROM bitnami/minideb:jessie\n\nRUN install_packages python3 curl ca-certificates git\nRUN curl https://bootstrap.pypa.io/get-pip.py --output get-pip.py\nRUN python3 ./get-pip.py\nRUN pip3 install  --no-cache-dir --upgrade kubernetes\nRUN pip3 install  --no-cache-dir --upgrade requests\n\nRUN git clone --depth 1 https://github.com/dpkp/kafka-python\nWORKDIR kafka-python\nRUN python3 ./setup.py install\n\nWORKDIR /\nADD events.py .\n\nCMD [\"python3\", \"/events.py\"]\n"
  },
  {
    "path": "docker/event-sources/kubernetes/README.md",
    "content": "# Container to feed k8s events to kafka\n\n`events.py` is a Python 3.4 script that uses `asyncio` and the Kubernetes python client plus a Kafka client to watch for k8s events and send those events onto the kubeless Kafka _k8s_ topic.\n\nThe Dockerfile just builds an image to start this as a deployment in a k8s cluster running kubeless.\n\n## Usage\n\nCreate the `k8s` topic in kubeless:\n\n```\nkubeless topic create k8s\n```\n\nThen launch the event sync:\n\n```\nkubectl run event --image=skippbox/k8s-events:0.10.12\n```\n"
  },
  {
    "path": "docker/event-sources/kubernetes/events.py",
    "content": "import asyncio\nimport logging\nimport json\n\nfrom kubernetes import client, config, watch\n\nfrom kafka import KafkaProducer\nfrom kafka.errors import KafkaError\n\nlogger = logging.getLogger('k8s_events')\nlogger.setLevel(logging.DEBUG)\n\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\n\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n#config.load_kube_config()\nconfig.load_incluster_config()\n\nv1 = client.CoreV1Api()\nv1ext = client.ExtensionsV1beta1Api()\n\nproducer=KafkaProducer(bootstrap_servers='kafka.kubeless:9092',value_serializer=lambda v: json.dumps(v).encode('utf-8'))\n\n@asyncio.coroutine\ndef pods():\n    w = watch.Watch()\n    for event in w.stream(v1.list_pod_for_all_namespaces):\n        logger.info(\"Event: %s %s %s\" % (event['type'], event['object'].kind, event['object'].metadata.name))\n        msg = {'type':event['type'],'object':event['raw_object']}\n        producer.send('k8s', msg)\n        producer.flush()\n        yield from asyncio.sleep(0.1) \n\n@asyncio.coroutine\ndef namespaces():\n    w = watch.Watch()\n    for event in w.stream(v1.list_namespace):\n        logger.info(\"Event: %s %s %s\" % (event['type'], event['object'].kind, event['object'].metadata.name))\n        msg = {'type':event['type'],'object':event['raw_object']}\n        producer.send('k8s', msg)\n        producer.flush()\n        yield from asyncio.sleep(0.1)\n        \n@asyncio.coroutine\ndef services():\n    w = watch.Watch()\n    for event in w.stream(v1.list_service_for_all_namespaces):\n        logger.info(\"Event: %s %s %s\" % (event['type'], event['object'].kind, event['object'].metadata.name))\n        producer=KafkaProducer(bootstrap_servers='kafka.kubeless:9092',value_serializer=lambda v: json.dumps(v).encode('utf-8'))\n        msg = {'type':event['type'],'object':event['raw_object']}\n        producer.send('k8s', msg)\n        
producer.flush()\n        yield from asyncio.sleep(0.1)\n\n@asyncio.coroutine        \ndef deployments():\n    w = watch.Watch()\n    for event in w.stream(v1ext.list_deployment_for_all_namespaces):\n        logger.info(\"Event: %s %s %s\" % (event['type'], event['object'].kind, event['object'].metadata.name))\n        msg = {'type':event['type'],'object':event['raw_object']}\n        producer.send('k8s', msg)\n        producer.flush()\n        yield from asyncio.sleep(0.1)\n\n@asyncio.coroutine    \ndef replicasets():\n    w = watch.Watch()\n    for event in w.stream(v1ext.list_replica_set_for_all_namespaces):\n        logger.info(\"Event: %s %s %s\" % (event['type'], event['object'].kind, event['object'].metadata.name))\n        msg = {'type':event['type'],'object':event['raw_object']}\n        producer.send('k8s', msg)\n        producer.flush()\n        yield from asyncio.sleep(0.1)\n\nioloop = asyncio.get_event_loop()\n\nioloop.create_task(pods())\nioloop.create_task(namespaces())\nioloop.create_task(services())\nioloop.create_task(deployments())\nioloop.create_task(replicasets())\n\ntry:\n    # Blocking call interrupted by loop.stop()\n    print('step: loop.run_forever()')\n    ioloop.run_forever()\nexcept KeyboardInterrupt:\n    pass\nfinally:\n    print('step: loop.close()')\n    ioloop.close()\n"
  },
  {
    "path": "docker/function-controller/Dockerfile",
    "content": "FROM bitnami/minideb:jessie\n\nRUN install_packages ca-certificates\n\nADD kubeless-function-controller /kubeless-function-controller\n\nENTRYPOINT [\"/kubeless-function-controller\"]\n"
  },
  {
    "path": "docker/function-image-builder/Dockerfile",
    "content": "FROM fedora:27\n\nRUN dnf install -y skopeo nodejs\n\nADD imbuilder /\nADD entrypoint.sh /\n\nENTRYPOINT [ \"/entrypoint.sh\" ]\n"
  },
  {
    "path": "docker/function-image-builder/entrypoint.sh",
    "content": "#!/bin/bash\n\n# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -e\n\n# Kubernetes ImagePullSecrets uses .dockerconfigjson as the file name\n# for storing credentials but skopeo requires it to be named config.json\nif [ -f $DOCKER_CONFIG_FOLDER/.dockerconfigjson ]; then\n    echo \"Creating $HOME/.docker/config.json\"\n    mkdir -p $HOME/.docker\n    ln -s $DOCKER_CONFIG_FOLDER/.dockerconfigjson $HOME/.docker/config.json\nfi\n\n\"${@}\"\n"
  },
  {
    "path": "docker/runtime/README.md",
    "content": "Kubeless Runtimes have been migrated to their own repository. You can find them here: https://github.com/kubeless/runtimes/\n\nIf you are interested in creating a new runtime please follow the instructions here: https://kubeless.io/docs/implementing-new-runtime/\n"
  },
  {
    "path": "docker/unzip/Dockerfile",
    "content": "FROM bitnami/minideb\nRUN install_packages unzip curl ca-certificates tar gzip bzip2 xz-utils\n"
  },
  {
    "path": "docs/GKE-deployment.md",
    "content": "# Deploying Kubeless to Google Kubernetes Engine (GKE)\n\nThis guide goes over the required steps for deploying Kubeless in GKE. There are a few pain points that you need to know in order to successfully deploy Kubeless in a GKE environment. First your google cloud account should have enough privileges to create and manage clusters. You can login to your account using the `gcloud` CLI tool:\n\n```console\n$ gcloud auth login\nGo to the following link in your browser:\n\n    https://accounts.google.com/o/oauth2/auth?redirect_uri=...\n\nEnter verification code: ...\nYou are now logged in as [your@mail.com].\nYour current project is [your-project].  You can change this setting by running:\n  $ gcloud config set project PROJECT_ID\n```\n\nYou can also follow the initialization process executing `gcloud init`.\n\n## Creating a cluster\n\nOnce you are logged in, you can create the cluster:\n\n```console\ngcloud container clusters create \\\n  --cluster-version=1.8.10-gke.0 \\\n  my-cluster \\\n  --num-nodes 5\n```\n\nAt the moment of writing this document, the CI/CD system is testing Kubeless against GKE 1.8 so that's the one we are specifying as the desired version. You can check the current version tested in [the Travis file](../.travis.yml).\n\nThe default number of nodes is 3. That default number is enough for small deployments but it is recommended to use at least 5 or 7 nodes so you don't run out of resources after deploying a few functions.\n\nAfter a few minutes you should be able to see your cluster running:\n\n```console\n$ gcloud container clusters list\nNAME        ...          STATUS\nmy-cluster  ...          RUNNING\n```\n\n## Creating the admin clusterrolebinding\n\nFor deploying Kubeless in your cluster, your user should have enough permissions for creating cluster roles and cluster role bindings. For doing so you need to give your current GKE account admin privileges in the new cluster. 
This is not being done by default so you need to do it manually:\n\n```console\nkubectl create clusterrolebinding kubeless-cluster-admin --clusterrole=cluster-admin --user=<your-gke-user>\n```\n\nThe above command may fail with:\n\n```console\nError from server (Forbidden): User \"your-gke-user\" cannot create\nclusterrolebindings.rbac.authorization.k8s.io at the cluster scope\n```\n\nThis error is shown since your account doesn't have privileges to create `clusterrolebindings` (even if you are able to create clusters). If that is the case you can still perform the above operation using the default `admin` user. You can retrieve the admin password executing:\n\n```console\ngcloud container clusters describe my-cluster --zone <my-cluster-zone>\n```\n\nOnce you have the admin password you can retry the command above:\n\n```console\nkubectl --username=admin --password=<admin_password> \\\n  create clusterrolebinding kubeless-cluster-admin \\\n  --clusterrole=cluster-admin --user=<your-gke-user>\n```\n\n## Deploying Kubeless\n\nAfter that you are finally able to deploy Kubeless. Get the latest release from the [release page](https://github.com/kubeless/kubeless/releases) and deploy the RBAC version of the Kubeless manifest.\n\n## Kubeless on GKE 1.8.x with Alpha features\n\nOn GKE 1.8.x, when you have finished the above steps, there is still one step required to make the Kafka/Zookeeper PVC bound if you enable alpha features when creating your cluster. Checking PVC you will see they are pending:\n\n```\nkubectl get pvc -n kubeless\nNAME              STATUS    VOLUME    CAPACITY   ACCESSMODES   STORAGECLASS   AGE\ndatadir-kafka-0   Pending                                                     2m\nzookeeper-zoo-0   Pending                                                     2m\n```\n\nBecause there are no correlative PV available, you have to create them. 
On GKE, you might want to go with [GKE Persistent Disk](https://kubernetes.io/docs/concepts/storage/volumes/#gcepersistentdisk). First, create two PD with this command:\n\n```console\ngcloud compute disks create --size=1GB --zone=<your_GKE_zone> kubeless-kafka\ngcloud compute disks create --size=1GB --zone=<your_GKE_zone> kubeless-zookeeper\n```\n\nThen create Kafka and Zookeeper PV:\n\n```console\nkubectl create -f docs/misc/kafka-pv.yaml\nkubectl create -f docs/misc/zookeeper-pv.yaml\n```\n\nOnce both PV are created, the PVC will be bounded shortly and you will see Kafka and Zookeeper running:\n\n```console\nkubectl get pod -n kubeless\nNAME                                   READY     STATUS    RESTARTS   AGE\nkafka-0                                1/1       Running   1          30m\nkubeless-controller-659755588f-bwch6   1/1       Running   0          30m\nzoo-0                                  1/1       Running   0          30m\n```\n"
  },
  {
    "path": "docs/README.md",
    "content": "# Kubeless Docs\n\nThis folder holds the documentation that is served in [https://kubeless.io/docs](https://kubeless.io/docs).\n\n> Note: This folder may contain changes that have not been released yet. To get the latest features available, click on the link above.\n"
  },
  {
    "path": "docs/advanced-function-deployment.md",
    "content": "# Deploying Kubeless Functions using Kubernetes API\n\nApart from using the `kubeless` CLI, it is possible to deploy Kubeless Functions directly using the Kubernetes API and creating Function objects. A minimal Function might look like:\n\n```yaml\napiVersion: kubeless.io/v1beta1\nkind: Function\nmetadata:\n  name: get-python\n  namespace: default\n  label:\n    created-by: kubeless\n    function: get-python\nspec:\n  runtime: python2.7\n  timeout: \"180\"\n  handler: helloget.foo\n  deps: \"\"\n  checksum: sha256:d251999dcbfdeccec385606fd0aec385b214cfc74ede8b6c9e47af71728f6e9a\n  function-content-type: text\n  function: |\n    def foo(event, context):\n        return \"hello world\"\n```\n\nThe fields that a Function specification can contain are:\n\n - Runtime: Runtime ID and version that the function will use. It should match one of the availables in the [Kubeless configuration](/docs/function-controller-configuration).\n - Timeout: Maximum timeout for the given function. After that time, the function execution will be terminated.\n - Handler: Pair of `<file_name>.<function_name>`. When using `zip` or `compressedtar` in `function-content-type`, the `<file_name>` will be used to find the file with the function to expose. In other cases, it will be used just as a final file name. `<function_name>` is used to select the function to run from the exported functions of `<file_name>`. This field is mandatory and should match with an exported function.\n - Deps: Dependencies of the function. The format of this field will depend on the runtime, e.g. a `package.json` for NodeJS functions or a `Gemfile` for Ruby.\n - Checksum: SHA256 of the function content.\n - Function content type: Content type of the function. Current supported values are `base64`, `url` or `text`. If the content is zipped, the suffix `+zip` should be added. 
If the content is a gzip/bzip2/xz compressed tar file, the suffix `+compressedtar` should be added.\n - Function: Function content.\n\nApart from the basic parameters, it is possible to add the specification of a `Deployment`, a `Service` or an `Horizontal Pod Autoscaler` that Kubeless will use to generate them.\n\n## Pod Anti Affinity\n\nBy default, a kubless generated `Deployment` will include a soft pod anti-affinity rule that will signal to kubernetes that it should try to deploy pods to different nodes. This behaviour can be overridden using a deployment template.\n\n## Deploying large functions\n\nAs any Kubernetes object, function objects have a maximum size of 1.5MiB (due to the [maximum size](https://github.com/etcd-io/etcd/blob/master/Documentation/dev-guide/limit.md#request-size-limit) of an etcd entry). Because of that, it's not possible to specify in the `function` field of the YAML content that surpasses that size. To workaround this issue it's possible to specify an URL in the `function` field. This file will be downloaded at build time (extracted if necessary) and the checksum will be checked. Doing this we avoid any limitation regarding the file size. It's also possible to include the function dependencies in this file and skip the dependency installation step. 
Note that since the file will be downloaded in a pod the URL should be accessible from within the cluster:\n\n```yaml\n  checksum: sha256:d1f84e9f0a8ce27e7d9ce6f457126a8f92e957e5109312e7996373f658015547\n  function: https://github.com/kubeless/kubeless/blob/master/examples/nodejs/helloFunctions.zip?raw=true\n  function-content-type: url+zip\n```\n\n## Functions with bundled deps file\n\nSince the dependencies file(for python runtime: ``requirement.txt``) will become long and difficult to put into kubernetes object as function getting complex, Kubeless support use the deps file in remote zip file with function.\n\nUsage: \n\n- 1.Compress your function and dependencies file(in this case: ``requirement.txt``) into a zip file\n- 2.add ``+deps`` into ``function-content-type``\n\n```yaml\n  checksum: sha256:d1f84e9f0a8ce27e7d9ce6f457126a8f92e957e5109312e7996373f658015547\n  function: https://github.com/kubeless/kubeless/blob/master/examples/nodejs/hellowithbundleddeps.zip?raw=true\n  function-content-type: url+zip+deps\n```\n\n## Custom Deployment\n\nIt is possible to specify a [`Deployment` spec](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#creating-a-deployment) in the Function spec that will be merged with default values set by the Kubeless controller. It is not necessary to specify all the fields of the deployment, just the fields you are interested on overwriting. 
For example:\n\n```yaml\napiVersion: kubeless.io/v1beta1\nkind: Function\nmetadata:\n  name: get-python\n...\nspec:\n...\n  deployment:\n    spec:\n      template:\n        spec:\n          initContainers:\n          - resources:\n              limits:\n                cpu: 200m\n                memory: 200Mi\n              requests:\n                cpu: 200m\n                memory: 200Mi\n          containers:\n          - env:\n            - name: FOO\n              value: bar\n            name: \"\"\n            resources:\n              limits:\n                cpu: 100m\n                memory: 100Mi\n              requests:\n                cpu: 100m\n                memory: 100Mi\n            volumeMounts:\n            - mountPath: /my_secret\n              name: my-secret-vol\n          volumes:\n          - name: my-secret-vol\n            secret:\n              secretName: my-secret\n```\n\nWould create a function with the environment variable `FOO`, using CPU and memory limits and mounting the secret `my-secret` as a volume. Note that you can also specify a default template for a Deployment spec in the [controller configuration](/docs/function-controller-configuration).\nThe resource configuration in `initContainers` will be applied to all of the initial containers in the target deployment (like `provision`, `compile` etc.)\n\n\n## Custom Service\n\nAs with a deployment, it is possible to specify custom values for a [Service](https://kubernetes.io/docs/concepts/services-networking/service). 
This would be an example:\n\n```yaml\napiVersion: kubeless.io/v1beta1\nkind: Function\nmetadata:\n  name: get-python\n...\nspec:\n...\n  service:\n    clusterIP: None\n    ports:\n    - name: http-function-port\n      port: 9090\n      protocol: TCP\n      targetPort: 9090\n    selector:\n      created-by: kubeless\n      function: get-python\n    type: ClusterIP\n```\n\nThe example above will create a headless service running in the port 9090.\n\n## Horizontal Pod Autoscaler\n\nFor configuring the [autoscale feature](/docs/autoscaling) it is possible to attach an [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) to a function:\n\n```yaml\napiVersion: kubeless.io/v1beta1\nkind: Function\nmetadata:\n  name: get-python\n...\nspec:\n...\n  horizontalPodAutoscaler:\n    apiVersion: autoscaling/v2beta1\n    kind: HorizontalPodAutoscaler\n    metadata:\n      name: get-python\n      namespace: default\n    spec:\n      maxReplicas: 3\n      metrics:\n      - resource:\n          name: cpu\n          targetAverageUtilization: 70\n        type: Resource\n      minReplicas: 1\n      scaleTargetRef:\n        apiVersion: apps/v1beta1\n        kind: Deployment\n        name: get-python\n```\n\nThe above specification will create a Horizontal Pod Autoscaler using CPU metrics.\n"
  },
  {
    "path": "docs/architecture.md",
    "content": "# Kubeless architecture\n\nThis doc covers the architectural design of Kubeless and directory structure of the repository.\n\n## Concepts\n\nKubeless is built around below core concepts:\n\n- Functions\n- Triggers\n- Runtime\n\n### Functions\n\nA _Function_ is representation of the code to be executed. Along with the code a _Function_ contains metadata about its runtime dependencies, build instructions etc. A _Function_ has a independent life-cycle. The following methods are supported:\n\n* Deploy - Deploy function as function instances. This step may involve building the function image or re-use pre-generated image and deploying it on the cluster.\n* Execute - Invoke a function directly i.e) not through any event source\n* Get - Return the function metadata and spec\n* Update - Modify the function specification and its metadata\n* Delete - Delete a function, and clean up any resource provisioned for the function from the cluster\n* List - Show the list of functions and their metadata\n* Logs - Return the logs generated by a function\n\n### Triggers\n\nA _Trigger_ represents an event source for the functions associated to it. When an event occurs in the event source, Kubeless will ensure that the associated functions are invoked **at most once**. A Trigger can be associated to a single function or to several ones depending on the event source type. They are decoupled from the life-cycle of functions and can be independently operated with the following methods:\n\n* Create - Create a new trigger with details on event source and associated functions\n* Update - Modify the trigger specification\n* Delete - Delete a trigger, and clean up any resource provisioned for the trigger\n* List - Show the list of trigger and their specification\n\n### Runtime\n\nA _Runtime_ represents language and runtime specific environment in which function will be executed. 
Please see [runtimes](/docs/runtimes) for more details.\n\n## Design\n\nKubeless leverages multiple concepts of Kubernetes in order to support functions deployed on top of it. In details, we have been using:\n\n- A Custom Resource Definitions (CRD) is used to represent function\n- Each event source is modeled as a separate Trigger CRD object\n- Separate Custom Resource Definitions controller to handle CRUD operations corresponding to CRD object\n- Deployment / Pod to run the corresponding runtime.\n- Configmap to inject function's code into the runtime pod.\n- Init-container to load the dependencies that function might have.\n- Service to expose function.\n- Ingress resources to expose functions externally\n\nUse of Kubernetes CRD's and CRD controllers forms the core design tenet of Kubeless. Use of separate CRD's for functions and triggers provides clear separation of concerns. Use of separate CRD controllers keeps the code decoupled and modular.\n\n### Functions\n\nWhen you install kubeless, there is a CRD `functions.kubeless.io` created to represent _Function_:\n\n```yaml\n$ kubectl get customresourcedefinition functions.kubeless.io -o yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  ...\n  name: functions.kubeless.io\n  ...\n  selfLink: /apis/apiextensions.k8s.io/v1beta1/customresourcedefinitions/functions.kubeless.io\nspec:\n  group: kubeless.io\n  names:\n    kind: Function\n    listKind: FunctionList\n    plural: functions\n    singular: function\n  scope: Namespaced\n  version: v1beta1\n```\n\nThen function custom objects will be created under this CRD endpoint. 
A function object looks like this:\n\n```yaml\n$ kubectl get function get-python -o yaml\napiVersion: kubeless.io/v1beta1\nkind: Function\nmetadata:\n  clusterName: \"\"\n  creationTimestamp: 2018-03-25T19:13:07Z\n  finalizers:\n  - kubeless.io/function\n  generation: 0\n  labels:\n    created-by: kubeless\n    function: get-python\n  name: get-python\n  namespace: default\n  resourceVersion: \"9219\"\n  selfLink: /apis/kubeless.io/v1beta1/namespaces/default/functions/get-python\n  uid: 8d25a793-3060-11e8-ad89-08002730c417\nspec:\n  checksum: sha256:d251999dcbfdeccec385606fd0aec385b214cfc74ede8b6c9e47af71728f6e9a\n  deployment:\n    metadata:\n      creationTimestamp: null\n    spec:\n      strategy: {}\n      template:\n        metadata:\n          creationTimestamp: null\n        spec:\n          containers:\n          - name: \"\"\n            resources: {}\n    status: {}\n  deps: \"\"\n  function: |\n    def foo(event, context):\n        return \"hello world\"\n  function-content-type: text\n  handler: helloget.foo\n  horizontalPodAutoscaler:\n    metadata:\n      creationTimestamp: null\n    spec:\n      maxReplicas: 0\n      scaleTargetRef:\n        kind: \"\"\n        name: \"\"\n    status:\n      conditions: null\n      currentMetrics: null\n      currentReplicas: 0\n      desiredReplicas: 0\n  runtime: python2.7\n  service:\n    ports:\n    - name: http-function-port\n      port: 8080\n      protocol: TCP\n      targetPort: 8080\n    selector:\n      created-by: kubeless\n      function: get-python\n    type: ClusterIP\n  timeout: \"180\"\n```\n\n`function.spec` contains function's details like code, handler, runtime and probably its dependency file etc.\n\nKubeless ships with a CRD controller named `function-controller` which continuously watches changes to function objects and reacts accordingly. By default function-controller is installed in `kubeless-controller-manager` deployment which is deployed into `kubeless` namespace. 
Function-controller watches for create events corresponding to creation of _Function_ object. Function-controller creates a deployment for the function, and exposes the function as a clusterIP service. Both deployment and service resources created for the function can be controlled by the function creator by explicitly specifying deployment spec and service spec respectively in the `function.spec`.\n\nRuntime image used for the function deployment could be chosen by one of the below options:\n\n* User explicitly specifies custom runtime image to be used for the function\n* Image artifact is generated on the fly by the function builder\n* A pre-built image is used for each language and version combination. A configmap is used to inject the function code from function.spec.function into the corresponding k8s runtime pod.\n\nFunction-controller on receiving Function CRD object deletion event, cleans up all the resources (deployment, service, configmap etc) provisioned.\n\n### HTTP triggers\n\nWhen you install kubeless, there is a CRD `httptriggers.kubeless.io` created to represent HTTP triggers:\n\n```console\n$  kubectl get customresourcedefinition httptriggers.kubeless.io -o yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: httptriggers.kubeless.io\n  resourceVersion: \"102\"\n  selfLink: /apis/apiextensions.k8s.io/v1beta1/customresourcedefinitions/httptriggers.kubeless.io\n  uid: 0aa4a346-2ff4-11e8-ad89-08002730c417\nspec:\n  group: kubeless.io\n  names:\n    kind: HTTPTrigger\n    listKind: HTTPTriggerList\n    plural: httptriggers\n    singular: httptrigger\n  scope: Namespaced\n  version: v1beta1\n```\n\nHTTP trigger custom objects will be created under `httptriggers.kubeless.io` CRD endpoint. 
An example HTTP trigger object looks like this:\n\n```console\n$ kubectl get httptrigger get-python -o yaml\napiVersion: kubeless.io/v1beta1\nkind: HTTPTrigger\nmetadata:\n  labels:\n    created-by: kubeless\n  name: get-python\n  namespace: default\nspec:\n  function-name: get-python\n  host-name: get-python.192.168.99.100.nip.io\n  ingress-enabled: true\n  path: func\n  tls: false\n```\nHTTP trigger object spec contains below fields:\n\n* function-name - name of the associated function that needs to be invoked when URL corresponding to http trigger is accessed\n* host-name - name used for virtual hosting\n* path - route requests with this path to function service\n* tls - true if TLS is to be enabled\n\n`kubeless-controller-manager` ships with http trigger CRD controller which watches for the HTTP trigger CRD objects and configures Kubernetes ingress accordingly.\n\n### Cronjob triggers\n\n```console\n$ kubectl get customresourcedefinition cronjobtriggers.kubeless.io -o yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: cronjobtriggers.kubeless.io\n  selfLink: /apis/apiextensions.k8s.io/v1beta1/customresourcedefinitions/cronjobtriggers.kubeless.io\nspec:\n  group: kubeless.io\n  names:\n    kind: CronJobTrigger\n    listKind: CronJobTriggerList\n    plural: cronjobtriggers\n    singular: cronjobtrigger\n  scope: Namespaced\n  version: v1beta1\n```\n\nCronjob trigger custom objects will be created under `cronjobtriggers.kubeless.io` CRD endpoint. 
An example Cronjob trigger object looks like this:\n\n```console\n$ kubectl get cronjobtrigger scheduled-get-python -o yaml\napiVersion: kubeless.io/v1beta1\nkind: CronJobTrigger\nmetadata:\n  labels:\n    created-by: kubeless\n    function: scheduled-get-python\n  name: scheduled-get-python\n  namespace: default\nspec:\n  function-name: scheduled-get-python\n  schedule: '* * * * *'\n```\n\nCronjob trigger object spec contains below fields:\n\n* function-name - name of the associated function that needs to be invoked periodically as per specified\n* schedule - it takes a Cron format string, e.g. 0 * * * * or @hourly, as schedule time of its jobs to be created and executed.\n\n`kubeless-controller-manager` ships with Cronjob trigger CRD controller which watches for the Cronjob trigger CRD objects and configures Kubernetes cronjobs to run the functions at the scheduled interval time.\n\n### Kafka triggers\n\n```console\n kubectl get customresourcedefinition kafkatriggers.kubeless.io -o yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: kafkatriggers.kubeless.io\n  selfLink: /apis/apiextensions.k8s.io/v1beta1/customresourcedefinitions/kafkatriggers.kubeless.io\n  uid: 0aa3988f-2ff4-11e8-ad89-08002730c417\nspec:\n  group: kubeless.io\n  names:\n    kind: KafkaTrigger\n    listKind: KafkaTriggerList\n    plural: kafkatriggers\n    singular: kafkatrigger\n  scope: Namespaced\n  version: v1beta1\n```\n\nKafka trigger custom objects will be created under `kafkatriggers.kubeless.io` CRD endpoint. 
An example Kafka trigger object looks like this:\n\n```console\n $ kubectl get kafkatrigger s3-python-kafka-trigger -o yaml\napiVersion: kubeless.io/v1beta1\nkind: KafkaTrigger\nmetadata:\n  labels:\n    created-by: kubeless\n  name: s3-python-kafka-trigger\n  namespace: default\nspec:\n  functionSelector:\n    matchLabels:\n      created-by: kubeless\n      topic: s3-python\n  topic: s3-python\n```\n\nKafka trigger object spec contains below fields:\n\n* functionSelector - label selector that selects list of matching functions\n* topic - Kafka topic messages to which the functions associated must be invoked.\n\n## Kubeless command-line client\n\nTogether with `kubeless-controller-manager`, we provide `kubeless` cli which enables users to interact with Kubeless system. At this moment, Kubeless cli provides these below actions:\n\n```console\n$ kubeless --help\nServerless framework for Kubernetes\n\nUsage:\n  kubeless [command]\n\nAvailable Commands:\n  autoscale         Manage autoscale to function on Kubeless\n  completion        Output shell completion code for the specified shell.\n  function          Function specific operations\n  get-server-config Print the current configuration of the controller\n  help              Help about any command\n  topic             Manage message topics in Kubeless\n  trigger           Trigger specific operations\n  version           Print the version of Kubeless\n\nFlags:\n  -h, --help   help for kubeless\n\nUse \"kubeless [command] --help\" for more information about a command.\n```\n\n## Implementation\n\nKubeless controller is written in Go programming language, and uses the Kubernetes client-go to interact with the Kubernetes apiserver.\n\nKubeless CLI is written in Go as well, using the popular cli library `github.com/spf13/cobra`. Basically it is a bundle of HTTP requests and kubectl commands. We send http requests to the Kubernetes apiserver in order to 'crud' CRD objects. 
Checkout [the cmd folder](https://github.com/kubeless/kubeless/tree/master/cmd/kubeless) for more details.\n\n## Directory structure\n\nIn order to help you getting a better feeling before you start diving into the project, we would give you the 10,000 feet view of the source code directory structure.\n\n- chart: chart to deploy Kubeless with Helm.\n- cmd: contains kubeless cli implementation and kubeless-controller.\n- docker: contains artifacts for building the kubeless-controller and runtime images.\n- docs: contains documentations.\n- examples: contains some samples of running function with kubeless.\n- manifests: collection of manifests for additional features.\n- pkg: contains shared packages.\n- script: contains build scripts.\n- vendor: contains dependencies packages.\n"
  },
  {
    "path": "docs/autoscaling.md",
    "content": "# Autoscaling function deployment in Kubeless\n\nThis document gives you an overview of how we do autoscaling for functions in Kubeless and also give you a walkthrough how to configure it for custom metric.\n\n## Overview\n\nKubernetes introduces [HorizontalPodAutoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) for pod autoscaling. In kubeless, each function is deployed into a separate Kubernetes deployment, so naturally we leverage HPA to automatically scale function based on defined workload metrics.\n\nIf you're on Kubeless CLI, this below command gives you an idea how to setup autoscaling for deployed function:\n\n```console\n$ kubeless autoscale --help\nautoscale command allows user to list, create, delete autoscale rule\nfor function on Kubeless\n\nUsage:\n  kubeless autoscale SUBCOMMAND [flags]\n  kubeless autoscale [command]\n\nAvailable Commands:\n  create      automatically scale function based on monitored metrics\n  delete      delete an autoscale from Kubeless\n  list        list all autoscales in Kubeless\n\nFlags:\n  -h, --help   help for autoscale\n\nUse \"kubeless autoscale [command] --help\" for more information about a command.\n```\n\nOnce you create an autoscaling rule for a specific function (with `kubeless autoscale create`), the corresponding HPA object will be added to the system which is going to monitor your function and auto-scale its pods based on the autoscaling rule you define in the command. The default metric is CPU, but you have option to do autoscaling with custom metrics. 
At this moment, Kubeless supports `qps` which stands for number of incoming requests to function per second.\n\n```console\n$ kubeless autoscale create --help\nautomatically scale function based on monitored metrics\n\nUsage:\n  kubeless autoscale create <name> FLAG [flags]\n\nFlags:\n  -h, --help               help for create\n      --max int32          maximum number of replicas (default 1)\n      --metric string      metric to use for calculating the autoscale. Supported\n      metrics: cpu, qps (default \"cpu\")\n      --min int32          minimum number of replicas (default 1)\n  -n, --namespace string   Specify namespace for the autoscale\n      --value string       value of the average of the metric across all replicas.\n      If metric is cpu, value is a number represented as percentage. If metric\n      is qps, value must be in format of Quantity\n```\n\nThe below part will walk you through the setup that needs to be done in order to make a function auto-scaled based on the `qps` metric.\n\n## Autoscaling based on CPU usage\n\nTo autoscale based on CPU usage, it is *required* that your function has been deployed with CPU request limits.\n\nTo do this, use the `--cpu` parameter when deploying your function. Please see the [Meaning of CPU](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#meaning-of-cpu) for the format of the value that should be passed. \n\n### Further reading\n\n[Custom Metrics API](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/custom-metrics-api.md)\n\n[Support for custom metrics](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics)\n"
  },
  {
    "path": "docs/building-functions.md",
    "content": "# Build process for functions\n\n> **Warning**: This feature is still under heavy development\n\nKubeless includes a way of building and storing functions as docker images. This can be used to:\n\n - Persist function: Functions now become docker images that can be safely stored in a docker registry.\n - Speed up the process of redeploying the same function. This is specifically useful for scaling up your function.\n - Generate immutable function deployments. Once a function image is generated, the same image will be used every time the function is used.\n\n### [Optional] Start a Docker registry\n\nIt is possible to use the Docker Hub to store your functions but if you want your functions to be private it is necessary to deploy a different Docker registry. In case you want to use the Docker Hub or if you already have a private Docker Registry jump to the [setup section](#setup-the-build-process). In other case, if you are working with Minikube in a testing environment, you can still deploy a registry as a container in the Minikube VM. For doing that, the first step is to start Minikube setting an insecure registry IP range:\n\n```console\nminikube start --insecure-registry 192.168.99.100:5000\n```\n\nNote that `192.168.99.100` is the IP that the Minikube VM is going to use in the host machine. You will need to use a different one if the IP of your VM is different. You can retrieve the IP executing `minikube ip`. You can also specify a range: e.g. `0.0.0.0/0` would allow an insecure registry in any IP.\n\nIf you already have a running Minikube VM, the previous command would not work since the insecure registry property [is set in the first boot](https://github.com/kubernetes/minikube/issues/604#issuecomment-309296149). If that is your case, stop your minikube instance, edit the file `$HOME/.minikube/machines/minikube/config.json` and change the property `HostOptions > EngineOptions > InsecureRegistry` to specify your IP. 
Then start your instance again.\n\nOnce minikube has started you can start the registry container:\n\n```console\neval $(minikube docker-env)\ndocker run -d -p 5000:5000 --restart=always --name registry -v /data/docker-registry:/var/lib/registry registry:2\n```\n\nThat will start the Docker registry using `/data/docker-registry` as the data folder for your images. This directory will be persisted after stopping the Minikube instance [as documented in the Minikube repository](https://github.com/kubernetes/minikube/blob/master/docs/persistent_volumes.md#persistent-volumes).\n\n## Setup the build process\n\nIn order to setup the build process the steps needed are:\n\n - Generate a Kubernetes [secret](https://kubernetes.io/docs/concepts/configuration/secret) with the credentials required to push images to the docker registry and enable the build step. In order to do so, `kubectl` has a utility that allows you to create this secret in just one command:\n\n| **Note**: The command below will generate the correct secret only if the version of `kubectl` is 1.9+ \n\n```console\nkubectl create secret docker-registry kubeless-registry-credentials \\\n  --docker-server=https://index.docker.io/v1/ \\\n  --docker-username=user \\\n  --docker-password=password \\\n  --docker-email=user@example.com\n```\n\n> Note: In case you have followed the [previous guide](#start-a-docker-registry) to deploy an insecure registry you need to specify as docker-server `http://$(minikube ip):5000/v2` and any value as username, password and email.\n\nIf the secret has been generated correctly you should see the following output:\n\n```console\n$ kubectl get secret kubeless-registry-credentials --output=\"jsonpath={.data.\\.dockerconfigjson}\" | base64 -d\n\n{\"auths\":{\"https://index.docker.io/v1/\":{\"username\":\"user\",\"password\":\"password\",\"email\":\"user@example.com\",\"auth\":\"dGVfdDpwYZNz\"}}}\n```\n\n - Enable the build step in the Kubeless configuration. 
If you have already deployed Kubeless you can enable it by editing the configmap. You will need to set the property `enable-build-step: \"false\"` to `\"true\"`. If you are using an insecure registry you will need to set the property `function-registry-tls-verify: \"false\"` as well.\n\n ```console\n kubectl edit configmaps -n kubeless kubeless-config\n ```\n\n - Once the build step is enabled you need to restart the controller in order for the changes to take effect:\n\n ```console\n kubectl delete pod -n kubeless -l kubeless=controller\n ```\n\nOnce the secret is available and the build step is enabled Kubeless will automatically start building function images.\n\n## Build process\n\nThe following diagram represents the building process:\n\n![Build Process](./img/build-process.png)\n\nWhen a new function is created the Kubeless Controller generates two items:\n \n - A [Kubernetes job](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) that will use the registry credentials to push a new image under the `user` repository. It will use the checksum (SHA256) of the function specification as tag so any change in the function will generate a different image.\n - A Pod to run the function. This pod will wait until the previous job finishes in order to pull the function image.\n\n## Known limitations\n\n - It is only possible to use a single registry to pull images and push them so if the build system is used with a registry different than https://index.docker.io/v1/ (the official one) the images present in the Kubeless ConfigMap should be copied to the new registry.\n - Base images are not currently cached, that means that every time a new build is triggered it will download the base image.\n \n"
  },
  {
    "path": "docs/cronjob-triggers.md",
    "content": "# Scheduling the trigger of a function \n\nKubeless has its own CronJobTrigger which uses [Kubernetes CronJob](https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/) to trigger your function in a given schedule. On this page, we're going to cover how to use it, and some basic features.\n\n## Creating a new CronJobTrigger\n\nYou can create a new cron trigger using `kubeless-cli`. In this section, we're going to show you how to create a simple function that logs `Hello world!` every 1 minute. \n\nFor this example you're going to need the following tools:\n\n* [Minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/)\n* [Kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)\n* [Kubeless CLI](/docs/quick-start/)\n\nAfter installing all the requirements, you can proceed to the step-by-step guide:\n\n### Step 1: Create a new Minikube cluster\n\nIn this step, you're going to create a new Minikube cluster called `kubeless`, where you're going to deploy the function triggers. You can run the following command on your shell:\n\n```shell\nminikube start -p kubeless\n```\n\n**IMPORTANT:** If you have already created any Minikube cluster called `kubeless` you should delete it first, with `minikube delete -p kubeless`\n\n### Step 2: Install Kubeless on your cluster\n\nNow that you have a Minikube cluster running, you can run the following command to install the latest version of Kubeless:\n\n```shell\nRELEASE=$(curl -s https://api.github.com/repos/kubeless/kubeless/releases/latest | grep tag_name | cut -d '\"' -f 4) && \\\n  kubectl create ns kubeless && \\\n  kubectl create -f https://github.com/kubeless/kubeless/releases/download/$RELEASE/kubeless-$RELEASE.yaml\n```\n\n### Step 3: Deploy a test function\n\nFor this CronJob test, we're going to use a simple function that just logs a \"Hello world!\" message. 
Since this isn't a tutorial explaining how to deploy a function you can just run the following command:\n\n```shell\n kubectl apply -f https://gist.githubusercontent.com/delucca/1f3a71b7ff05f31d492dc5bfd3f3afba/raw/5237991f018f99a697e937a85e60e57dd8ac1a1c/function.yaml\n```\n\n### Step 4: Create a new CronJob trigger\n\nTo create a CronJob trigger with `kubeless-cli` you can run the following command:\n\n```shell\nkubeless trigger cronjob create \\\n  cron-test-hello-world \\\n  --function cron-test-hello-world \\\n  --schedule \"*/1 * * * *\"\n```\n\nAbout the provided arguments:\n\n* **The first argument** must be the trigger name you want to use\n* **--function** should be the name of the function you want to trigger with that cron\n* **--schedule** the cron pattern to trigger your function\n\n### Step 5: Take a look on your function logs\n\nNow, wait 1 or 2 minutes and take a look at your function logs with this command:\n\n```shell\nkubeless function logs cron-test-hello-world\n```\n\nYou should see some `Hello world!` logs, showing that our CronJob is working as expected.\n\n## Advanced concepts\n\nIn this section, we're going to cover some advanced concepts regarding the CronJob trigger. Each item in this section will cover a given feature that you can use on your triggers.\n\n### Passing payload data to the function\n\nWhile triggering a function you could pass also a payload data to it. Those will be available on `event.data` (like any other request data). You can do so with the following command:\n\n```shell\nkubeless trigger cronjob (create or update) --payload <stringified JSON>\n```\n\nIf you're not willing to provide a stringified JSON to the `--payload` argument, you can use `--payload-from-file` instead and pass a file path. 
You can provide files with the following extensions:\n\n* `.json`\n* `.yaml`\n\n**IMPORTANT:** Your payload must be an object, so you cannot provide a JSON array to it, but you can add a key on your object that can contain a list of items instead.\n"
  },
  {
    "path": "docs/debug-functions.md",
    "content": "# Debug Kubeless Functions\n\nIn this document we will show how you can debug your function in order to spot possible errors. There could be several reasons that causes a wrong deployment. For learning how to successfully debug a function it is important to know what is the process of deploying a Kubeless function. In this guide we are going to assume that you are using the `kubeless` CLI tool to deploy your functions. If that is the case, this is the process to run a function:\n\n 1. The `kubeless` CLI read the parameters you give to it and produces a [Function](/docs/advanced-function-deployment) object that submits to the Kubernetes API server.\n 2. The Kubeless Function Controller detects that a new `Function` has been created and reads its content. From the function content it generates: a `ConfigMap` with the function code and its dependencies, a `Service` to make the function reachable through HTTP and a `Deployment` with the base image and all the required steps to install and run your functions. It is important to know this order because if the controller fails to deploy the `ConfigMap` or the `Service` it will never create the `Deployment`. A failure in any step will abort the process.\n 3. Once the `Deployment` has been created a `Pod` should be generated with your function. When a Pod starts it dinamically reads the content of your function (in case of interpreted languages).\n\nAfter all the above you are ready to call your function. Let's see some common mistakes and how to fix them.\n\n## \"kubeless function deploy\" fails\n\nThe first failure that can appear is an error in the parameters that we give to the `kubeless function deploy` command. Hopefully this errors are pretty easy to debug:\n\n```console\n$ kubeless function deploy --runtime node8 \\\n  --from-file hello.js \\\n  --handler todos.create \\\n  --dependencies package.json \\\n  hello\nFATA[0000] Invalid runtime: node8. 
Supported runtimes are: python2.7, python3.4, python3.6, nodejs6, nodejs8, ruby2.4, php7.2, go1.10\n```\n\nIn the above we can see that we have a typo in the runtime. It should be `nodejs8` instead of `node8`.\n\n## \"kubeless function ls\" returns \"MISSING: Check controller logs\"\n\nThere will be cases in which the validations done in the CLI won't be enough to spot a problem in the given parameters. If that is the case the function `Deployment` will never appear. To debug this kind of issue it is necessary to check what the error is in the controller logs. To retrieve these logs execute:\n\n```\n$ kubeless function deploy foo --from-file hellowithdata.py --handler hello,foo --runtime python3.6\nINFO[0000] Deploying function...\nINFO[0000] Function foo submitted for deployment\nINFO[0000] Check the deployment status executing 'kubeless function ls foo'\n$ kubeless function ls\nNAME \tNAMESPACE\tHANDLER  \tRUNTIME  \tDEPENDENCIES\tSTATUS\nfoo  \tdefault  \thello,foo\tpython3.6\t            \tMISSING: Check controller logs\n$ kubectl logs -n kubeless -l kubeless=controller -c kubeless-function-controller\ntime=\"2020-10-01T01:48:29Z\" level=info msg=\"Processing change to Function default/foo\" pkg=function-controller\ntime=\"2020-10-01T01:48:29Z\" level=error msg=\"Function can not be created/updated: failed: incorrect handler format. It should be module_name.handler_name\" pkg=function-controller\n```\n\nFrom the logs we can see that there is a problem with the handler: we specified `hello,foo` while the correct value is `hello.foo`.\n\n## Function pod is crashing\n\nThe most common error is finding that the `Deployment` is generated successfully but the function remains with the status `0/1 Not ready`. 
This is usually caused by a syntax error in our function or in the dependencies we specify.\n\nIf our function doesn't start we should check the status of the pods executing:\n\n```\n$ kubectl get pods -l function=foo\n```\n\n### Function pod crashes with Init:CrashLoopBackOff\n\nIf our function fails with an `Init` error that could mean that:\n\n - It fails to retrieve the function content.\n - It fails to install dependencies.\n - It fails to compile our function (in compiled languages).\n\nFor any of the above we should first identify which container is failing (since each step is performed in a different container):\n\n```console\n$ kubectl get pods -l function=foo\nNAME                   READY     STATUS                  RESTARTS   AGE\nfoo-74978bbf45-9xb4p   0/1       Init:CrashLoopBackOff   1         6m\n$ kubectl get pods -l function=foo -o yaml\n...\n      name: install\n      ready: false\n      restartCount: 2\n...\n```\n\nFrom the above we can see that the container `install` is the one with the problem. Depending on the runtime the logs of the container will be shown as well so we can directly spot the issue. 
Unfortunately that is not the case so let's retrieve manually the logs of the `install` container:\n\n```console\n$ kubectl logs foo-74978bbf45-9xb4p -c install --previous\n...\nCollecting twiter (from -r /kubeless/requirements.txt (line 1))\n  Retrying (Retry(total=4, connect=None, read=None, redirect=None, status=None)) after connection broken by 'NewConnectionError('<pip._vendor.urllib3.connection.VerifiedHTTPSConnection object at 0x7f10eb4d7400>: Failed to establish a new connection: [Errno -3] Temporary failure in name resolution',)': /simple/twiter/\n```\n\nNow we can spot that the problem is a typo in our requirements: `twiter` should be `twitter`.\n\n### Function pod crashes with CrashLoopBackOff\n\nIn the case the Pod remains in that state we should retrieve the logs of the runtime container:\n\n```console\n$ kubectl get pods -l function=bar\nNAME                   READY     STATUS             RESTARTS   AGE\nbar-7d458f6d7c-2gsh7   0/1       CrashLoopBackOff   7          15m\n$ kubectl logs -l function=bar\nkubectl logs -l function=bar\nTraceback (most recent call last):\n...\n  File \"/kubeless/hello.py\", line 2\n    return Hello world\n                     ^\nSyntaxError: invalid syntax\n```\n\nWe can see that we have a syntax error: `return Hello world` should be modified with `return \"Hello world\"`.\n\n### Function returns an \"Internal Server Error\"\n\nThere will be cases in which the pod doesn't crash but the function returns an error:\n\n```console\n$ kubectl get pods -l function=test\nNAME                    READY     STATUS    RESTARTS   AGE\ntest-6845ff45cb-6q865   1/1       Running   0          1m\n$ kubeless function call test --data '{\"username\": \"test\"}'\nERRO[0000]\nFATA[0000] an error on the server (\"Internal Server Error\") has prevented the request from succeeding\n```\n\nThis usually means that the function is syntactically correct but it has a bug. 
Again for spotting the issue we should check the function logs:\n\n```console\n$ kubectl logs -l function=test\n...\n[27/Apr/2018:15:45:33 +0000] \"GET /healthz HTTP/1.1\" 200 2 \"-\" \"kube-probe/.\"\nFunction failed to execute: TypeError: Cannot read property 'name' of undefined\n    at handler (/kubeless/hello.js:3:39)\n    ...\n```\n\nWe can see that it is raising an error in the line 3 of our function:\n\n```js\nmodule.exports = {\n  handler: (event, context) => {\n    return \"Hello \" + event.data.user.name;\n  },\n};\n```\n\nWe are trying to access the property `name` of the property `user` while we are giving the function `username` instead.\n\n## Conclusion\n\nThese are just some tips to quickly identify what's gone wrong with a function. If after checking the controller and function logs (or any other information that Kubernetes may provide) you are not able to spot the error you can open an [Issue in our GitHub repository](https://github.com/kubeless/kubeless/issues) or contact us through [slack](http://slack.k8s.io) in the #kubeless channel.\n"
  },
  {
    "path": "docs/debugging.md",
    "content": "# Debugging Kubeless\n\nAs a developer you'll probably be interested in the investigation of Kubeless code. A possible result of this investigation process is the proposition of a new feature or any additional contribution that could make sense.\n\nIn this context, debugging tools arise as a fundamental part of this mentioned understanding process. This document will describe the process that developers must execute in order to be able to debug Kubeless code.\n\n## 1. Delve\n\nDelve is the component that allows you to debug Go code. This way, the first thing you need to do is install the solution on your computer.\n\nYou can find the procedure to install and configure Delve on your PC (Linux, Mac and Windows) following [this link](https://github.com/derekparker/delve/tree/master/Documentation/installation).\n\n### Important Note\n\nSome versions of Mac OS have been facing some troubles related to injection of the auto-generated digital certificate required by Delve installation via the Homebrew process. 
The error seems like that one presented below.\n\n```console\n==> Tapping go-delve/delve\nCloning into '/usr/local/Homebrew/Library/Taps/go-delve/homebrew-delve'...\nremote: Counting objects: 7, done.\nremote: Compressing objects: 100% (6/6), done.\nremote: Total 7 (delta 0), reused 5 (delta 0), pack-reused 0\nUnpacking objects: 100% (7/7), done.\nTapped 1 formula (33 files, 41.4KB)\n==> Installing delve from go-delve/delve\n==> Using the sandbox\n==> Downloading https://github.com/derekparker/delve/archive/v1.0.0-rc.1.tar.gz\n==> Downloading from https://codeload.github.com/derekparker/delve/tar.gz/v1.0.0-rc.1\n######################################################################## 100.0%\nsecurity: SecKeychainSearchCopyNext: The specified item could not be found in the keychain.\n==> Generating dlv-cert\n==> openssl req -new -newkey rsa:2048 -x509 -days 3650 -nodes -config dlv-cert.cfg\n-extensions codesign_reqext -batch -out dlv-cert.cer -keyout dlv-cert.key\n==> [SUDO] Installing dlv-cert as root\n==> sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain\ndlv-cert.cer\nLast 15 lines from /Users/gta/Library/Logs/Homebrew/delve/02.sudo:\n2017-08-02 17:06:05 +0200\n\nsudo\nsecurity\nadd-trusted-cert\n-d\n-r\ntrustRoot\n-k\n/Library/Keychains/System.keychain\ndlv-cert.cer\n```\n\nThis error commonly occurs because the installer wasn't able (for some reason) to auto-generate the required certificate for Delve installer.\n\nYou can manually fix the error installing the certificate by yourself. To do that please follow the steps described below.\n\n**Unzip the delve-1.0.0-rc.1 file**\n\n```console\n$ tar  /Users/{you_user}/Library/Caches/Homebrew/delve-1.0.0-rc.1\n```\n\n**Navigate to Delve/Scripts directory**\n\n```console\n$ cd /Users/{your_user}/Library/Caches/Homebrew/delve-1.0.0-rc.1/scripts\n```\n\n**Execute gencert and provide your admin password**\n\n```console\n$ ./gencert.sh\n```\n\nDone. 
Now you can try to install Delve again via Homebrew. You'll see that the operation will be completed successfully.\n\n## 2. Configure Visual Studio Code\n\nIn order to demonstrate the debug process I'll use Visual Studio Code. Visual Studio Code is a lightweight but powerful source code editor which runs on your desktop and is available for Windows, macOS and Linux. It comes with built-in support for JavaScript, TypeScript and Node.js and has a rich ecosystem of extensions for other languages (such as C++, C#, Python, PHP, Go) and runtimes (such as .NET and Unity).\n\nTo know more about VS Code, follow [this link](https://code.visualstudio.com/docs).\n\nMicrosoft already did a great job describing the process to configure Delve on top of VS Code. In order to accomplish that, please, follow [this link](https://github.com/Microsoft/vscode-go/wiki/Debugging-Go-code-using-VS-Code).\n\n## 3. Debugging Kubeless\n\nIf you were successful with the VS Code debug setup task, you now have a new directory with one file called \"launch.json\" inside. This file must contain the following content inside.\n\n```json\n{\n\t\"version\": \"0.2.0\",\n\t\"configurations\": [\n\t\t{\n\t\t\t\"name\": \"Launch\",\n\t\t\t\"type\": \"go\",\n\t\t\t\"request\": \"launch\",\n\t\t\t\"mode\": \"debug\",\n\t\t\t\"remotePath\": \"\",\n\t\t\t\"port\": 2345,\n\t\t\t\"host\": \"127.0.0.1\",\n\t\t\t\"program\": \"${workspaceRoot}\",\n\t\t\t\"env\": {},\n\t\t\t\"args\": [],\n\t\t\t\"showLog\": true\n\t\t}\n\t]\n}\n```\n\nIn order to debug a Go code, Delve looks for a \"main\" method, since that is the method that starts the entire execution flow. This way, it could be a good practice to replace the value of the \"program\" property (currently \"`${workspaceRoot}`\") with the static path to the \"main\" file. In this case, the \"program\" property could be similar to this:\n\n```json\n \"program\": \"$/Users/{your_user}/Documents/Projects/.../kubeless/cmd/kubeless/\"\n```\n\nDone. Now the Kubeless code is ready to be debugged.\n"
  },
  {
    "path": "docs/dev-guide.md",
    "content": "# Kubeless developer guide\n\nThis will cover the steps need to be done in order to build your local development environment for Kubeless.\n\n## Setting things up\n\nAs Kubeless project is mainly developed in the Go Programming Language, the first thing you should do is guarantee that Go is installed and all environment variables are properly set.\n\nIn this example we will use Ubuntu Linux 16.04.2 LTS as the target host on where the project will be built.\n\n### Installing Go\n\n* Visit [https://golang.org/dl/](https://golang.org/dl/)\n* Download the most recent Go version (here we used 1.9) and unpack the file\n* Check the installation process on [https://golang.org/doc/install](https://golang.org/doc/install)\n* Set the Go environment variables\n\n```bash\nexport GOROOT=/GoDir/go\nexport GOPATH=/GoDir/go/bin\nexport PATH=$GOPATH:$PATH\n```\n\n### Create a working directory for the project\n\n```bash\nexport KUBELESS_WORKING_DIR=$GOROOT/src/github.com/kubeless/\nmkdir -p $KUBELESS_WORKING_DIR\n```\n\n### Fork the repository\n\n1. Visit the repo: [https://github.com/kubeless/kubeless](https://github.com/kubeless/kubeless)\n1. 
Click `Fork` button (top right) to establish a cloud-based fork.\n\n### Clone from your fork\n\n```bash\ncd $KUBELESS_WORKING_DIR\ngit clone https://github.com/<YOUR FORK>\ncd $KUBELESS_WORKING_DIR/kubeless\ngit remote add upstream https://github.com/kubeless/kubeless.git\n\n# Never push to upstream master\ngit remote set-url --push upstream no_push\n# Checking your remote set correctly\ngit remote -v\n```\n\n### Bootstrapping your local dev environment\n\nTo get all the needed tools to build and test, run:\n\n```bash\ncd $KUBELESS_WORKING_DIR/kubeless\nmake bootstrap\n```\n\nOr if you want to use a containerized environment you can use [minikube](https://github.com/kubernetes/minikube).\nIf you already have minikube use the following script to set it up:\n\n```bash\ncd $KUBELESS_WORKING_DIR/kubeless\n./script/start-test-environment.sh\n```\n\nThis will start a new minikube virtual machine and will open a bash shell in which you can build any local binary or execute the tests. Note that the Kubeless code will be mounted from outside so you can still edit your files with your favourite text editor.\n\n### Building local binaries\n\nTo make the binaries for your platform, run:\n\n```bash\ncd $KUBELESS_WORKING_DIR/kubeless\nmake binary\nmake function-controller\n```\n\nThis will instruct \"make\" to run the scripts to build the kubeless client and the kubeless controller image.\n\nYou can build kubeless for multiple platforms with:\n\n```bash\nmake binary-cross\n```\n\nThe binaries accordingly located at `bundles/kubeless_$OS_$arch` folder.\n\n### Building Trigger Controllers\n\nEach Kubeless trigger controller is being developed on its own repository. 
You can find more information about those controllers in their repositories:\n\n - [HTTP Trigger](https://github.com/kubeless/http-trigger)\n - [CronJob Trigger](https://github.com/kubeless/cronjob-trigger)\n - [Kafka Trigger](https://github.com/kubeless/kafka-trigger)\n - [NATS Trigger](https://github.com/kubeless/nats-trigger)\n - [AWS Kinesis Trigger](https://github.com/kubeless/kinesis-trigger)\n\n### Building k8s manifests file\n\nTo regenerate the most updated k8s manifests file, run:\n\n> Note that you will need the [`kubecfg`](https://github.com/ksonnet/kubecfg/releases/) in your `PATH` in order to generate the Kubeless manifests.\n\n```bash\ncd $KUBELESS_WORKING_DIR\nexport KUBECFG_JPATH=$PWD/ksonnet-lib\ngit clone --depth=1 https://github.com/ksonnet/ksonnet-lib.git\ncd $KUBELESS_WORKING_DIR/kubeless\nmake all-yaml\n```\n\nIf everything is ok, you'll have generated manifests file under the `$KUBELESS_WORKING_DIR` root directory:\n\n```\nkubeless-openshift.yaml\nkubeless-non-rbac.yaml\nkubeless.yaml\n```\n\nYou can also generate them separated using the following commands:\n\n```bash\nmake kubeless-openshift.yaml\nmake kubeless-non-rbac.yaml\nmake kubeless.yaml\n```\n\n### Uploading your kubeless image to Docker Hub\n\nUsually you will need to upload your controller image to a repository so you can make it available for your Kubernetes cluster, whenever it is running.\n\nTo do so, run the commands:\n\n```bash\ndocker login -u=<dockerhubuser> -e=<e-mail>\ndocker tag kubeless-controller-manager <your-docker-hub-repo>/kubeless-test:latest\ndocker push <your-docker-hub-repo>/kubeless-test:latest\n```\n\nMake sure your image repository is correctly referenced in the \"containers\" session on the yaml file.\n\n```yaml\n      containers:\n      - image: fabriciosanchez/kubeless-test:latest\n        imagePullPolicy: Always\n        name: kubeless-controller\n      serviceAccountName: controller-acct\n```\n\n**Hint:** take a look at the `imagePullPolicy` 
configuration if you are sending images with tags (e. g. \"latest\") to the Kubernetes cluster. This option controls the image caching mechanism for Kubernetes and you may encounter problems if new images enters the cluster with the same name. They might not be properly pulled for example.\n\nIn order to upload your kubeless controller image to Kubernetes, you should use kubectl as follows, informing the yaml file with the required descriptions of your deployment.\n\n```bash\nkubectl create ns kubeless\nkubectl create -f <path-to-yaml-file>/kubeless.yaml\n```\n\n### Working on your local branch\n\nBranch from it:\n\n```bash\ngit checkout -b myfeature\n```\n\nThen start working on your `myfeature` branch.\n\n#### Keep your branch in sync\n\n```bash\n# While on your myfeature branch\ngit fetch upstream\ngit rebase upstream/master\n```\n\n#### Commit your changes\n\n```bash\ngit commit\n```\n\nLikely you go back and edit/build/test some more then `commit --amend` in a few cycles.\n\n#### Push to your origin first\n\n```bash\ngit push origin myfeature\n```\n\n### Updating generated files\n\nThere are several files that are automatically generated by Kubernetes [code-generator](https://github.com/kubernetes/code-generator) based on the API [specification](https://github.com/kubeless/kubeless/tree/master/pkg/apis/kubeless) in the repository.\n\nThese include:\n\n* Clientset\n* Listers\n* Shared informers\n* Deepcopy functions\n\nIf you make any changes to API specification, you will need to run `make update` to regenerate clientset, informers, lister and deepcopy functions.\n\n### Testing kubeless with local minikube\n\nThe simplest way to try kubeless is deploying it with [minikube](https://github.com/kubernetes/minikube)\n\nYou can start working with the local minikube VM and test your changes building the controller image and running your tests. 
Once you are happy with the result and you are ready to send a pull request you should run the unit and end-to-end tests (to spot possible issues with your changes):\n\n```bash\nmake validation\nmake test\nmake build_and_test\n```\n\nNote that for running the end-to-end tests you need to provide a clean profile of minikube (you can create a specific profile for the tests with `minikube profile tests`).\n\nAny new feature/bug fix made to the code should be accompanied by a unit or end to end test.\n\n### Create a pull request\n\n1. Visit your fork at [https://github.com/$your_github_username/kubeless](https://github.com/$your_github_username/kubeless).\n1. Click the `Compare & pull request` button next to your `myfeature` branch.\n1. Make sure you fill up clearly the description, point out the particular\n   issue your PR is mitigating, and ask for code review.\n\n## Scripting build and publishing\n\nExample of shell script to setup a local environment, build the kubeless binaries and make it available on kubernetes.\n\n```bash\n#!/bin/bash\n\n\n# Please set GOROOT and GOPATH appropriately before running!\n#rm -rf $GOROOT/src/github.com\n\n#export GOROOT=\n#export GOPATH=\n#export PATH=$GOPATH:$PATH\n\n#KUBELESS_WORKING_DIR=$GOPATH/src/github.com/kubeless/\n#mkdir -p $KUBELESS_WORKING_DIR\n#cd $KUBELESS_WORKING_DIR\n#git clone https://github.com/<INCLUDE HERE YOUR FORK AND UNCOMMENT>\n#cd $KUBELESS_WORKING_DIR/kubeless\n#git remote add upstream https://github.com/DXBrazil/kubeless\n#git remote set-url --push upstream no_push\n#git remote -v\n# git checkout <INCLUDE HERE YOUR BRANCH AND UNCOMMENT>\n#git fetch\n\n#make binary\n#make controller-image\n\n#docker login -u=<your docker hub user> -e=<your e-mail>\n#docker tag kubeless-controller <yourrepo>/<your-image>\n#docker push <your repo>/<your-image>\n\n#kubectl delete -f <path-to-yaml>\n#kubectl delete namespace kubeless\n\n#a=Terminating\n\n#while [ $a == Terminating ]\n#do\n\n#a=`kubectl get ns | grep Termina | 
awk '{print $2}'`\n#sleep 5\n\n#done\n\n#kubectl create namespace kubeless\n#kubectl create -f <path-to-yaml>\n```\n\n## Manage dependencies\n\nWe use [dep](https://github.com/golang/dep) to vendor the dependencies. Take a quick look at the README to understand how it works. Packages that Kubeless relies on are listed at [Gopkg.toml](https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md).\n\nHappy hacking!\n"
  },
  {
    "path": "docs/function-controller-configuration.md",
    "content": "# Controller configurations for Functions\n\n## Using ConfigMap\n\nConfigurations for functions can be done in `ConfigMap`: `kubeless-config` which is a part of `Kubeless` deployment manifests.\n\nDeployments for function can be configured in `data` inside the `ConfigMap`, using key `deployment`, which takes a string in the form of `yaml/json` and is driven by the structure of [v1.Deployment](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#deployment-v1-apps).\nUnknown fields or duplicate keys in the provided deployment data will result in an error.\n\nE.g. In the below configuration, new **annotations** are added globally to all function deployments and podTemplates and **replicas** for each function pod will be `2`.\n\n```yaml\napiVersion: v1\ndata:\n  deployment: |-\n    {\n      \"metadata\": {\n          \"annotations\":{\n            \"annotation-to-deployment\": \"value\"\n          }\n      },\n      \"spec\": {\n        \"replicas\": 2,\n        \"template\": {\n          \"spec\": {\n            \"annotations\": {\n              \"annotations-to-pod\": \"value\"\n            },\n            \"containers\": [{\n              \"resources\": {\n                \"requests\": {\n                  \"cpu\": \"100m\"\n                }\n              }\n            }]\n          }\n        }\n      }\n    }\n  ingress-enabled: \"false\"\n  service-type: ClusterIP\nkind: ConfigMap\nmetadata:\n  name: kubeless-config\n  namespace: kubeless\n```\n\nThe following configuration will result in an error because of duplicate key:\n\n```yaml\napiVersion: v1\ndata:\n  deployment: |-\n    {\n      \"metadata\": {\n          \"annotations\":{\n            \"annotation-to-deployment\": \"value\",\n            \"annotation-to-deployment\": \"other value\",\n          }\n      }\n    }\n  ingress-enabled: \"false\"\n  service-type: ClusterIP\nkind: ConfigMap\nmetadata:\n  name: kubeless-config\n  namespace: kubeless\n```\n\nThe following 
configuration will result in an error because of unknown key:\n\n```yaml\napiVersion: v1\ndata:\n  deployment: |-\n    {\n      \"unknown\": \"hack\",\n    }\n  ingress-enabled: \"false\"\n  service-type: ClusterIP\nkind: ConfigMap\nmetadata:\n  name: kubeless-config\n  namespace: kubeless\n```\n\nIt is **recommended** to have controlled custom configurations on the following **items** (*but is not limited to just these*):\n\n> Warning: You should know what you are doing.\n\n- v1beta2.Deployment.ObjectMeta.Annotations\n- v1beta2.Deployment.Spec.replicas\n- v1beta2.Deployment.Spec.Strategy\n- v1beta2.Deployment.Spec.Template.ObjectMeta.Annotations\n- v1beta2.Deployment.Spec.Template.Spec.NodeSelector\n- v1beta2.Deployment.Spec.Template.Spec.NodeName\n\nHaving said all that, if one wants to override configurations from the `ConfigMap` then in `Function` manifest one needs to provide the details as follows:\n\n```yaml\napiVersion: kubeless.io/v1beta1\nkind: Function\nmetadata:\n  name: testfunc\nspec:\n  deployment:  ### Definition as per v1beta2.Deployment\n    metadata:\n      annotations:\n        \"annotation-to-deploy\": \"final-value-in-deployment\"\n    spec:\n      replicas: 2  ### Final deployment gets Replicas as 2\n      template:\n        metadata:\n          annotations:\n            \"annotation-to-pod\": \"value\"\n  deps: \"\"\n  function: |\n    module.exports = {\n      foo: function (req, res) {\n            res.end('hello world updated!!!')\n      }\n    }\n  function-content-type: text\n  handler: hello.foo\n  runtime: nodejs8\n  service:\n    ports:\n    - name: http-function-port\n      port: 8080\n      protocol: TCP\n      targetPort: 8080\n    type: ClusterIP\n```\n\n## Install kubeless in different namespace\n\nIf you have installed kubeless into some other namespace (which is not called `kubeless`) or changed the name of the config file from kubeless-config to something else, then you have to export the kubeless namespace and the name of 
kubeless config as environment variables before using the kubeless cli. This can be done as follows:\n\n```bash\n$ export KUBELESS_NAMESPACE=<name of namespace>\n$ export KUBELESS_CONFIG=<name of config file>\n```\n\nor the following information can be added to `functions.kubeless.io` `CustomResourceDefinition` as `annotations`. E.g. below `CustomResourceDefinition` will signify `kubeless-controller` is installed in namespace `kubeless-new-namespace` and config name is `kubeless-config-new-name`\n\n```yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: functions.kubeless.io\n  annotations:\n    kubeless.io/namespace: kubeless-new-namespace\n    kubeless.io/config: kubeless-config-new-name\nspec:\n  group: kubeless.io\n  names:\n    kind: Function\n    plural: functions\n    singular: function\n  scope: Namespaced\n  version: v1beta1\n```\n\nThe priority of deciding the `namespace` and `config name` (highest to lowest) is:\n\n- Environment variables\n- Annotations in `functions.kubeless.io` CRD\n- default: `namespace` is `kubeless` and `ConfigMap` is `kubeless-config`\n\n### Install several instances of kubeless (multi-tenancy)\n\nIt is possible to install Kubeless in several namespaces. This allows administrators to have several instances of Kubeless that can be configured differently (for example using different runtime images or with different Docker credentials).\n\nIn order to install Kubeless in a custom namespace (or in several ones) it's necessary to:\n\n - Install the `CustomResourceDefinitions` and `ClusterRoles` as in the default scenario. These resources are not namespaced which means that you need to install them just once. It is also recommendable to split the current rules of the `ClusterRole` into two different roles: one just for accessing cluster-wide resources like `CustomResourceDefinitions` and a second one with the rest of resources. 
That way it's possible to attach the first `ClusterRole` to a `ClusterRoleBinding` as the default scenario but attaching the second one with a namespaced `RoleBinding` to avoid unauthorized access to other namespaces. More information about `RBAC` [here](https://kubernetes.io/docs/reference/access-authn-authz/rbac/).\n - The rest of the resources you can find in the installation manifest (`Deployment`, `ConfigMap`, `ServiceAccount`...) are namespaced. This means that it's required to modify the `metadata.namespace` of each one of those to target the correct namespace.\n - The next step is to set in the Kubeless ConfigMap the namespace in which the controller should listen for functions. This is set in the variable `functions-namespace`. If this value is empty it will try to find functions in all namespaces.\n\nThis is an example of a manifest (simplified) for a Kubeless instance deployed in the namespace \"test\":\n\n```yaml\n# RBAC Configuration\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n  name: kubeless-controller-read\nrules:\n- apiGroups:\n  - apiextensions.k8s.io\n  resources:\n  - customresourcedefinitions\n  verbs:\n  - get\n  - list\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n  name: kubeless-controller-deployer\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - services\n  - configmaps\n  verbs:\n  ... 
# The rest of the ClusterRole has been omitted\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: controller-acct\n  namespace: test\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n  name: kubeless-controller-read-test\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: kubeless-controller-read\nsubjects:\n- kind: ServiceAccount\n  name: controller-acct\n  namespace: test\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: RoleBinding\nmetadata:\n  name: kubeless-controller-deployer\n  namespace: test\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: kubeless-controller-deployer\nsubjects:\n- kind: ServiceAccount\n  name: controller-acct\n\n# Kubeless Configuration\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: kubeless-config\n  namespace: test\ndata:\n  functions-namespace: \"test\"\n  ...  # The rest of the ConfigMap data has been omitted\n\n# Kubeless core controller\n---\napiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n  labels:\n    kubeless: controller\n  name: kubeless-controller-manager\n  namespace: test\nspec:\n  ... # The rest of the Deployment has been omitted\n```\n\nThe same process should be followed for any trigger controller installed (Kafka, Nats, ...): Adapt the RBAC configuration and change the resources namespace. These controllers will read the `functions-namespace` property from the main ConfigMap.\n\n## Using custom images\n\nIt is possible to configure the different images that Kubeless uses to deploy and execute functions. In this ConfigMap you can configure:\n\n - Different or additional runtimes. For doing so it is possible to modify/add a runtime in the field `runtime-images`. Runtimes are categorized by major version. See the guide for [implementing a new runtime](/docs/implementing-new-runtime) for more information. Each major version has:\n  - Name: Unique ID of the runtime. 
It should contain the runtime name and version.\n  - Version: Major and minor version of the runtime.\n  - Runtime Image: Image used to execute the function.\n  - Init Image: Image used for installing the function and/or dependencies.\n  - (Optional) Image Pull Secrets: Secret required to pull the image in case the repository is private.\n  - (Optional) Environment variables.\n  - (Optional) Secrets: Shared with the container as volumes mounted at `/var/run/secrets/kubeless.io/`.\n - The image used to populate the base image with the function. This is called `provision-image`. This image should have at least `unzip`, `GNU tar`, `gzip`, `bzip2`, `xz` and `curl`. It is also possible to specify `provision-image-secret` to specify a secret to pull that image from a private registry.\n - The image used to build function images. This is called `builder-image`. This image is optional since its usage can be disabled with the property `enable-build-step`. A Dockerfile to build this image can be found [here](https://github.com/kubeless/kubeless/tree/master/docker/function-image-builder). It is also possible to specify `builder-image-secret` to specify a secret to pull that image from a private registry.\n\n## Authenticate Kubeless Function Controller using OAuth Bearer Token\n\nIn some non-RBAC k8s deployments using webhook authorization, service accounts may have insufficient privileges to perform all k8s operations that the Kubeless Function Controller requires for interacting with the cluster. It's possible to override the default behavior of the Kubeless Function Controller using a k8s serviceaccount for authentication with the cluster and instead use a provided OAuth Bearer token for all k8s operations.\n\nThis can be done by creating a k8s secret and mounting that secret as a volume on controller pods, then setting the environmental variable `KUBELESS_TOKEN_FILE_PATH` to the filepath of that secret. 
Be sure to set this environmental variable on the controller template spec or to every pod created in the deployment.\n\nFor example, if the bearer token is mounted at /mnt/secrets/bearer-token, this k8s spec can use it:\n\n```yaml\n# Kubeless core controller\n---\napiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n  name: kubeless-controller-manager\n  namespace: kubeless\n  labels:\n    kubeless: controller\nspec:\n  template:\n    metadata:\n      labels:\n        kubeless: controller\n    spec:\n      containers:\n      - env:\n        - name: KUBELESS_TOKEN_FILE_PATH\n          value: /mnt/secrets/bearer-token\n  ... # The rest of the Deployment has been omitted\n```\n\n"
  },
  {
    "path": "docs/http-triggers.md",
    "content": "# Expose and secure Kubeless functions\n\nKubeless leverages [Kubernetes ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) to provide routing for functions. By default, a deployed function will be matched to a Kubernetes service using ClusterIP as the service. That means that the function is not exposed publicly. Because of that, we provide the `kubeless trigger http` command that can make a function publicly available. This guide provides a quick sample on how to do it.\n\n## Ingress controller\n\nIn order to create routes for functions in Kubeless, you must have an Ingress controller running. There are several options to deploy it. In this document we point to several different solutions that you can choose:\n\n> Note: In case Kubeless is running in a GKE cluster you will need to disable the default Ingress controller provided by GKE. The native controller doesn't work with services that have a type different  than NodePort (see [this issue](https://github.com/kubernetes/ingress-nginx/issues/1417)). 
In order to expose a Kubeless function, disable the default controller and deploy one of the options described below.\n\n### Minikube Ingress addon\n\nIf your cluster is running in Minikube you can enable the Ingress controller just by executing:\n\n```console\nminikube addons enable ingress\n```\n\nAfter a couple of minutes you should be able to see the controller running in the `kube-system` namespace:\n\n```console\n$ kubectl get pod -n kube-system -l app=nginx-ingress-controller\nNAME                             READY     STATUS    RESTARTS   AGE\nnginx-ingress-controller-pj2pz   1/1       Running   0          25s\n```\n\n### Nginx Ingress\n\nYou can deploy a Nginx Ingress controller manually (it is the same controller as the one in the Minikube addon) following the instructions that can be found [here](https://github.com/kubernetes/ingress-nginx/blob/master/deploy/README.md).\n\n### Kong Ingress\n\n[Kong](https://getkong.org) has an Ingress controller that can be used to expose functions and secure them. You can check the deployment instructions in [their repository](https://github.com/Kong/kubernetes-ingress-controller/tree/master/docs/deployment). Once Kong is deployed you should be able to see the controller in the `kong` namespace:\n\n```console\nkubectl get pods -n kong\nNAME                                       READY     STATUS    RESTARTS   AGE\nkong-56c4cc55c9-78srh                      1/1       Running   0          1h\nkong-ingress-controller-79f48dd4d7-ql4vw   2/2       Running   0          1h\npostgres-0                                 1/1       Running   1          22h\n```\n\n### Traefik Ingress\n\n[Traefik](http://traefik.io) provides an Ingress controller as well. To deploy it follow the steps described at [this guide](https://docs.traefik.io/user-guide/kubernetes/). 
As a result, you will be able to see the traefik controller running in the `kube-system` namespace:\n\n```console\nkubectl get pod -n kube-system -l name=traefik-ingress-lb\nNAME                                          READY     STATUS    RESTARTS   AGE\ntraefik-ingress-controller-57b4767f99-g42n2   1/1       Running   0          1m\n```\n\n## Deploy function with Kubeless CLI\n\nOnce you have a Ingress Controller running you should be able to start deploying functions and expose them publicly. First deploy a function:\n\n```console\n$ cd examples\n$ kubeless function deploy get-python \\\n                    --runtime python2.7 \\\n                    --handler helloget.foo \\\n                    --from-file python/helloget.py\n\n$ kubectl get po\nNAME                          READY     STATUS    RESTARTS   AGE\nget-python-1796153810-krrf3   1/1       Running   0          2s\n\n$ kubectl get svc\nNAME         CLUSTER-IP   EXTERNAL-IP   PORT(S)    AGE\nget-python   10.0.0.26    <none>        8080/TCP   44s\n```\n\n## Expose a function\n\nIn order to expose a function, it is necessary to create a HTTP Trigger object. The Kubeless CLI provides the commands required to do so:\n\n```console\n$ kubeless trigger http create --help\nCreate a http trigger\n\nUsage:\n  kubeless trigger http create <http_trigger_name> FLAG [flags]\n\nFlags:\n      --basic-auth-secret string   Specify an existing secret name for basic authentication\n      --cors-enable                If true then cors will be enabled on Http Trigger\n      --enableTLSAcme              If true, routing rule will be configured for use with kube-lego\n      --function-name string       Name of the function to be associated with trigger\n      --gateway string             Specify a valid gateway for the Ingress. 
Supported: nginx, traefik, kong (default \"nginx\")\n  -h, --help                       help for create\n      --hostname string            Specify a valid hostname for the function\n      --namespace string           Specify namespace for the HTTP trigger\n      --path string                Ingress path for the function\n      --tls-secret string          Specify an existing secret that contains a TLS private key and certificate to secure ingress\n```\n\nWe will create a http trigger to `get-python` function:\n\n```console\n$ kubeless trigger http create get-python --function-name get-python\n```\n\nThis command will create an ingress object. We can see it with kubectl (this guide is run on minikube):\n\n```console\n$ kubectl get ing\nNAME           HOSTS                              ADDRESS          PORTS     AGE\nget-python    get-python.192.168.99.100.nip.io    192.168.99.100   80        59s\n```\n\nKubeless creates a default hostname in form of <function-name>.<master-address>.nip.io. 
Alternatively, you can provide a real hostname with `--hostname` flag or use a different `--path` like this:\n\n```console\n$ kubeless trigger http create get-python --function-name get-python --path echo --hostname example.com\n$ kubectl get ing\nNAME          HOSTS                              ADDRESS          PORTS     AGE\nget-python    example.com                                          80        6s\n```\n\nBut you have to make sure your hostname is configured properly.\n\nYou can test the created HTTP trigger with the following command:\n\n```console\n$ curl --data '{\"Another\": \"Echo\"}' \\\n  --header \"Host: get-python.192.168.99.100.nip.io\" \\\n  --header \"Content-Type:application/json\" \\\n  192.168.99.100/echo\n{\"Another\": \"Echo\"}\n```\n\n## Enable TLS\n\nOnce you have one of the supported Ingress Controller it is possible to enable TLS using a certificate:\n\n - Automatically generated using Let's Encrypt and [cert-manager](https://github.com/jetstack/cert-manager)\n - Self signed\n - Provided by a certificate issuer\n\n### Using Let’s Encrypt’s CA\n\nWhen you have running Kube-lego, you can deploy function and create an HTTP trigger with flag `--enableTLSAcme` enabled as below:\n\n```console\n$ kubeless trigger http create get-python --function-name get-python --path get-python --enableTLSAcme\n```\n\nRunning the above command, Kubeless will automatically create a ingress object with annotation `kubernetes.io/tls-acme: 'true'` set which will be used by Kube-lego to configure the service certificate.\n\n### Create a self-signed certificate\n\nIf you don't have a working certificate it is possible to generate a dummy one to be able to use TLS with your functions. 
To generate the certificate and its secret execute the following:\n\n```console\n$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj \"/CN=foo.bar.com\"\nGenerating a 2048 bit RSA private key\n..........................................................................+++\n.......................................................+++\nwriting new private key to 'tls.key'\n-----\n$ kubectl create secret tls tls-secret --key tls.key --cert tls.crt\nsecret \"tls-secret\" created\n```\n\n### Use an existing certificate\n\nNow that you have a certificate, you can use it to set up TLS for the HTTP trigger, thereby securing functions:\n\n```console\n$ kubeless trigger http create get-python --function-name get-python --hostname foo.bar.com --tls-secret secret-name\n```\n\nOnce the Ingress rule has been deployed you can verify that the function is accessible through HTTPS:\n\n```console\n$ kubectl get ingress\nNAME             HOSTS            ADDRESS          PORTS     AGE\nget-python       foo.bar.com      192.168.99.100   80, 443   4m\n$ curl -k https://192.168.99.100 --header 'Host: foo.bar.com'\nhello world\n```\n\n## Enable Basic Authentication\n\nOnce you have one of the supported Ingress Controllers it is possible to enable Basic Authentication either:\n\n - Creating a secret with the content of the user to authenticate. 
This is valid for the Nginx and Traefik controllers.\n - Adding the Kong plugin for basic authentication.\n\n### Enable Basic Authentication with Nginx or Traefik\n\nFor enabling authentication for a function, the first thing is creating a secret with the user and password:\n\n```console\n$ htpasswd -cb auth foo bar\nAdding password for user foo\n$ kubectl create secret generic basic-auth --from-file=auth\nsecret \"basic-auth\" created\n```\n\nNow you just need to create a HTTP trigger using that secret.\n\n```console\n$ kubeless trigger http create get-python --function-name get-python --basic-auth-secret basic-auth --gateway nginx\nINFO[0000] HTTP trigger get-python created in namespace default successfully!\n```\n\n> Note: The command is the same for the case of Traefik, just use `--gateway traefik` instead\n\nOnce the Ingress rule has been deployed you can verify that the function is accessible just for the proper user and password:\n\n```console\n$ kubectl get ingress\nNAME         HOSTS                              ADDRESS          PORTS     AGE\nget-python   get-python.192.168.99.100.nip.io   192.168.99.100   80        1m\n$ curl --header 'Host: get-python.192.168.99.100.nip.io' 192.168.99.100\n<html>\n<head><title>401 Authorization Required</title></head>\n<body bgcolor=\"white\">\n<center><h1>401 Authorization Required</h1></center>\n<hr><center>nginx/1.13.7</center>\n</body>\n</html>\n$ curl -u foo:bar --header 'Host: get-python.192.168.99.100.nip.io' 192.168.99.100\nhello world\n```\n\n### Enable Basic Authentication with Kong\n\nIt is not yet supported to create an HTTP trigger with basic authentication using Kong as backend but the steps to do it manually are pretty simple. It is possible to do so using Kong plugins. 
In the [next section](#enable-kong-security-plugins) we explain how to enable any of the available Kong plugins and in particular we explain how to enable the basic-auth plugin.\n\n## Enable CORS\n\nIt's possible to enable CORS requests at the HTTPTrigger level. To do so use the --cors-enable flag when deploying\nthe HTTPTrigger or add the field cors-enable: true to the YAML manifest.\n\n## Add arbitrary annotations\n\nIt is also possible to add any annotation to the resulting Ingress object if you add those to the HTTPTrigger. For example:\n\n```\napiVersion: kubeless.io/v1beta1\nkind: HTTPTrigger\nmetadata:\n name: cors-trigger\n annotations:\n  nginx.ingress.kubernetes.io/enable-cors: \"true\"\n  nginx.ingress.kubernetes.io/cors-allow-methods: \"GET\"\nspec:\n function-name: get-python\n host-name: example.com\n path: echo\n```\n\nThe above will create an Ingress object with the annotations nginx.ingress.kubernetes.io/enable-cors: \"true\"\nand nginx.ingress.kubernetes.io/cors-allow-methods: \"GET\".\n\n## Enable Kong Security plugins\n\nKong has available several free [plugins](https://konghq.com/plugins/) that can be used along with the Kong Ingress controller for securing the access to Kubeless functions. 
In particular, the list of security plugins that can be used is:\n\n - Basic Authentication\n - Key Authentication\n - OAuth 2.0\n - JWT\n - ACL\n - HMAC Authentication\n - LDAP Authentication\n\nOnce you have Kong and its Ingress controller running in your cluster the generic steps to use any plugin are:\n\n - Deploy a basic HTTP trigger for the target function using `--gateway kong`.\n - Create a Kubernetes object for the plugin you want to use.\n - Add a Kong Consumer.\n - Create the specific credentials or follow any additional steps that the plugin may require.\n - Associate the credentials/plugin with the Ingress object created in the first step.\n\nThe specific steps that are required to use a plugin can be found in the [plugins](https://konghq.com/plugins/) page. As an example we will configure the plugin [basic-auth](https://getkong.org/plugins/basic-authentication/) for our function `get-python`.\n\n### Deploy a basic HTTP trigger\n\nFirst we need to create a HTTP trigger to generate the Ingress object that will expose our function.\n\n```console\n$ kubeless trigger http create get-python --function-name get-python --gateway kong --hostname foo.bar.com\nINFO[0000] HTTP trigger get-python created in namespace default successfully!\n```\n\n### Add the basic-auth plugin\n\nThe next step is creating the Custom Resource related to the Kong basic authentication plugin. 
You can see the possible configuration options available in the [plugin documentation](https://getkong.org/plugins/basic-authentication).\n\n```console\n$ echo \"\napiVersion: configuration.konghq.com/v1\nkind: KongPlugin\nmetadata:\n  name: basic-auth\nconsumerRef: basic-auth\nconfig:\n  hide_credentials: false\n\" | kubectl create -f -\nkongplugin \"basic-auth\" created\n```\n\n#### Create a Consumer\n\nNow we need a [`Consumer`](https://getkong.org/docs/0.13.x/getting-started/adding-consumers/#adding-consumers) for the plugin.\n\n```console\n$ echo \"\napiVersion: configuration.konghq.com/v1\nkind: KongConsumer\nmetadata:\n  name: basic-auth\nusername: user\n\" | kubectl create -f -\nkongconsumer \"basic-auth\" created\n```\n\n#### Create user credentials\n\nNow that we have a consumer we need to create the basic authentication credentials that the function is going to use:\n\n```console\n$ echo \"\napiVersion: configuration.konghq.com/v1\nkind: KongCredential\nmetadata:\n  name: basic-auth\nconsumerRef: basic-auth\ntype: basic-auth\nconfig:\n  username: user\n  password: pass\n\" | kubectl create -f -\nkongcredential \"basic-auth\" created\n```\n\n#### Associate the credentials with the Ingress object\n\nThe final step is to enable the credentials and the plugin for the function. 
For doing so we just need to add an `Annotation` in the Ingress object that we generated in the first step:\n\n```console\n$ kubectl patch ingress get-python \\\n -p '{\"metadata\":{\"annotations\":{\"basic-auth.plugin.konghq.com\":\"basic-auth\"}}}'\ningress \"get-python\" patched\n```\n\nNow that the plugin has been enabled we can verify that it is working:\n\n```console\n$ export PROXY_IP=$(minikube   service -n kong kong-proxy --url --format \"{{ .IP }}\" | head -1)\n$ export HTTP_PORT=$(minikube  service -n kong kong-proxy --url --format \"{{ .Port }}\" | head -1)\n$ curl --header \"Host: foo.bar.com\" ${PROXY_IP}:${HTTP_PORT}\n{\"message\":\"Unauthorized\"}\n$ curl -u user:pass --header \"Host: foo.bar.com\" ${PROXY_IP}:${HTTP_PORT}\nhello world\n```\n"
  },
  {
    "path": "docs/implementing-new-runtime.md",
    "content": "# How to implement a new Kubeless run time\n\nRuntimes are developed in this repository:\n\n[https://github.com/kubeless/runtimes](https://github.com/kubeless/runtimes)\n\nTo implement a new runtime or improve the existing ones check the [Contributing](https://github.com/kubeless/runtimes/blob/master/CONTRIBUTING.md) and [Developer](https://github.com/kubeless/runtimes/blob/master/DEVELOPER_GUIDE.md) guides.\n"
  },
  {
    "path": "docs/implementing-new-trigger.md",
    "content": "# How to add a new event source as Trigger\n\nKubeless [architecture](/docs/architecture) is built on core concepts of Functions, Triggers and Runtime. A _Trigger_ in Kubeless represents association between an event source and functions that need to be invoked on an event in the event source. Kubeless fully leverages the Kubernetes concepts of [custom resource definition](https://kubernetes.io/docs/concepts/api-extension/custom-resources/)(CRD) and [custom controllers](https://kubernetes.io/docs/concepts/api-extension/custom-resources/#custom-controllers). Each trigger is expected to be modelled as Kubernetes CRD. A trigger specific custom resource controller is expected to be written that realizes how deployed functions are invoked when event occurs. Following sections document how one can add a new event source as _Trigger_ into Kubeless.\n\n## Triggers development repository\n\nEach Kubeless trigger controller is being developed on its own repository. You can find more information about those controllers in their repositories. If you want to create a new trigger you will need to create a new repository for that. These are the triggers currently available that can be used as templates for new ones:\n\n - [HTTP Trigger](https://github.com/kubeless/http-trigger)\n - [CronJob Trigger](https://github.com/kubeless/cronjob-trigger)\n - [Kafka Trigger](https://github.com/kubeless/kafka-trigger)\n - [NATS Trigger](https://github.com/kubeless/nats-trigger)\n\n## Model event source as CRD\n\nFirst step is to create a new CRD for the event source. CRD for the new triggers will be largely similar to the existing ones. 
For example below is the CRD for Kafka trigger\n\n```yaml\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: kafkatriggers.kubeless.io\nspec:\n  group: kubeless.io\n  names:\n    kind: KafkaTrigger\n    plural: kafkatriggers\n    singular: kafkatrigger\n  scope: Namespaced\n  version: v1beta1\n```\n\nGive an appropriate and intuitive name to the event source.\n\n## Model the CRD spec\n\nOnce CRD is defined, you need to model the event source and its attributes as resource object spec. Please see [API conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md) for key attributes of Kubernetes API resource object. Except for the `Spec` part, the rest of the needed parts to define a Trigger are pretty similar to other Triggers.\n\nFor e.g below is the definition of [Kafka Trigger](https://github.com/kubeless/kafka-trigger/blob/master/pkg/apis/kubeless/v1beta1/kafka_trigger.go)\n\n```go\ntype KafkaTrigger struct {\n\tmetav1.TypeMeta   `json:\",inline\"`\n\tmetav1.ObjectMeta `json:\"metadata\"`\n\tSpec              KafkaTriggerSpec `json:\"spec\"`\n}\n```\n\nYou need to model the event source attributes into the Spec attribute of the new trigger. Depending on the nature of event source you may want to associate single function or multiple functions with the event source. Use appropriate mechanism to represent the association. For e.g Kafka trigger uses Kubernetes label selector to associate any function with matching label with the event source.\n\n## Code Generation\n\nOnce you have defined the new trigger, please ensure it is placed in `pkg/apis/kubeless/v1beta1/` path, and update `register.go` to include the new trigger type.\n\nNow you can auto-generate the clientset, lister and informers for the new API resource object as well by running `make update` or `./hack/update-codegen.sh` within the trigger repository. 
Auto-generated clientset, lister and informers come in handy in writing the controller in the next step.\n\n## CRD controller\n\nHere is the most important step, i.e. writing controller itself. As far as the skeleton of controller goes, it would be pretty similar to existing controllers like Kafka trigger controller, nats trigger controller or http trigger controller. Functionally controller does two important things\n\n- watch Kubernetes API server for CRUD operations on the new trigger object and take appropriate actions.\n- when an event occurs in the event source trigger the associated functions.\n\nPlease read the code and logic for the existing [Kafka controller](https://github.com/kubeless/kafka-trigger/tree/master/pkg/controller) as a reference.\n\n## Building controller binary and docker image\n\nEnsure your controller is an independent binary that can be built from the Makefile. Please follow one of the existing controller [cmd](https://github.com/kubeless/kafka-trigger/tree/master/cmd) as a reference. Also ensure there is corresponding `Dockerfile` to build the controller image. Please see the dockerfile for other trigger controller as a [reference](https://github.com/kubeless/kafka-trigger/tree/master/docker).\n\nAdd appropriate Makefile targets so that controller binary and docker image can be built.\n\n## Manifest\n\nCreate a jsonnet file for the new trigger and ensure that generated yaml file has CRD definition, deployment for the trigger controller and necessary RBAC rules. Again most of the stuff is common with Kafka or HTTP triggers, so take existing jsonnet manifests as a reference.\n\n## CI\n\nOnce the new trigger is working it's important to add tests to ensure and preserve the trigger functionality. Each trigger should contain:\n\n - Unit tests covering the basic functionality.\n - End-to-end tests that ensure the compatibility with the latest image of the Kubeless core.\n\nThe CI used to run the tests is CircleCI. 
You can check examples of how CircleCI is configured [here](https://github.com/kubeless/kafka-trigger/blob/master/.circleci/config.yml). This file should define at least 4 jobs:\n\n - One for building the binaries and manifests.\n - Another one to test the functionality end-to-end in a Minikube scenario.\n - A third one to push the image used in the tests as `latest`.\n - A final one to auto generate a release in Github in case it's building a new tag.\n\nMost of the functionality for the above depends on scripts that have been already developed so you just need to change some data and names from the YAML to make it work.\n\nThe tests to run are defined in the folder `tests/` of each repository. These are [`bats`](https://github.com/sstephenson/bats) tests that load a common library (`script/libtest.bash`) and execute some simple scenarios. Again you can take Kafka as an example for some useful scenarios to test.\n"
  },
  {
    "path": "docs/kubeless-functions.md",
    "content": "# Kubeless Functions\n\nFunctions are the main entity in Kubeless. It is possible to write Functions in different languages but all of them share common properties like the generic interface, the default timeout or the runtime UID. In this document we are going to explain some these common properties and different runtimes availables in Kubeless. You can find in depth details about the Function specification [here](/docs/advanced-function-deployment). \n\n## Functions Interface\n\nEvery function receives two arguments: `event` and `context`. The first argument contains information about the source of the event that the function has received. The second contains general information about the function like its name or maximum timeout. This is a representation in YAML of a Kafka event:\n\n```yaml\nevent:                                  \n  data:                                         # Event data\n    foo: \"bar\"                                  # The data is parsed as JSON when required\n  event-id: \"2ebb072eb24264f55b3fff\"            # Event ID\n  event-type: \"application/json\"                # Event content type\n  event-time: \"2009-11-10 23:00:00 +0000 UTC\"   # Timestamp of the event source\n  event-namespace: \"kafkatriggers.kubeless.io\"  # Event emitter\n  extensions:                                   # Optional parameters\n    request: ...                                # Reference to the request received \n    response: ...                               # Reference to the response to send \n                                                # (specific properties will depend on the function language)\ncontext:\n    function-name: \"pubsub-nodejs\"\n    timeout: \"180\"\n    runtime: \"nodejs6\"\n    memory-limit: \"128M\"\n```\n\nFunctions should return a string that will be used as the HTTP response for the caller. 
Some runtimes may support different types (like objects) for the returned values.\n\nYou can check basic examples of every language supported in the [examples](https://github.com/kubeless/kubeless/tree/master/examples) folder.\n\n## Functions Timeout\n\nRuntimes have a maximum timeout set by the environment variable FUNC_TIMEOUT. This environment variable can be set using the CLI option `--timeout`. The default value is 180 seconds. If a function takes longer than that to execute, the process will be terminated.\n\n## Runtime User\n\nAs a [Security Context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) functions are configured to run with an unprivileged user (UID 1000) by default (except for OpenShift where the UID is automatically set). This prevents functions from having root privileges. This default behaviour can be overridden by specifying a different Security Context in the `Deployment` template that is part of the Function Spec.\n\n## Scheduled functions\n\nIt is possible to deploy functions that should be triggered following a certain schedule. For specifying the execution frequency we use the [Cron](https://en.wikipedia.org/wiki/Cron) format. Every time a scheduled function is executed, a [Job](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) is started. This Job will do a HTTP GET request to the function service and will be successful as long as the function returns 200 OK.\n\nFor executing scheduled functions we use Kubernetes [CronJobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/) using mostly the default options which means:\n - If a Job fails, it won't be restarted but it will be retried in the next scheduled event. 
The maximum time that a Job will exist is specified with the function timeout (180 seconds by default).\n - The concurrency policy is set to `Allow` so concurrent jobs may exist.\n - The history limit is set to maintain at most three successful jobs (and one failed).\n\nIf for some reason you want to modify one of the default values for a certain function you can execute `kubectl edit cronjob trigger-<func_name>` (where `func_name` is the name of your function) and modify the fields required. Once it is saved the CronJob will be updated.\n\n## Monitoring functions\n\nSome Kubeless runtimes expose metrics at `/metrics` endpoint and these metrics will be collected by Prometheus. We also include a prometheus setup in [`manifests/monitoring`](https://github.com/kubeless/kubeless/blob/master/manifests/monitoring/prometheus.yaml) to help you set it up more easily. The metrics collected are: Number of calls, succeeded and error executions and the time spent per call.\n\n## Runtime variants\n\nCheck [this document](/docs/runtimes) to get more details about supported runtimes and languages.\n"
  },
  {
    "path": "docs/kubeless-on-AKS.md",
    "content": "# Kubeless on Azure Kubernetes Service\n\n## 1. Introduction\n\nThis guide goes over the required steps for deploying Kubeless in Azure AKS (Azure Kubernetes Service). The steps in this guide require you to install:\n\n - [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) (`az`): This CLI will be used to create the cluster in AKS.\n - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/): Used for installing Kubeless.\n\n## 2. Creating an AKS cluster\n\nIn order to get Kubeless up and running on top of AKS of course you'll need an AKS cluster. Fortunately, Microsoft already did a great job documenting the entire process to accomplish that. You can reach that documentation by following [this link](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough#create-aks-cluster).\n\n### Important notes regarding the cluster creation itself\n\n* In the same document the property `--generate-ssh-keys` was used to generate the required SSH keys to the cluster deployment. If you would like to create your own keys, please use `--ssh-key-value` passing the path to your SSH pub file.\n\n## 3. Installing \"Kubeless-Controller\"\n\nAssuming that the Kubernetes cluster is up and running on top of AKS, it's time to install Kubeless. To accomplish that, please follow the steps described on Kubeless [Quick-Start Guide](/docs/quick-start).\n\n> NOTE: For [Azure AD enabled AKS clusters](https://docs.microsoft.com/en-us/azure/aks/aad-integration), support for the `kubeless` CLI to authenticate against Azure AD is only available in versions greater than `v1.0.1`.\n"
  },
  {
    "path": "docs/misc/kafka-pv-gke.yaml",
    "content": "apiVersion: v1\nkind: PersistentVolume\nmetadata:\n  name: kafka-pv\n  labels:\n    kubeless: kafka\nspec:\n  capacity:\n    storage: 1Gi\n  accessModes:\n    - ReadWriteOnce\n  persistentVolumeReclaimPolicy: Retain\n  gcePersistentDisk:\n    pdName: kubeless-kafka\n    fsType: ext4\n\n"
  },
  {
    "path": "docs/misc/kubeless-grafana-dashboard.json",
    "content": "{\n  \"id\": 1,\n  \"title\": \"Kubeless\",\n  \"description\": \"Dashboard for Kubeless\",\n  \"tags\": [],\n  \"style\": \"dark\",\n  \"timezone\": \"browser\",\n  \"editable\": true,\n  \"hideControls\": false,\n  \"sharedCrosshair\": false,\n  \"rows\": [\n    {\n      \"collapse\": false,\n      \"editable\": true,\n      \"height\": \"250px\",\n      \"panels\": [\n        {\n          \"aliasColors\": {},\n          \"bars\": false,\n          \"datasource\": \"prometheus\",\n          \"editable\": true,\n          \"error\": false,\n          \"fill\": 1,\n          \"grid\": {\n            \"threshold1\": null,\n            \"threshold1Color\": \"rgba(216, 200, 27, 0.27)\",\n            \"threshold2\": null,\n            \"threshold2Color\": \"rgba(234, 112, 112, 0.22)\"\n          },\n          \"id\": 1,\n          \"isNew\": true,\n          \"legend\": {\n            \"avg\": false,\n            \"current\": false,\n            \"max\": false,\n            \"min\": false,\n            \"show\": true,\n            \"total\": false,\n            \"values\": false\n          },\n          \"lines\": true,\n          \"linewidth\": 2,\n          \"links\": [],\n          \"nullPointMode\": \"connected\",\n          \"percentage\": false,\n          \"pointradius\": 5,\n          \"points\": false,\n          \"renderer\": \"flot\",\n          \"seriesOverrides\": [],\n          \"span\": 6,\n          \"stack\": false,\n          \"steppedLine\": false,\n          \"targets\": [\n            {\n              \"expr\": \"sum( rate(function_calls_total[5m])) by (function)\",\n              \"interval\": \"\",\n              \"intervalFactor\": 2,\n              \"legendFormat\": \"function={{function}}\",\n              \"metric\": \"function_calls_total\",\n              \"refId\": \"A\",\n              \"step\": 10\n            }\n          ],\n          \"timeFrom\": null,\n          \"timeShift\": null,\n          \"title\": \"Function 
call rate\",\n          \"tooltip\": {\n            \"msResolution\": true,\n            \"shared\": true,\n            \"sort\": 0,\n            \"value_type\": \"cumulative\"\n          },\n          \"type\": \"graph\",\n          \"xaxis\": {\n            \"show\": true\n          },\n          \"yaxes\": [\n            {\n              \"format\": \"short\",\n              \"label\": null,\n              \"logBase\": 1,\n              \"max\": null,\n              \"min\": null,\n              \"show\": true\n            },\n            {\n              \"format\": \"short\",\n              \"label\": null,\n              \"logBase\": 1,\n              \"max\": null,\n              \"min\": null,\n              \"show\": true\n            }\n          ]\n        },\n        {\n          \"aliasColors\": {},\n          \"bars\": false,\n          \"datasource\": \"prometheus\",\n          \"editable\": true,\n          \"error\": false,\n          \"fill\": 1,\n          \"grid\": {\n            \"threshold1\": null,\n            \"threshold1Color\": \"rgba(216, 200, 27, 0.27)\",\n            \"threshold2\": null,\n            \"threshold2Color\": \"rgba(234, 112, 112, 0.22)\"\n          },\n          \"id\": 2,\n          \"isNew\": true,\n          \"legend\": {\n            \"avg\": false,\n            \"current\": false,\n            \"max\": false,\n            \"min\": false,\n            \"show\": true,\n            \"total\": false,\n            \"values\": false\n          },\n          \"lines\": true,\n          \"linewidth\": 2,\n          \"links\": [],\n          \"nullPointMode\": \"connected\",\n          \"percentage\": false,\n          \"pointradius\": 5,\n          \"points\": false,\n          \"renderer\": \"flot\",\n          \"seriesOverrides\": [],\n          \"span\": 6,\n          \"stack\": false,\n          \"steppedLine\": false,\n          \"targets\": [\n            {\n              \"expr\": \"sum( 
rate(function_failures_total[5m])) by (function)\",\n              \"interval\": \"\",\n              \"intervalFactor\": 2,\n              \"legendFormat\": \"function={{function}}\",\n              \"metric\": \"function_failures_total\",\n              \"refId\": \"A\",\n              \"step\": 10\n            }\n          ],\n          \"timeFrom\": null,\n          \"timeShift\": null,\n          \"title\": \"Function failure rate\",\n          \"tooltip\": {\n            \"msResolution\": true,\n            \"shared\": true,\n            \"sort\": 0,\n            \"value_type\": \"cumulative\"\n          },\n          \"type\": \"graph\",\n          \"xaxis\": {\n            \"show\": true\n          },\n          \"yaxes\": [\n            {\n              \"format\": \"short\",\n              \"label\": null,\n              \"logBase\": 1,\n              \"max\": null,\n              \"min\": null,\n              \"show\": true\n            },\n            {\n              \"format\": \"short\",\n              \"label\": null,\n              \"logBase\": 1,\n              \"max\": null,\n              \"min\": null,\n              \"show\": true\n            }\n          ]\n        }\n      ],\n      \"title\": \"Row\"\n    },\n    {\n      \"title\": \"New row\",\n      \"height\": \"250px\",\n      \"editable\": true,\n      \"collapse\": false,\n      \"panels\": [\n        {\n          \"title\": \"Execution duration\",\n          \"error\": false,\n          \"span\": 12,\n          \"editable\": true,\n          \"type\": \"graph\",\n          \"isNew\": true,\n          \"id\": 3,\n          \"targets\": [\n            {\n              \"refId\": \"A\",\n              \"expr\": \"sum(rate(function_duration_seconds_sum[1m])) by (function)\",\n              \"intervalFactor\": 2,\n              \"metric\": \"function_duration_seconds_sum\",\n              \"step\": 4\n            }\n          ],\n          \"datasource\": \"prometheus\",\n          
\"renderer\": \"flot\",\n          \"yaxes\": [\n            {\n              \"label\": null,\n              \"show\": true,\n              \"logBase\": 1,\n              \"min\": null,\n              \"max\": null,\n              \"format\": \"short\"\n            },\n            {\n              \"label\": null,\n              \"show\": true,\n              \"logBase\": 1,\n              \"min\": null,\n              \"max\": null,\n              \"format\": \"short\"\n            }\n          ],\n          \"xaxis\": {\n            \"show\": true\n          },\n          \"grid\": {\n            \"threshold1\": null,\n            \"threshold2\": null,\n            \"threshold1Color\": \"rgba(216, 200, 27, 0.27)\",\n            \"threshold2Color\": \"rgba(234, 112, 112, 0.22)\"\n          },\n          \"lines\": true,\n          \"fill\": 1,\n          \"linewidth\": 2,\n          \"points\": false,\n          \"pointradius\": 5,\n          \"bars\": false,\n          \"stack\": false,\n          \"percentage\": false,\n          \"legend\": {\n            \"show\": true,\n            \"values\": false,\n            \"min\": false,\n            \"max\": false,\n            \"current\": false,\n            \"total\": false,\n            \"avg\": false\n          },\n          \"nullPointMode\": \"connected\",\n          \"steppedLine\": false,\n          \"tooltip\": {\n            \"value_type\": \"cumulative\",\n            \"shared\": true,\n            \"sort\": 0,\n            \"msResolution\": true\n          },\n          \"timeFrom\": null,\n          \"timeShift\": null,\n          \"aliasColors\": {},\n          \"seriesOverrides\": [],\n          \"links\": []\n        }\n      ]\n    }\n  ],\n  \"time\": {\n    \"from\": \"2017-11-23T05:29:50.547Z\",\n    \"to\": \"2017-11-23T06:51:57.387Z\"\n  },\n  \"timepicker\": {\n    \"refresh_intervals\": [\n      \"5s\",\n      \"10s\",\n      \"30s\",\n      \"1m\",\n      \"5m\",\n      \"15m\",\n      
\"30m\",\n      \"1h\",\n      \"2h\",\n      \"1d\"\n    ],\n    \"time_options\": [\n      \"5m\",\n      \"15m\",\n      \"1h\",\n      \"6h\",\n      \"12h\",\n      \"24h\",\n      \"2d\",\n      \"7d\",\n      \"30d\"\n    ]\n  },\n  \"templating\": {\n    \"list\": []\n  },\n  \"annotations\": {\n    \"list\": []\n  },\n  \"refresh\": false,\n  \"schemaVersion\": 12,\n  \"version\": 5,\n  \"links\": [],\n  \"gnetId\": null\n}"
  },
  {
    "path": "docs/misc/zookeeper-pv-gke.yaml",
    "content": "apiVersion: v1\nkind: PersistentVolume\nmetadata:\n  name: zookeeper-pv\n  labels:\n    kubeless: zookeeper\nspec:\n  capacity:\n    storage: 1Gi\n  accessModes:\n    - ReadWriteOnce\n  persistentVolumeReclaimPolicy: Retain\n  gcePersistentDisk:\n    pdName: kubeless-zookeeper\n    fsType: ext4\n\n"
  },
  {
    "path": "docs/monitoring.md",
    "content": "# Monitoring\n\n## Prometheus\n\nKubeless monitoring relies on Prometheus. The language runtimes are instrumented to automatically collect metrics for each function. \nPrometheus will scrape those metrics and display them in the default Prometheus dashboard.\n\n## Grafana\n\nYou could also use Grafana to visualize the prometheus metrics exposed by Kubeless. Example of a Grafana dashboard for Kubeless showing function call rate, function failure rate and execution duration:\n\n![Grafana](./img/kubeless-grafana-dashboard.png)\n\nSample dashboard JSON file available [here](./misc/kubeless-grafana-dashboard.json)\n"
  },
  {
    "path": "docs/proposals/decoupling-triggers-and-runtimes.md",
    "content": "# Decoupling triggers and runtimes\n\n## Definition of the problem\nCurrently for each new runtime we need to add a container image per trigger. We should design a runtime abstraction. So that:\n - Triggers can be added (http or event or something else).\n - These can be in a single language i.e golang\n - Runtimes can be added more easily\n - One function can be triggered by more than one trigger source\n - One trigger can execute more than one function\n\nWe need to define interface between trigger container and runtime. What type of protocol to use to pass the request and response.\n\n### **Warning**\n\nChanging the interface between triggers (currently embed in the runtime container) and functions will cause a breaking change. Functions working with previous versions of Kubeless may not work depending on the format of the interface chosen.\n\n## User POV\nFrom the users point of view we would support:\n\n```bash\n# As today, deploy runtime + trigger\nkubeless function deploy func --trigger-http [...]\n\n# Deploy the runtime without a trigger\nkubeless function deploy func [...]\n\n# Add a trigger linking it to a function\nkubeless trigger add http --path /func func\nkubeless trigger add kafka_topic --topic s3 func\n```\n\nNote that splitting the trigger type in different \"verbs\" allow us to easily have flags per trigger type.\nDisclaimer: we would need to define possible flags for each trigger\n\n## Trigger CRD\n\nFor enabling the above, we propose to create a new Custom Resource Definition (CRD) for triggers. This CRD will contain the fields required by its _trigger controller_ to create the resulting actionable items (like an Ingress rule for HTTP requests or a Kafka consumers). 
Each _trigger_ instance will contain as well the IDs of the functions that the _trigger_ is bind to.\n\n## Suggested architecture approach\n\nFor the moment, we will assume that the interface protocol between the trigger and the runtime will be HTTP (discussed later). \n\nSo far we can identify two types of trigger, each one of them will be managed by a _trigger controller_:\n - HTTP Trigger: This trigger should redirect HTTP(s) requests from/to the _runtime_. \n - Kafka Topic Trigger: This trigger should translate topic messages to HTTP request. This way runtimes can have an unique interface, regardless of its trigger.\n\nThis diagram shows a simplified desired architecture:\n\n![Triggers and runtime relation](./img/triggers-runtime-diagram.png)\n\nRegardless of implementation details, the Kubeless Client (or any other client) will create a Custom Resource for the desired trigger type (HTTP or Kafka), this new instance will be detected by the _Controller_ that will obtain the required information and create an _actionable item_ (like an Ingress rule). Whenever a request is made, the _actionable item_ will make an HTTP request to the _runtime container_. This will call the user function with the interface in the [section below](#function-input). Finally, the runtime container will send back the returned value of the function to the caller. This response can be discarded if the function is triggered asynchronously (for example if the trigger is a Kafka message or a scheduled event). \n\nWe will not enter into the details of the trigger resource definitions or implementations since they will be handled separately following the above architecture.\n\n## Functions interface\nRight now it doesn’t exist a standard for the interface between functions and triggers. 
The [CNCF document](https://docs.google.com/document/d/1UjW8bt5O8QBgQRILJVKZJej_IuNnxl20AJu9wA8wcdI/) doesn’t get into specifics about how the two pieces should communicate between them or which protocol they should use. Some of the existing solutions are:\n\n - AWS Lambda:\n   - Protocol: The interface between functions and trigger are \"events\". There are several types of events: s3, DynamoDB, custom applications.\n   - Parameters: AWS functions receive different parameters depending on the language but, in general, all the functions receive at least this two: \n     - \"event\"/\"input\": This is a blob in which the function receives the information to process. The blob could contain any structure and that will depend on the event source.\n     - \"context\": General information about the function environment. E.g Invoke ID, function version, function ARN…\n - OpenWhisk: \n   - Protocol: A Kafka service transforms HTTP Requests to Kafka messages in any case.\n   - Parameters: Functions receive a single argument \"parameters\" that contains a blob with the body of the HTTP request.\n - Fission:\n   - Protocol: Functions request are received in the runtimes as HTTP request what gives the opportunity to give a response directly.\n   - Parameters: All the functions receive an object \"context\". This object has different properties depending on the runtime but as minimum it has a property \"request\" to read inputs and \"response\" to answer them.\n\nRegarding the available solutions and the current architecture of Kubeless we choose a similar solution to Fission/OpenWhisk/Lambda: Use the HTTP protocol to communicate runtimes and triggers and expose at least two parameters (explained below). We can tweak this parameter depending on the runtime language in order to give different functionalities (if needed). This is the simplest solution for our use case, the most flexible and it is easy to use. 
\nAs specific proposal, the parameter should contain at least the information about the request. The properties of the request object will change depending on the runtime and the trigger source but in any case it should contain a parameter with the inputs of the request (the body of a HTTP POST or the message of a Kafka entry).\n\n### Function input\nFollowing the above premises and the [CNCF suggestion](https://docs.google.com/document/d/1UjW8bt5O8QBgQRILJVKZJej_IuNnxl20AJu9wA8wcdI/edit#heading=h.3s49zyc) this can be the a possible implementation for the input object that functions will receive the following schema (represented in JSON but the serialization may vary depending on the language):\n\n```json\n{\n  \"event\": {\n    [\"key\": \"value\"],\n    \"source\": \"string\",\n    [\"content-type\": \"string\"],\n    [\"path\": \"string\"],\n    [\"method\": \"string\"],\n    [\"headers\": \"object\"],\n    [\"topic\": \"string\"],\n    ...\n  },\n  \"context\": {\n    \"function-name\": \"string\",\n    \"runtime\": \"string\",\n    \"namespace\": \"string\",\n    \"memory-limit\": \"string\",\n    [\"schedule\": \"string\"],\n    [\"logger\": \"object\"],\n    ...\n  }\n}\n```\n\nNote: Properties with brackets can be empty. Any required property can be added in the future maintaining backwards compatibility.\n\n - Event: Information about the request\n   - \"Key\": Used to send data to the function, can be any \"key\" identifier. For example a message `{\"message\": \"Hello world!\"}` will be read in the function as `event.message`. 
That's the way Lambda and Openwhisk handle parameters.\n   - Source: Event emitter information\n   - Content-type: Explicit content type\n   - Path: (HTTP request only) Path of the call \n   - Method: (HTTP request only) HTTP method used (GET, POST, PUT…)\n   - Headers: (HTTP request only) Request headers\n   - Topic: (PubSub only) Topic of the request\n - Context: Information about the function\n   - Function name: ID of the function\n   - Runtime: Runtime ID and version\n   - Namespace: Kubernetes namespace used\n   - Memory limit: Pod memory limit\n   - Schedule: Function schedule\n   - Logger: (To be implemented) Once we have a way to store and retrieve logs this interface should implement the basic methods to write/read them. For the moment we will continue working with stdout so we won’t include this property in the first version.\n \n"
  },
  {
    "path": "docs/proposals/http-triggers.md",
    "content": "# http trigger improvements\n\nThough there is no standard on what http/https triggers of FAAS platform should support, most hosted FAAS solutions like AWS Lambda, google cloud functions, IBM cloud functions, Azure functions etc provide common functionality\n\n- http/https endpoint for the function: a fully qualified URL is automatically generated and assigned to an HTTP triggered Cloud Function which can be retrieved through respective cli or consoles\n- ability to customize the endpoint url by specifying route\n- a way to authenticate and authorize the function invoker through url\n- a way to restrict http method GET/POST/DELETE etc used to invoke function\n- stage and version functions\n\nKubeless already supports `--trigger-http`. It does seem reasonable to expect similar functionality with kubeless for http triggers. This proposal would like to articulate current gaps and suggest changes.\n\n## Challenges\n\n- In typical FAAS platforms you have [API gateway](https://martinfowler.com/articles/serverless.html) or router (for e.g AWS API gateway) which receives the requests and calls the relevant FaaS function. Sometimes it also performs authentication, input validation, response code mapping, etc. Control path (rest endpoint to create/update/delete functions) and data path (invoking function and getting response) are either combined into one entity or separated. Kubeless as native Kubernetes solution intelligently leverages kubernetes constructs and offloads control path (Kubernetes API server through CRD) and data path (through services). While it helps in many aspects, it also means we are constrained by kubernetes constructs. For e.g authenticating the function caller.\n\n- Leveraging kubernetes service as data path to call the function means we need to deal with various service types of Kubernetes for various scenarios. 
For e.g, a function deployed with Kubeless, if its only caller is microservice running in-cluster then perhaps service of `clusterIP` is needed. If you expect out of cluster callers but do not care about L7 then service of type NodePort is enough. For cases where you want L7, tls etc then Kubeless already leverages Kubernetes Ingress. Also there is PR to support headless service which makes sense for baremetal deployment. While Kubeless should be flexible to allow different use-cases, it will be challenging to generate a http endpoint for the function.\n\n- On managed FAAS platform, since function user/developer is completely taken out of the infrastructure ops there is clear separation of concerns. Kubeless leveraging K8S constructs, conscious effort must be put not to spill infrastructure or k8s concepts on to the function user. In other words be mindful of two personas using kubeless: function user and cluster/kubeless deployment operator.\n\n## Gaps\n\nWhile it's debatable what's the desirable kubeless view of http-triggers, here are current gaps from the point of view of this proposal.\n\n* tight coupling of function routing with kubernetes ingress. So what if someone does not want ingress (i.e want to use node port, headless service etc)\n  * `kubeless ingress`: function user explicitly dealing with ingress\n  * though optional `--hostname` flag for `kubeless ingress`, why should function user be aware of ingress object hosts?\n  * function user specifying the tls flags\n- kubeless client creates ingress objects\n- as function user how do i know what http/https endpoint for my function. alternatively how controller can generate consistently the URL for the function. 
\n- as a cluster operator how do i tell which ingress controller to use, or how do i customize my ingress\n\n## proposed changes\n\n- [formal specification](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#validation) of the function spec that is devoid of any k8s/infrastructure constructs for writable fields\n- function user just do the route management i.e) express desired path for the function. Function spec to carry the function user intent ie. path to function mapping.\n- rename `kubeless ingress` to `kubeless route`\n- move the ingress object creation to kubeless controller.\n- kubeless controller that has ability to provision service (as cluster ip, node port, load balancer or headless) backing the function as desired by the cluster operator  \n- introduce configmap for the controller that cluster operator can use to configure controller. for e.g details like which ingress controller to use\n- controller's ability to consistently generate http endpoint for the function irrespective of the service type backing the function and whether kubernetes ingress is used or not\n- clean up the current kubeless flags related to ingress\n\n## what do we achieve?\n\n- small step toward some of the common functionality of http triggers in other FAAS platforms\n- clean separation of concerns of function user and cluster/kubeless operator\n- extensibility of controller (where applicable) with configmap\n\n## tracking issues\n\n- [#417](https://github.com/kubeless/kubeless/issues/417) kubeless list option should give info on http/https endpoint\n- [#476](https://github.com/kubeless/kubeless/issues/476) move ingress object creation to ingress controller\n- [#474](https://github.com/kubeless/kubeless/issues/474) support flexible service types for the service backing functions\n- [#475](https://github.com/kubeless/kubeless/issues/475) introduce configmap for kubeless-controller\n- [#478](https://github.com/kubeless/kubeless/issues/478) 
rename `ingress` command to `route` \n\n\n      \n"
  },
  {
    "path": "docs/pubsub-functions.md",
    "content": "# PubSub events\n\nYou can trigger any Kubeless function by a PubSub mechanism. The PubSub function is expected to consume input messages from a predefined topic from a messaging system. Kubeless currently supports using events from Kafka and NATS messaging systems.\n\n## Kafka\n\nIn Kafka [release page](https://github.com/kubeless/kafka-trigger/releases), you can find the manifest to quickly deploy a collection of Kafka and Zookeeper statefulsets. If you have a Kafka cluster already running in the same Kubernetes environment, you can also deploy PubSub function with it. Check out [this tutorial](/docs/use-existing-kafka) for more details how to do that.\n\nIf you want to deploy the manifest we provide to deploy Kafka and Zookeeper execute the following command:\n\n```console\n$ export RELEASE=$(curl -s https://api.github.com/repos/kubeless/kafka-trigger/releases/latest | grep tag_name | cut -d '\"' -f 4)\n$ kubectl create -f https://github.com/kubeless/kafka-trigger/releases/download/$RELEASE/kafka-zookeeper-$RELEASE.yaml\n```\n\n> NOTE: Kafka statefulset uses a PVC (persistent volume claim). Depending on the configuration of your cluster you may need to provision a PV (Persistent Volume) that matches the PVC or configure dynamic storage provisioning. Otherwise Kafka pod will fail to get scheduled. Also note that Kafka is only required for PubSub functions, you can still use http triggered functions. 
Please refer to [PV](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) documentation on how to provision storage for PVC.\n\nOnce deployed, you can verify two statefulsets up and running:\n\n```\n$ kubectl -n kubeless get statefulset\nNAME      DESIRED   CURRENT   AGE\nkafka     1         1         40s\nzoo       1         1         42s\n\n$ kubectl -n kubeless get svc\nNAME        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)             AGE\nbroker      ClusterIP   None            <none>        9092/TCP            1m\nkafka       ClusterIP   10.55.250.89    <none>        9092/TCP            1m\nzoo         ClusterIP   None            <none>        9092/TCP,3888/TCP   1m\nzookeeper   ClusterIP   10.55.249.102   <none>        2181/TCP            1m\n```\n\nA function can be as simple as:\n\n```python\ndef foobar(event, context):\n  print event['data']\n  return event['data']\n```\n\nNow you can deploy a pubsub function. \n\n```console\n$ kubeless function deploy test --runtime python2.7 \\\n                                --handler test.foobar \\\n                                --from-file test.py\n```\n\nYou need to create a _Kafka_ trigger that lets you associate a function with a topic specified by `--trigger-topic` as below:\n\n```console\n$ kubeless trigger kafka create test --function-selector created-by=kubeless,function=test --trigger-topic test-topic\n```\n\nAfter that you can invoke the function by publishing messages in that topic. To allow you to easily manage topics `kubeless` provides a convenience function `kubeless topic`. You can create/delete and publish to a topic easily.\n\n```console\n$ kubeless topic create test-topic\n$ kubeless topic publish --topic test-topic --data \"Hello World!\"\n```\n\nYou can check the result in the pod logs:\n\n```console\n$ kubectl logs test-695251588-cxwmc\n...\nHello World!\n```\n## NATS\n\nIf you do not have NATS cluster its pretty easy to setup a NATS cluster. 
Run below command to deploy a [NATS operator](https://github.com/nats-io/nats-operator)\n\n```console\n$ kubectl apply -f https://github.com/nats-io/nats-operator/releases/latest/download/10-deployment.yaml\n```\n\nOnce NATS operator is up and running run below command to deploy a NATS cluster\n\n```console\necho '\napiVersion: \"nats.io/v1alpha2\"\nkind: \"NatsCluster\"\nmetadata:\n  name: \"nats\"\nspec:\n  size: 3\n  version: \"1.1.0\"\n' | kubectl apply -f - -n nats-io\n```\n\nAbove command will create NATS cluster IP service `nats.nats-io.svc.cluster.local:4222` which is the default URL Kubeless NATS trigger controller expects.\n\nNow use this manifest to deploy Kubeless NATS triggers controller.\n\n```console\n$ export RELEASE=$(curl -s https://api.github.com/repos/kubeless/nats-trigger/releases/latest | grep tag_name | cut -d '\"' -f 4)\n$ kubectl create -f https://github.com/kubeless/nats-trigger/releases/download/$RELEASE/nats-$RELEASE.yaml\n```\n\nBy default NATS trigger controller expects NATS cluster is available as Kubernetes cluster service `nats.nats-io.svc.cluster.local:4222`. You can override the default NATS cluster url used by setting the environment variable `NATS_URL` in the manifest. 
Once NATS trigger controller is set up you can deploy the function and associate function with a topic on the NATS cluster.\n\n```console\n$ kubeless function deploy pubsub-python-nats --runtime python2.7 \\\n                                --handler test.foobar \\\n                                --from-file test.py\n```\n\nAfter function is deployed you can use `kubeless trigger nats` CLI command to associate function with a topic on NATS cluster as below.\n\n```console\n$ kubeless trigger nats create pubsub-python-nats --function-selector created-by=kubeless,function=pubsub-python-nats --trigger-topic test\n```\n\nAt this point you are all set to try Kubeless NATS triggers.\n\nYou could quickly test the functionality by publishing a message to the topic, and verifying that message is seen by the pod running the function.\n\n```console\n$ kubeless trigger nats publish --url nats://nats-server-ip:4222 --topic test --message \"Hello World!\"\n```\n\nYou can check the result in the pod logs:\n\n```console\n$ kubectl logs pubsub-python-nats-5b9c849fc-tvq2l\n...\nHello World!\n```\n\n## Other commands\n\nYou can create, list and delete PubSub topics (for Kafka):\n\n```console\n$ kubeless topic create another-topic\nCreated topic \"another-topic\".\n\n$ kubeless topic delete another-topic\n\n$ kubeless topic ls\n```\n"
  },
  {
    "path": "docs/quick-start.md",
    "content": "# Installation\n\nInstallation is made of three steps:\n\n* Download the `kubeless` CLI from the [release page](https://github.com/kubeless/kubeless/releases).\n* Create a `kubeless` namespace (used by default)\n* Then use one of the YAML manifests found in the release page to deploy kubeless. It will create a _functions_ Custom Resource Definition and launch a controller.\n\nThere are several kubeless manifests being shipped for multiple k8s environments (non-rbac, rbac and openshift), pick the one that corresponds to your environment:\n\n* `kubeless-$RELEASE.yaml` is used for RBAC Kubernetes cluster.\n* `kubeless-non-rbac-$RELEASE.yaml` is used for non-RBAC Kubernetes cluster.\n* `kubeless-openshift-$RELEASE.yaml` is used to deploy Kubeless to OpenShift (1.5+).\n\nFor example, this below is a show case of deploying kubeless to a Kubernetes cluster (with RBAC available).\n\n```console\n$ export RELEASE=$(curl -s https://api.github.com/repos/kubeless/kubeless/releases/latest | grep tag_name | cut -d '\"' -f 4)\n$ kubectl create ns kubeless\n$ kubectl create -f https://github.com/kubeless/kubeless/releases/download/$RELEASE/kubeless-$RELEASE.yaml\n\n$ kubectl get pods -n kubeless\nNAME                                           READY     STATUS    RESTARTS   AGE\nkubeless-controller-manager-567dcb6c48-ssx8x   1/1       Running   0          1h\n\n$ kubectl get deployment -n kubeless\nNAME                          DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE\nkubeless-controller-manager   1         1         1            1           1h\n\n$ kubectl get customresourcedefinition\nNAME                          AGE\ncronjobtriggers.kubeless.io   1h\nfunctions.kubeless.io         1h\nhttptriggers.kubeless.io      1h\n```\n\n> Details on [installing kubeless in a different namespace](/docs/function-controller-configuration#install-kubeless-in-different-namespace) can be found here.\n\nFor installing `kubeless` CLI using execute:\n\n#### Linux and 
macOS\n\n```console\nexport OS=$(uname -s| tr '[:upper:]' '[:lower:]')\ncurl -OL https://github.com/kubeless/kubeless/releases/download/$RELEASE/kubeless_$OS-amd64.zip && \\\n  unzip kubeless_$OS-amd64.zip && \\\n  sudo mv bundles/kubeless_$OS-amd64/kubeless /usr/local/bin/\n```\n\nBinaries for x86 architectures can be found as well [in the releases page](https://github.com/kubeless/kubeless/releases).\n\n#### Windows\n\n1. Download the latest release from [the releases page](https://github.com/kubeless/kubeless/releases).\n2. Extract the content and add the `kubeless` binary to the system PATH.\n\nYou are now ready to create functions.\n\n# Sample function\n\nYou can use the CLI to create a function. Here is a toy:\n\n```python\ndef hello(event, context):\n  print (event)\n  return event['data']\n```\n\nFunctions in Kubeless have the same format regardless of the language of the function or the event source. In general, every function:\n\n - Receives an object `event` as their first parameter. This parameter includes all the information regarding the event source. In particular, the key 'data' should contain the body of the function request.\n - Receives a second object `context` with general information about the function.\n - Returns a string/object that will be used as response for the caller.\n\nYou can find more details about the function interface [here](/docs/kubeless-functions#functions-interface)\n\nYou create it with:\n\n```console\n$ kubeless function deploy hello --runtime python3.8 \\\n                                --from-file test.py \\\n                                --handler test.hello\nINFO[0000] Deploying function...\nINFO[0000] Function hello submitted for deployment\nINFO[0000] Check the deployment status executing 'kubeless function ls hello'\n```\n\nLet's dissect the command:\n\n* `hello`: This is the name of the function we want to deploy.\n* `--runtime python3.8`: This is the runtime we want to use to run our function. 
Available runtimes can be found executing `kubeless get-server-config`.\n* `--from-file test.py`: This is the file containing the function code. Specifying a zip file or a gzip/bzip2/xz compressed tar file (see [list of supported suffixes](https://en.wikipedia.org/wiki/Tar_(computing)#Suffixes_for_compressed_files) for compressed tar files) is supported as long as it doesn't exceed the maximum size for an etcd entry (1 MB).\n* `--handler test.hello`: This specifies the file and the exposed function that will be used when receiving requests. In this example we are using the function `hello` from the file `test.py`.\n\nYou can find the rest of options available when deploying a function executing `kubeless function deploy --help`\n\nYou will see the function custom resource created:\n\n```console\n$ kubectl get functions\nNAME         AGE\nhello        1h\n\n$ kubeless function ls\nNAME           \tNAMESPACE\tHANDLER       RUNTIME  \tDEPENDENCIES\tSTATUS\nhello         \tdefault  \thelloget.foo  python3.8\t            \t1/1 READY\n```\n\nYou can then call the function with:\n\n```console\n$ kubeless function call hello --data 'Hello world!'\nHello world!\n```\n\nOr you can curl directly with `kubectl proxy`using an [apiserver proxy URL](https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#manually-constructing-apiserver-proxy-urls).\nFor example:\n\n```console\n$ kubectl proxy -p 8080 &\n\n$ curl -L --data '{\"Another\": \"Echo\"}' \\\n  --header \"Content-Type:application/json\" \\\n  localhost:8080/api/v1/namespaces/default/services/hello:http-function-port/proxy/\n{\"Another\": \"Echo\"}\n```\n\nKubeless also supports [ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) which means you can provide your custom URL to the function. 
Please refer to [this doc](/docs/http-triggers) for more details.\n\n## Clean up\n\nYou can delete the function and uninstall Kubeless:\n\n```console\n$ kubeless function delete hello\n\n$ kubeless function ls\nNAME        NAMESPACE   HANDLER     RUNTIME     DEPENDENCIES    STATUS\n\n$ kubectl delete -f https://github.com/kubeless/kubeless/releases/download/$RELEASE/kubeless-$RELEASE.yaml\n```\n\n## Examples\n\nSee the [examples](https://github.com/kubeless/kubeless/tree/master/examples) directory for a list of simple examples in all the languages supported. NodeJS, Python, Golang etc ...\n\nAlso checkout the [functions repository](https://github.com/kubeless/functions), where we're building a library of ready to use kubeless examples, including an [incubator](https://github.com/kubeless/functions/tree/master/incubator) to encourage contributions from the community - **your PR is welcome** ! :)\n"
  },
  {
    "path": "docs/release-flow.md",
    "content": "# Introduction\n\nKubeless leverages [travis-ci](https://travis-ci.org/) to construct an automated release flow. A release package includes kubeless binaries for multiple platforms (linux and osx are supported) and one yaml file to deploy kubeless controller.\n\n# Checks before releasing\n\nBefore releasing it is necessary to check that the rest of projects of the Kubeless environment do not present regressions for the new changes. Before creating a new release, deploy Kubeless using the latest commit of master (using the tag \"latest\" for the controller image). Make sure that the latest image build in Travis for the Kubeless controller is being used. After that, ensure that the following projects support the new version:\n\n - [Serverless Plugin](https://github.com/serverless/serverless-kubeless)\n - [Kubeless UI](https://github.com/kubeless/kubeless-ui)\n\nIf any error is found after doing some manual testing, make sure the error is addressed before doing a release.\n\n# Kubeless release flow\n\nA release is triggered by [Travis Github Releases](https://docs.travis-ci.com/user/deployment/releases/) and based on GitHub tagging. Once a commit in the master branch is tagged, a travis job will be started to build and upload assets to Github release page under a new release with the tag name. The setup is described at `before_deploy` and `deploy` sections in `.travis.yaml`.\n\n`before_deploy` defines commands executed before releasing. At this stage, we prepare assets which will be uploaded including kubeless binaries and the yaml file. The yaml file is converted from [kubeless.jsonnet](https://github.com/kubeless/kubeless/blob/master/kubeless.jsonnet) file using [kubecfg](https://github.com/ksonnet/kubecfg). The kubeless-controller is built in format of docker image and push to [Bitnami repository](https://hub.docker.com/r/bitnami/kubeless-controller/) on DockerHub. 
Because we use sha256 digest for labeling docker images to be deployed when installing kubeless, we need to update these digests for the new release.\n\n`deploy` defines configuration for a github release. API key is the encrypted version of our Github token with scope `public_repo`. The condition for a release to be triggered is defined at `on` section:\n- it will be triggered once a commit is tagged\n- the repository is `kubeless/kubeless`\n- only travis job for `os: linux` and `go: 1.8` can do the release\n\nOnce the release job has finished a `Draft` with the release notes will appear in the [releases page](https://github.com/kubeless/kubeless/releases). Review the notes and include a summary of the changes included in the release. Delete information that is not useful for the users. Make sure that breaking changes are properly highlighted. After that click on \"Publish\" for making the new release available for anyone.\n\n# Update the rest of projects to use the new version\n\n_Note: These steps are suitable for being automated in the Travis release job_\n\nOnce the new version is available, there are several projects/files that require to be updated in order to point to the latest version:\n \n - Kubeless docs site: To point to the latest version in the docs of http://kubeless.io rebuild the last build on https://travis-ci.org/kubeless/kubeless-website.\n - Kubeless chart: Update the references for the different images or any other required change in the `chart` folder of this repository.\n - Serverless plugin: Update the `KUBELESS_VERSION` environment variable in the `.travis` file to point to the latest version.\n - [Optional] Brew recipes: An automated PR will be generated in the `homebrew-core` repository with the new version and commit ID. Unless the recipe should contain breaking changes the update will be handled by the homebrew team. If it is not the case update the [recipe](https://github.com/Homebrew/homebrew-core/blob/master/Formula/kubeless.rb) manually.\n"
  },
  {
    "path": "docs/runtimes.md",
    "content": "# Kubeless Runtime Variants\n\nBy default Kubeless has support for runtimes in different states: stable and incubator. You can find the different runtimes available in this repository:\n\n[https://github.com/kubeless/runtimes](https://github.com/kubeless/runtimes).\n\nYou can also see the list of supported runtimes that your Kubeless installation can use executing:\n\n```console\n$ kubeless get-server-config\nINFO[0000] Current Server Config:\nINFO[0000] Supported Runtimes are: python2.7, python3.4, python3.6, nodejs6, nodejs8, ruby2.3, ruby2.4, ruby2.5, php7.2, go1.10, dotnetcore2.0, java1.8, ballerina0.981.0\n```\n\nEach runtime is encapsulated in a container image. The reference to these images are injected in the Kubeless configuration.\n\n### NodeJS\n\n#### Example\n\n```js\nmodule.exports = {\n  foo: function (event, context) {\n    console.log(event);\n    return event.data;\n  }\n}\n```\n\n#### Description\n\nNodeJS functions should export the desired method using `module.exports`. You can specify dependencies using a `package.json` file. It is also possible to return an object instead of a string, this object will be stringified before returning.\n\nWhen using the Node.js runtime, it is possible to configure a [custom registry or scope](https://docs.npmjs.com/misc/scope#associating-a-scope-with-a-registry) in case a function needs to install modules from a different source. 
For doing so it is necessary to set up the environment variables *NPM_REGISTRY* and *NPM_SCOPE* when deploying the function:\n\n```console\n$ kubeless function deploy myFunction --runtime nodejs6 \\\n                                --env NPM_REGISTRY=http://my-registry.com \\\n                                --env NPM_SCOPE=@myorg \\\n                                --dependencies package.json \\\n                                --handler test.foo \\\n                                --from-file test.js\n```\n\nIt's also possible to add another piece of configuration for your NPM file if the variable `NPM_CONFIG_EXTRA` is set. In case it's used, the build process will execute `npm config set $NPM_CONFIG_EXTRA` before installing dependencies.\n\nDepending on the size of the payload sent to the NodeJS function it is possible to find the error `413 PayloadTooLargeError`. It is possible to increase this limit setting the environment variable `REQ_MB_LIMIT`. This will define the maximum size in MB that the function will accept:\n\n```console\n$ kubeless function deploy myFunction --runtime nodejs6 \\\n                                --env REQ_MB_LIMIT=50 \\\n                                --handler test.foo \\\n                                --from-file test.js\n```\n\n**For Webpack Users**\n\nYour webpacked functions will be `require()`-d in so your bundle should work out of the box. 
However, if your bundle size is approaching 1mb you should take advantage of Kubeless' ability to install dependencies for you instead of bundling them all into your payload.\n\nYou will need to customize your webpack config to suit your own project, but below is an sample config of how to achieve this in Webpack 4.x:\n\n_webpack.config.js_\n\n```js\nconst path = require(\"path\");\nconst nodeExternals = require(\"webpack-node-externals\");\nconst CopyWebpackPlugin = require(\"copy-webpack-plugin\");\n\nmodule.exports = {\n  entry: {\n    handlers: \"./handlers.js\"\n  },\n  node: {\n    __filename: true,\n    __dirname: true\n  },\n  target: \"node\",\n  // do not include dependencies in the bundle\n  externals: [nodeExternals()],\n  devtool: \"source-map\",\n  module: {\n    rules: [\n      {\n        test: /\\.js$/,\n        use: \"babel-loader\",\n        // do not transpile the depedencies\n        exclude: /node_modules/\n      }\n    ]\n  },\n  plugins: [\n    // do include the project's `package.json` in the bundle\n    new CopyWebpackPlugin([\n      {\n        from: path.join(__dirname, \"path\", \"to\", \"your\", \"package.json\"),\n        to: \"package.json\"\n      }\n    ])\n  ]\n};\n```\n\nAdditionally, in your babel config, you can specify the transpile target to be the version of node you're using for your runtime. This is an example for Babel 7.x:\n\n```js\nmodule.exports = {\n  plugins: [\n    \"@babel/plugin-proposal-class-properties\",\n    \"@babel/plugin-proposal-object-rest-spread\",\n    \"@babel/plugin-syntax-dynamic-import\",\n    \"@babel/plugin-transform-runtime\"\n  ],\n  // note the target node version here for nodejs8\n  presets: [[\"@babel/preset-env\", { targets: { node: \"8.10\" } }]]\n};\n```\n\n#### Server implementation\n\nFor the Node.js runtime we start an [Express](http://expressjs.com) server and we include the routes for serving the health check and exposing the monitoring metrics. 
Apart from that we enable [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS) requests and [Morgan](https://github.com/expressjs/morgan) for handling the logging in the server. Monitoring is supported if the function is synchronous or if it uses promises.\n\n#### Distroless Variant\n\nThere is the [distroless](https://github.com/GoogleContainerTools/distroless) variant of the Node.js 8 runtime.\nThe distroless Node.js runtime contains only the kubeless function and its runtime dependencies.\nIn particular, this variant does not contain package manager, shells or any other programs which are part of a standard Linux distribution.\n\nThe same example Node.js function from above can then be deployed:\n\n```console\n$ kubeless function deploy myFunction --runtime nodejs_distroless8 \\\n                                --env NPM_REGISTRY=http://my-registry.com \\\n                                --env NPM_SCOPE=@myorg \\\n                                --dependencies package.json \\\n                                --handler test.foo \\\n                                --from-file test.js\n```\n\n#### CloudEvents 0.1 Variant\n\n[CloudEvents](https://cloudevents.io) is a CNCF effort to standardize the way events are represented in the Cloud. 
There is a variant of the Node.js 8 runtime that is ready to receive events that follow that specification (v0.1).\n\nThis variant expects the header `application/cloudevents+json` in order to be parsed as a JSON cloud event or the different headers that are defined in the [specification](https://github.com/cloudevents/spec/blob/master/spec.md) adapting them to the Kubeless function format.\n\nThe same example Node.js function from above can then be deployed:\n\n```console\n$ kubeless function deploy myFunction --runtime nodejsCE8 \\\n                                --dependencies package.json \\\n                                --handler test.foo \\\n                                --from-file test.js\n```\n\n### Python\n\n#### Example\n\n```py\ndef handler(event, context):\n    print (event)\n    return event['data']\n```\n\n#### Description\n\nPython functions should define the desired method. You can specify dependencies using a `requirements.txt` file.\n\n#### Server implementation\n\nFor python we use [Bottle](https://bottlepy.org) and we also add routes for health check and monitoring metrics.\n\n### Ruby\n\n#### Example\n\n```rb\ndef handler(event, context)\n  puts event\n  JSON.generate(event[:data])\nend\n```\n\n#### Description\n\nRuby functions should define the desired method. You can specify dependencies using a `Gemfile` file.\n\n#### Server implementation\n\nFor the case of Ruby we use [Sinatra](http://www.sinatrarb.com) as web framework and we add the routes required for the function and the health check. Monitoring is currently not supported yet for this framework. 
PR is welcome :-)\n\n### Go\n\n#### Example\n\n```go\npackage kubeless\n\nimport \"github.com/kubeless/kubeless/pkg/functions\"\n\nfunc Handler(event functions.Event, context functions.Context) (string, error) {\n\treturn event.Data, nil\n}\n```\n\n#### Description\n\nGo functions require to import the package `github.com/kubeless/kubeless/pkg/functions` that is used to define the input parameters. The desired method should be exported in the package. You can specify dependencies using [go modules](https://blog.golang.org/using-go-modules).\n\n#### Go with Dependency Example\n\nThis is an example of a function using the `github.com/sirupsen/logrus` dependency.\n\n```go\n// hellowithdeps.go\n\npackage kubeless\n\nimport (\n\t\"github.com/kubeless/kubeless/pkg/functions\"\n\t\"github.com/sirupsen/logrus\"\n)\n\n// Hello sample function with dependencies\nfunc Hello(event functions.Event, context functions.Context) (string, error) {\n\tlogrus.Info(event.Data)\n\treturn \"Hello world!\", nil\n}\n```\n\n```go\n//go.mod\n\nmodule function\n\ngo 1.14\n\nrequire (\n\tgithub.com/sirupsen/logrus v1.6.0\n)\n```\n\n```bash\nkubeless function deploy get-go-deps --runtime go1.14 --handler hellowithdeps.Hello --from-file hellowithdeps.go --dependencies go.mod\n```\n\n#### Server implementation\n\nThe Go HTTP server doesn't include any framework since the native packages includes enough functionality to fit our needs. Since there is not a standard package that manages server logs that functionality is implemented in the same server. It is also required to implement the `ResponseWriter` interface in order to retrieve the Status Code of the response. Apart from that we enable [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS) to accept any request.\n\n#### Debugging compilation\n\nIf there is an error during the compilation of a function, the error message will be dumped to the termination log. 
If you see that the pod is crashing in an init container:
For example:\n\n```go\nfunc Foo(event functions.Event, context functions.Context) (string, error) {\n  select {\n  case <-event.Extensions.Context.Done():\n    return \"\", nil\n  case <-time.After(5 * time.Second):\n  }\n  return \"Function returned after 5 seconds\", nil\n}\n```\n\nIf the function above has a timeout smaller than 5 seconds it will exit and the code after the `select{}` won't be executed.\n\n### Java\n\n#### Example\n\n```java\npackage io.kubeless;\n\nimport io.kubeless.Event;\nimport io.kubeless.Context;\n\npublic class Foo {\n    public String foo(io.kubeless.Event event, io.kubeless.Context context) {\n        return \"Hello world!\";\n    }\n}\n```\n\n#### Description\n\nJava functions must use `io.kubeless` as package and should import both `io.kubeless.Event` and `io.kubeless.Context` packages. Function should be made part of a public class and should have a function signature that takes `Event` and `Context` as inputs and produces `String` output. Once you have Java function meeting the requirements it can be deployed with Kubeless as below. Where handler part `--handler Foo.foo` takes `Classname.Methodname` format.\n\n```cmd\n  kubeless function deploy get-java --runtime java1.8 --handler Foo.foo --from-file Foo.java\n```\n\nKubeless supports Java functions with dependencies. Kubeless uses Maven for both dependency management and building user given functions. Users are expected to provide function dependencies expresses in Maven pom.xml format.\n\nLets take Java function with dependency on `org.joda.time.LocalTime`.\n\n```java\npackage io.kubeless;\n\nimport io.kubeless.Event;\nimport io.kubeless.Context;\n\nimport org.joda.time.LocalTime;\n\npublic class Hello {\n    public String sayHello(io.kubeless.Event event, io.kubeless.Context context) {\n        System.out.println(event.Data);\n        LocalTime currentTime = new LocalTime();\n        return \"Hello world! 
Current local time is: \" + currentTime;\n    }\n}\n```\n\n#### Dependencies\n\nDependencies are expressed through standard Maven pom.xml file format as below.\n\n```xml\n<project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\">\n  <modelVersion>4.0.0</modelVersion>\n  <artifactId>function</artifactId>\n  <name>function</name>\n  <version>1.0-SNAPSHOT</version>\n  <dependencies>\n     <dependency>\n       <groupId>joda-time</groupId>\n       <artifactId>joda-time</artifactId>\n       <version>2.9.2</version>\n     </dependency>\n      <dependency>\n          <groupId>io.kubeless</groupId>\n          <artifactId>params</artifactId>\n          <version>1.0-SNAPSHOT</version>\n      </dependency>\n  </dependencies>\n  <parent>\n    <groupId>io.kubeless</groupId>\n    <artifactId>kubeless</artifactId>\n    <version>1.0-SNAPSHOT</version>\n  </parent>\n</project>\n```\n\nNotice the reference to `kubeless` parent pom module and dependency on `params` artifact. pom.xml should also use `function` as artifact ID.\n\nOnce you have Java function with dependencies and pom.xml file expressing the dependencies Java function can be deployed with Kubeless as below.\n\n```cmd\n\tkubeless function deploy get-java-deps --runtime java1.8 --handler Hello.sayHello --from-file java/HelloWithDeps.java --dependencies java/pom.xml\n```\n> Note: Maven command line arguments can be set using environment flag `--env`. 
For instance proxy details can be set as\n\n```cmd\n    kubeless function deploy get-java --runtime java1.8 --handler Foo.foo --from-file Foo.java --env MAVEN_OPTS='-DproxySet=true -DproxyHost=<proxy_host> -DproxyPort=<proxy_port>'\n```\n\n### .NET Core (C#)\n\n#### Example\n\n```csharp\nusing System;\nusing Kubeless.Functions;\n\npublic class module\n{\n    public object handler(Event k8Event, Context k8Context)\n    {\n        return k8Event.Data;\n    }\n}\n```\n\nDeploy it using the following command:\n```bash\nkubeless function deploy helloget --from-file helloget.cs --handler module.handler --runtime dotnetcore2.0\n```\n\n#### Description\nTo get started using .NET Core with kubeless, you should use the following commands:\n\n```bash\ndotnet new library\ndotnet add package Kubeless.Functions\n```\n\n.NET Core (C#) functions supports returns for any primitive or complex type. The method signature needs to have first an `Kubeless.Functions.Event` followed by an `Kubeless.Functions.Context`. The models are definied as it follows:\n\n```csharp\npublic class Context\n{\n    public string ModuleName { get; }\n    public string FunctionName { get; }\n    public string FunctionPort { get; }\n    public string Timeout { get; }\n    public string Runtime { get; }\n    public string MemoryLimit { get; }\n}\n```\n\n```csharp\npublic class Event\n{\n    public object Data { get; }\n    public string EventId { get; }\n    public string EventType { get; }\n    public string EventTime { get; }\n    public string EventNamespace { get; }\n    public Extensions Extensions { get; }\n}\n```\n\n#### Dependencies\n\nDependencies are handled in `.csproj` extension. 
You can use the regular `.csproj` file outputted by the `dotnet new library` command.\n\n```xml\n<Project Sdk=\"Microsoft.NET.Sdk\">\n\n  <PropertyGroup>\n    <TargetFramework>netstandard2.0</TargetFramework>\n  </PropertyGroup>\n\n  <ItemGroup>\n    <PackageReference Include=\"Kubeless.Functions\" Version=\"0.1.1\" />\n    <PackageReference Include=\"YamlDotNet\" Version=\"4.3.1\" />\n  </ItemGroup>\n\n</Project>\n\n```\n\nThe runtime already have built-in the package `Kubeless.Functions:0.1.1`, necessary to all functions - so you don't need to include that. Then, if you have a function which does not need any external references than `Kubeless.Functions`, you don't need to even send the `--dependencies` flag on kubeless cli.\n\nYou can deploy them using the command:\n\n```bash\nkubeless function deploy fibonacci --from-file fibonacci.cs --handler module.handler --dependencies fibonacci.csproj --runtime dotnetcore2.0\n```\n\n##### `nuget.config`\n\nIf you happen to be using custom nuget repositories through a `nuget.config` file, you'll need to include the file along with the code inside a `.zip` file and then you can deploy the function with the `nuget.config` using the command:\n\n```bash\nkubeless function deploy custom-deps --from-file custom-deps.zip --handler module.handler --dependencies custom-deps.csproj --runtime dotnetcore2.0\n```\n\n### Ballerina\n\n#### Example\n\n```ballerina\nimport kubeless/kubeless;\nimport ballerina/io;\n\npublic function foo(kubeless:Event event, kubeless:Context context) returns (string|error) {\n    io:println(event);\n    io:println(context);\n    return \"Hello Ballerina\";\n}\n\n```\n\n#### Description\n\nThe Ballerina functions should import the package `kubeless/kubeless`. 
This [package](https://central.ballerina.io/kubeless/kubeless) contains two types `Event` and `Context`.\n\n```console\n$ kubeless function deploy foo\n    --runtime ballerina0.981.0\n    --from-file foo.bal\n    --handler foo.foo\n```\n\nWhen using the Ballerina runtime, it is possible to provide a configuration via `kubeless.toml` file. The values in kubeless.toml file are available for the function. The function(.bal file) and conf file should be in the same directory.\nThe zip file containing both files should be passed to the Kubeless CLI.\n\n```console\nfoo\n├── hellowithconf.bal\n└── kubeless.toml\n\n$ zip -r -j foo.zip foo/\n\n$ kubeless function deploy foo\n      --runtime ballerina0.981.0\n      --from-file foo.zip\n      --handler hellowithconf.foo\n```\n\n#### Server implementation\n\nFor the Ballerina runtime we start a [Ballerina HTTP server](../docker/runtime/ballerina/kubeless_run.tpl.bal) with two resources, '/' and '/healthz'.\n\n## Use a custom runtime\n\nThe Kubeless configuration defines a set of default container images per supported runtime variant.\n\nThese default container images can be configured via Kubernetes environment variables on the Kubeless controller's deployment container. Or modifying the `kubeless-config` ConfigMap that is deployed along with the Kubeless controller. For more information about how to modify the Kubeless configuration check [this guide](https://kubeless.io/docs/function-controller-configuration/).\n\nApart than changing the configuration, it is possible to use a custom runtime specifying the image that the function will use. If you are interested in developing a new runtime from scratch (i.e. for a new language) you should follow [this guide](https://kubeless.io/docs/implementing-new-runtime/). 
In the linked guide you can find the requirements that a new runtime should fulfill and how you can submit new runtimes to the Kubeless project.\n\nIn any case, if you want to use one of the existing runtimes but you want to modify it to support a specific feature you can easily do that. The first thing is to modify the files in [`docker/runtime`](https://github.com/kubeless/kubeless/tree/master/docker/runtime) folder. For example, if we want to add the `lodash` `npm` module globally in the NodeJS runtime we can modify its [Dockerfile](https://github.com/kubeless/kubeless/tree/master/docker/runtime/nodejs/Dockerfile.8):\n\n```patch\n...\n  RUN apt-get update && apt-get install git\n+ RUN npm install -g lodash\n\n...\n```\n\nNow we can use the Makefile in the folder to generate the base image:\n\n```console\n▶ make build8\ndocker build -t kubeless/nodejs:8$RUNTIME_TAG_MODIFIER -f Dockerfile.8 .\nSending build context to Docker daemon  7.059MB\nStep 1/10 : FROM node:8\n ---> 55791187f71c\nStep 2/10 : RUN apt-get update &&  apt-get install git\n ---> Using cache\n ---> 70f1565e9353\nStep 3/10 : RUN npm install -g lodash\n ---> Running in 03602280a37d\n+ lodash@4.17.10\nadded 1 package in 1.369s\n...\nSuccessfully built d68eccb2568b\nSuccessfully tagged kubeless/nodejs:8\n```\n\nWe can now retag the image and push it using a different account:\n\n```console\n▶ docker tag kubeless/nodejs:8 andresmgot/nodejs-with-lodash:8\n\n▶ docker push andresmgot/nodejs-with-lodash:8\nThe push refers to repository [docker.io/andresmgot/nodejs-with-lodash]\n5a9aabfdd819: Pushed\n...\n8: digest: sha256:dfd26034130e5aae5a3db7b3df969649c44c3f7d1168bee7c4e1e6e7e75726d7 size: 3261\n```\n\nFinally in order to use this new flavor we need to add it to the Kubeless config. 
We will just copy the official `nodejs` runtime and rename it to reflect the changes:\n\n```console\n▶ kubectl edit -n kubeless configmap kubeless-config\n# Add the following object within the \"runtime-images\" array\n#      {\n#        \"ID\": \"nodejsWithLodash\",\n#        \"compiled\": false,\n#        \"versions\": [\n#          {\n#            \"name\": \"node8\",\n#            \"version\": \"8\",\n#            \"runtimeImage\": \"andresmgot/nodejs-with-lodash:8\",\n#            \"initImage\": \"node:8\"\n#          }\n#        ],\n#        \"depName\": \"package.json\",\n#        \"fileNameSuffix\": \".js\"\n#      },\nconfigmap \"kubeless-config\" edited\n```\n\n> NOTE: You should just use lowercase and uppercase characters for the ID. The runtime selection is made concatenating the runtime ID and the version (i.e. nodejsWithLodash8 for this example)\n\nThe last step in order to deploy a function with the new runtime is to restart the Kubeless controller pod:\n\n```console\n▶ kubectl delete pods -n kubeless -l kubeless=controller\npod \"kubeless-controller-manager-67fbc78f6d-w2vnk\" deleted\n\n▶ kubeless function deploy my-nodejs-func --runtime nodejsWithLodash8 --handler helloget.foo --from-file examples/nodejs/helloget.js\nINFO[0000] Deploying function...\nINFO[0000] Function my-nodejs-func submitted for deployment\nINFO[0000] Check the deployment status executing 'kubeless function ls my-nodejs-func'\n\n# Wait for the function pod to be deployed\n▶ kubectl exec -it my-nodejs-func-55546fcf68-78fpz -- npm list -g | grep lodash\n+-- lodash@4.17.10\n```\n\n## Use a custom livenessProbe\n\nOne can use kubeless-config to override the default liveness probe. By default, the liveness probe is `http-get` this can be overriden by providing the livenessprobe info in `kubeless-confg` under `runtime-images`. It has been implemented in such a way that each runtime can have its own liveness probe info. 
To use a custom liveness probe, paste the following info in `runtime-images`:\n\n```json\n\"versions\": [],\n\"livenessProbeInfo\": {\n  \"exec\": {\n    \"command\": [\n      \"curl\",\n      \"-f\",\n      \"http://localhost:8080/healthz\"\n    ]\n  },\n  \"initialDelaySeconds\": 5,\n  \"periodSeconds\": 5,\n  \"failureThreshold\": 3,\n  \"timeoutSeconds\": 30\n},\n\"depName\": \"\"\n```
  },
  {
    "path": "docs/streaming-functions.md",
    "content": "# Data Stream events\n\nKubeless lets you trigger any Kubeless function in response to ingested records into a data stream. Kubeless currently supports AWS Kinesis streaming service.\n\n## AWS Kinesis\n\nTo trigger Kubeless functions in response to ingested records into the AWS kinesis stream you need to deploy Kubeless AWS Kinesis trigger controller. Please use this manifest to deploy Kubeless AWS Kinesis trigger controller.\n\n```console\nexport RELEASE=$(curl -s https://api.github.com/repos/kubeless/kinesis-trigger/releases/latest | grep tag_name | cut -d '\"' -f 4)\nkubectl create -f https://github.com/kubeless/kinesis-trigger/releases/download/$RELEASE/kinesis-$RELEASE.yaml\n```\n\nOnce you deploy the manifest you shall see Kinesis trigger controller running in the Kubeless namespace as below.\n\n```console\n$ kubectl get pods -n kubeless\nNAME                                           READY     STATUS    RESTARTS   AGE\nkinesis-trigger-controller-65c78f9f44-v5flq    1/1       Running   0          1h\nkubeless-controller-manager-6b7cdcdc76-x6gsd   1/1       Running   0          13h\n```\n\nYou shall also notice a CRD resource type `kinesistriggers.kubeless.io` created as below.\n\n```console\n$ kubectl get crd\nNAME                          AGE\ncronjobtriggers.kubeless.io   13h\nfunctions.kubeless.io         13h\nhttptriggers.kubeless.io      13h\nkinesistriggers.kubeless.io   13h\n```\n\nKubeless cli lets you create Kubeless triggers of Kinesis type. 
Kubeless cli provides necessary functionality to manage the life cycle of Kinesis triggers.\n\n```console\n$ kubeless trigger kinesis --help\nkinesis trigger command allows users to create, list, update, delete Kinesis triggers running on Kubeless\n\nUsage:\n  kubeless trigger kinesis SUBCOMMAND [flags]\n  kubeless trigger kinesis [command]\n\nAvailable Commands:\n  create        Create a Kinesis trigger\n  create-stream Create a Kinesis stream\n  delete        Delete a Kinesis trigger\n  list          list all Kinesis triggers deployed to Kubeless\n  publish       publish message to a Kinesis stream\n  update        Update a Kinesis trigger\n\nFlags:\n  -h, --help   help for kinesis\n\nUse \"kubeless trigger kinesis [command] --help\" for more information about a command.\n```\n\nIn order to deploy a Kinesis trigger and associate a Kubeless function to be invoked in response to ingested records in Kinesis data stream, you need to first let Kubeless know the credentials required to acess your AWS Kinesis stream. Kubeless will leverage Kubernetes secrets to store the credentials in the cluster and use them to access the Kinesis stream.\n\nFirst you need to creat Kubernetes secret that can store you AWS `aws_access_key_id` and `aws_secret_access_key`. Usually if you are using AWS cli your keys will be present in `~/.aws/credentials` or you can create AWS access keys from AWS console.\n\n```console\nkubectl create secret generic ec2 --from-literal=aws_access_key_id=$AWS_ACCESS_KEY_ID --from-literal=aws_secret_access_key=$AWS_SECRET_ACCESS_KEY\n```\n\nOnce you have created a secret you are ready to deploy Kubeless Kinesis trigger as below.\n\n```console\nkubeless trigger kinesis create test-trigger --function-name post-python --aws-region us-west-2 --shard-id shardId-000000000000 --stream my-kinesis-stream --secret ec2\n```\n\nLets look into the flags expected. `--aws-region` is the AWS region in which your Kinesis stream is avilable. 
`--shard-id` is the id of shard into which records are placed. You should be able to get the `shard-id` from the stream description. `--stream` is the name of the Kinesis stream.\n\n```console\n$ aws kinesis describe-stream --stream-name my-kinesis-stream\n{\n    \"StreamDescription\": {\n        \"RetentionPeriodHours\": 24,\n        \"StreamName\": \"my-kinesis-stream\",\n        \"Shards\": [\n            {\n                \"ShardId\": \"shardId-000000000000\",\n                \"HashKeyRange\": {\n                    \"EndingHashKey\": \"340282366920938463463374607431768211455\",\n                    \"StartingHashKey\": \"0\"\n                },\n                \"SequenceNumberRange\": {\n                    \"StartingSequenceNumber\": \"49584495912138607235774073050889122383423872293029281794\"\n                }\n            }\n        ],\n        \"StreamARN\": \"arn:aws:kinesis:us-west-2:159706291352:stream/my-kinesis-stream\",\n        \"EnhancedMonitoring\": [\n            {\n                \"ShardLevelMetrics\": []\n            }\n        ],\n        \"StreamStatus\": \"ACTIVE\"\n    }\n}\n```\n\nOnce you deploy the Kinesis trigger you shall see a `kinesistrigger` CRD object as below.\n\n```console\n$ kubectl get kinesistriggers.kubeless.io test -o yaml\napiVersion: kubeless.io/v1beta1\nkind: KinesisTrigger\nmetadata:\n  labels:\n    created-by: kubeless\n  name: test\n  namespace: default\nspec:\n  aws-region: us-west-2\n  function-name: post-python\n  secret: ec2\n  shard: shardId-000000000000\n  stream: my-kinesis-stream\n```\n\nAt this point you shall be able to publish a record in to the stream either through Kubeless CLI or using AWS cli as below.\n\n```console\nkubeless trigger kinesis publish --aws-region us-west-2  --secret ec2 --partition-key \"123\" --stream my-kinesis-stream  --message \"hello world\"\n```\n\nor\n\n```console\naws kinesis put-record --stream-name my-kinesis-stream --partition-key 123 --data testdata1\naws kinesis 
put-record --stream-name my-kinesis-stream --partition-key 123 --data testdata2\naws kinesis put-record --stream-name my-kinesis-stream --partition-key 123 --data testdata3\n```\n\nYou shall see the log of received messages in the function pod associated with the Kinesis trigger.\n\n```console\n$ kubectl logs post-python-59f7fc4b54-4nhbb\nBottle v0.12.13 server starting up (using CherryPyServer())...\nListening on http://0.0.0.0:8080/\nHit Ctrl-C to quit.\n\n{'event-time': '2018-05-18 05:40:42.881137473 +0000 UTC', 'extensions': {'request': <LocalRequest: POST http://post-python.default.svc.cluster.local:8080/>}, 'event-type': 'application/x-www-form-urlencoded', 'event-namespace': 'kinesistriggers.kubeless.io', 'data': 'testdata12', 'event-id': 'bDRMSN3NPC81ktU'}\n172.17.0.7 - - [18/May/2018:05:40:42 +0000] \"POST / HTTP/1.1\" 200 10 \"\" \"Go-http-client/1.1\" 0/11758\n{'event-time': '2018-05-18 05:40:44.891994208 +0000 UTC', 'extensions': {'request': <LocalRequest: POST http://post-python.default.svc.cluster.local:8080/>}, 'event-type': 'application/x-www-form-urlencoded', 'event-namespace': 'kinesistriggers.kubeless.io', 'data': 'testdata22', 'event-id': 'uHdiWN-lzeKYQyQ'}\n172.17.0.7 - - [18/May/2018:05:40:44 +0000] \"POST / HTTP/1.1\" 200 10 \"\" \"Go-http-client/1.1\" 0/8983\n{'event-time': '2018-05-18 05:40:45.878361324 +0000 UTC', 'extensions': {'request': <LocalRequest: POST http://post-python.default.svc.cluster.local:8080/>}, 'event-type': 'application/x-www-form-urlencoded', 'event-namespace': 'kinesistriggers.kubeless.io', 'data': 'testdata32', 'event-id': 'sRRjSasGVApy8tA'}\n```\n"
  },
  {
    "path": "docs/triggers.md",
    "content": "# Triggers\n\nTo invoke deployed functions, you need to create **triggers**. A function can have multiple triggers, but each of those will only reference a single deployed function.\n\nEach trigger has its own schema and usage, so we've created a separate page for each one of those.\n\n## Available triggers\n\nIn this section, we're going to list our triggers. Since Kubeless is an open-source tool there are multiple triggers that we haven't listed here. Feel free to add your trigger to this list.\n\n* [HTTP Trigger](/docs/http-triggers)\n* [CronJob Trigger](/docs/cronjob-triggers)\n* [PubSub Triggers](/docs/pubsub-functions)\n  * [Kafka Trigger](/docs/pubsub-functions#kafka)\n  * [NATS Trigger](/docs/pubsub-functions#nats)\n* [Data Stream Triggers](/docs/streaming-functions)\n  * [AWS Kinesis Trigger](/docs/streaming-functions/#aws-kinesis)\n \n\n## Creating a new trigger\n\nIt is really simple to create a new trigger on Kubeless. Take a look at the [Implementing a New Trigger](/docs/implementing-new-trigger) page to learn more about it.\n"
  },
  {
    "path": "docs/troubleshooting.md",
    "content": "# Troubleshooting\n\n## Installation\n\nIf installing using\n\n```console\nkubectl create -f kubeless.yaml --namespace kubeless\n```\n\ngives the following error:\n\n```console\ncustomresourcedefinition \"functions.k8s.io\" created error: error validating\n\"kubeless.yaml\": error validating data: unknown object type \nschema.GroupVersionKind{Group:\"\", Version:\"v1\", Kind:\"Service\"}; if you\nchoose to ignore these errors, turn validation off with --validate=false\n```\n\nYou probably have an older version of Kubernetes. Make sure\nyou are using at least version `1.7`.\n\n## Kafka and Zookeeper Persistent Volume creation\n\nSince Kubeless 0.5, there is a standalone manifest for deploying Kafka and Zookeeper. In some platforms, the Persistent Volumes that these applications require are not automatically generated. If that is your case you will see the deployments and Persistent Volume Claims as Pending:\n\n```\n$ kubectl get pods -n kubeless\nNAME                                           READY     STATUS    RESTARTS   AGE\nkafka-0                                        1/1       Pending   0          1h\nkafka-trigger-controller-7f4f458f8b-l6f5m      1/1       Running   0          1h\nkubeless-controller-manager-58d78fff74-g7fsd   1/1       Running   0          1h\nzoo-0                                          1/1       Pending   0          1h\n$ kubectl get pvc -n kubeless\nNAME              STATUS    VOLUME    CAPACITY   ACCESSMODES   STORAGECLASS   AGE\ndatadir-kafka-0   Pending                                                     2m\nzookeeper-zoo-0   Pending                                                     2m\n\n```\n\nIf you are running Kubernetes in GKE check the specific guide [here](/docs/GKE-deployment) to create the required disks and PVs. In other case, check the provider documentation of how to create these required volumes. 
Note that `kafka` and `zookeeper` are only needed when working with Kafka events, you can still use Kubeless to trigger functions using HTTP requests.\n"
  },
  {
    "path": "docs/use-existing-kafka.md",
    "content": "# Use an existing Kafka cluster with Kubeless\n\nIn Kubeless [release page](https://github.com/kubeless/kubeless/releases), we provide along with Kubeless manifests a collection of Kafka and Zookeeper statefulsets which helps user to quickly deploying PubSub function. These statefulsets are deployed in `kubeless` namespace. However, if you have a Kafka cluster already running in the same Kubernetes cluster, this doc will walk you through how to deploy Kubeless PubSub function with it.\n\nLet's assume that you have Kafka cluster running at `pubsub` namespace like below:\n\n```console\n$ kubectl -n pubsub get po\nNAME      READY     STATUS    RESTARTS   AGE\nkafka-0   1/1       Running   0          7h\nzoo-0     1/1       Running   0          7h\n\n$ kubectl -n pubsub get svc\nNAME        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)             AGE\nkafka       ClusterIP   10.55.253.151   <none>        9092/TCP            7h\nzookeeper   ClusterIP   10.55.248.146   <none>        2181/TCP            7h\n```\n\n**Note**: If you want to use the command `kubeless topic` you need add a label to your Kafka deployment (`kubeless=kafka`) in order for the CLI to find it. \n\nAnd Kubeless already running at `kubeless` namespace:\n\n```console\n$ kubectl -n kubeless get po\nNAME                                           READY     STATUS    RESTARTS   AGE\nkubeless-controller-manager-58676964bb-l79gh   1/1       Running   0          5d\n```\n\nNow we need to deploy the Kafka consumer and the Kafka Trigger CRD. We can do that extracting the Deployment, CRD and ClusterRoles from the generic Kafka manifest. 
The key part is adding the environment variable `KAFKA_BROKERS` pointing to the right URL:\n\n```yaml\n$ echo '\n---\napiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n  labels:\n    kubeless: kafka-trigger-controller\n  name: kafka-trigger-controller\n  namespace: kubeless\nspec:\n  selector:\n    matchLabels:\n      kubeless: kafka-trigger-controller\n  template:\n    metadata:\n      labels:\n        kubeless: kafka-trigger-controller\n    spec:\n      containers:\n      - image: bitnami/kafka-trigger-controller:latest\n        imagePullPolicy: IfNotPresent\n        name: kafka-trigger-controller\n        env:\n        - name: KAFKA_BROKERS\n          value: kafka.pubsub:9092 # CHANGE THIS!\n      serviceAccountName: controller-acct\n---\napiVersion: apiextensions.k8s.io/v1beta1\nkind: CustomResourceDefinition\nmetadata:\n  name: kafkatriggers.kubeless.io\nspec:\n  group: kubeless.io\n  names:\n    kind: KafkaTrigger\n    plural: kafkatriggers\n    singular: kafkatrigger\n  scope: Namespaced\n  version: v1beta1\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n  name: kafka-controller-deployer\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: kafka-controller-deployer\nsubjects:\n- kind: ServiceAccount\n  name: controller-acct\n  namespace: kubeless\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n  name: kafka-controller-deployer\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - services\n  - configmaps\n  verbs:\n  - get\n  - list\n- apiGroups:\n  - kubeless.io\n  resources:\n  - functions\n  - kafkatriggers\n  verbs:\n  - get\n  - list\n  - watch\n  - update\n  - delete\n' | kubectl create -f -\ndeployment \"kafka-trigger-controller\" created\nclusterrolebinding \"kafka-controller-deployer\" created\nclusterrole \"kafka-controller-deployer\" created\ncustomresourcedefinition \"kafkatriggers.kubeless.io\" created\n```\n\nNow we need to create 
`s3-python` topic and try to publish some messages. You can do it on your own kafka client. In this example, I will try to use the bundled binaries in the kafka container:\n\n```console\n# create s3-python topic\n$ kubectl -n pubsub exec -it kafka-0 -- /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper.pubsub:2181 --replication-factor 1 --partitions 1 --topic s3-python\n\n# send test message to s3-python topic\n$ kubectl -n pubsub exec -it kafka-0 -- /opt/bitnami/kafka/bin/kafka-console-producer.sh --broker-list localhost:9092 --topic s3-python\n> hello world\n```\n\nOpen another terminal and check for the pubsub function log to see if it receives the message:\n\n```console\n$ kubectl logs -f pubsub-python-5445bdcb64-48bv2\nhello world\n```\n\nWhen using SASL you must add `KAFKA_ENABLE_SASL`, `KAFKA_USERNAME` and `KAFKA_PASSWORD` env var to set authentification (might use a secret).:\n\n```yaml\n$ echo '\n---\napiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n  labels:\n    kubeless: kafka-trigger-controller\n  name: kafka-trigger-controller\n  namespace: kubeless\nspec:\n  selector:\n    matchLabels:\n      kubeless: kafka-trigger-controller\n  template:\n    metadata:\n      labels:\n        kubeless: kafka-trigger-controller\n    spec:\n      containers:\n      - image: bitnami/kafka-trigger-controller:latest\n        imagePullPolicy: IfNotPresent\n        name: kafka-trigger-controller\n        env:\n        ...\n        - name: KAFKA_ENABLE_SASL\n          value: true # CHANGE THIS!\n        - name: KAFKA_USERNAME\n          value: kafka-sasl-username # CHANGE THIS!\n        - name: KAFKA_PASSWORD\n          value: kafka-sasl-password # CHANGE THIS!\n...\n```\n\nWhen using SSL to secure kafka communication, you must set `KAFKA_ENABLE_TLS`, and specify some of these: \n* `KAFKA_CACERTS` to check server certificate\n* `KAFKA_CERT` and `KAFKA_KEY` to check client certificate\n* `KAFKA_INSECURE` to skip TLS verfication\n\nExample for Kafka 
controller deployments using TLS\n\n`Prerequisite` : Create secrets to hold certificates and keys.\n\n```yaml\n---\napiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n  labels:\n    kubeless: kafka-trigger-controller\n  name: kafka-trigger-controller\n  namespace: kubeless\nspec:\n  selector:\n    matchLabels:\n      kubeless: kafka-trigger-controller\n  template:\n    metadata:\n      labels:\n        kubeless: kafka-trigger-controller\n    spec:\n      volumes:\n      - name: kafka-volume\n        secret:\n          secretName: certs-and-keys-secret # REPLACE WITH SECRET HOLDING CERTS AND KEYS\n      containers:\n      - image: bitnami/kafka-trigger-controller:latest\n        imagePullPolicy: IfNotPresent\n        name: kafka-trigger-controller\n        volumeMounts:\n        - name: kafka-volume\n          mountPath: /path/to/certsandkeys\n        env:\n        ...\n        - name: KAFKA_ENABLE_TLS\n          value: \"true\" # ENABLE TLS\n        - name: KAFKA_CACERTS\n          value: \"/path/to/certsandkeys/ca.crt\" # CHANGE THIS! (NOTE : PATH HERE MATCHING THE MOUNT PATH ABOVE)\n        - name: KAFKA_CERT\n          value: \"/path/to/certsandkeys/cert.pem\" # CHANGE THIS! (NOTE : PATH HERE MATCHING THE MOUNT PATH ABOVE)\n        - name: KAFKA_KEY\n          value: \"/path/to/certsandkeys/key.pem\" # CHANGE THIS! (NOTE : PATH HERE MATCHING THE MOUNT PATH ABOVE)\n...\n```\n"
  },
  {
    "path": "examples/Makefile",
    "content": "GIT_SHA1 ?= master\nBASE_URL := https://raw.githubusercontent.com/kubeless/kubeless/$(GIT_SHA1)\n\nget-python:\n\tkubeless function deploy get-python --runtime python3.7 --handler helloget.foo --from-file python/helloget.py\n\nget-python-verify:\n\tkubeless function call get-python |egrep hello.world\n\tkubeless function top --function get-python --out yaml |egrep total_calls.*[1-100000]\n\nget-python-update:\n\t$(eval TMPDIR := $(shell mktemp -d))\n\tprintf 'def foo(event, context):\\n%4sreturn \"hello world updated\"\\n' > $(TMPDIR)/hello-updated.py\n\tkubeless function update get-python --from-file $(TMPDIR)/hello-updated.py\n\trm -rf $(TMPDIR)\n\nget-python-update-verify:\n\tkubeless function call get-python |egrep hello.world.updated\n\nget-python-deps:\n\tcd python && zip hellowithdeps.zip hellowithdeps.py hellowithdepshelper.py && cd ..\n\tkubeless function deploy get-python-deps --runtime python3.7 --handler hellowithdeps.foo --from-file python/hellowithdeps.zip --dependencies python/requirements.txt\n\nget-python-deps-tar-gz:\n\tcd python && tar czf hellowithdeps.tar.gz hellowithdeps.py hellowithdepshelper.py && cd ..\n\tkubeless function deploy get-python-deps-tar-gz --runtime python3.7 --handler hellowithdeps.foo --from-file python/hellowithdeps.tar.gz --dependencies python/requirements.txt\n\nget-python-deps-tar-bz2:\n\tcd python && tar cjf hellowithdeps.tar.bz2 hellowithdeps.py hellowithdepshelper.py && cd ..\n\tkubeless function deploy get-python-deps-tar-bz2 --runtime python3.7 --handler hellowithdeps.foo --from-file python/hellowithdeps.tar.bz2 --dependencies python/requirements.txt\n\nget-python-deps-tar-xz:\n\tcd python && tar cJf hellowithdeps.tar.xz hellowithdeps.py hellowithdepshelper.py && cd ..\n\tkubeless function deploy get-python-deps-tar-xz --runtime python3.7 --handler hellowithdeps.foo --from-file python/hellowithdeps.tar.xz --dependencies python/requirements.txt\n\nget-python-deps-verify:\n\tkubeless function call 
get-python-deps |egrep Google\n\nget-python-deps-tar-gz-verify:\n\tkubeless function call get-python-deps-tar-gz |egrep Google\n\nget-python-deps-tar-bz2-verify:\n\tkubeless function call get-python-deps-tar-bz2 |egrep Google\n\nget-python-deps-tar-xz-verify:\n\tkubeless function call get-python-deps-tar-xz |egrep Google\n\nget-python-custom-port:\n\tkubeless function deploy get-python-custom-port --runtime python3.7 --handler helloget.foo --from-file python/helloget.py --port 8081\n\nget-python-custom-port-verify:\n\tkubectl get svc get-python-custom-port -o yaml | grep 'targetPort: 8081'\n\tkubeless function call get-python-custom-port |egrep hello.world\n\nget-python-deps-update:\n\t$(eval TMPDIR := $(shell mktemp -d))\n\tprintf 'bs4\\ntwitter\\n' > $(TMPDIR)/requirements.txt\n\tkubeless function update get-python-deps --dependencies $(TMPDIR)/requirements.txt\n\trm -rf $(TMPDIR)\n\nget-python-deps-update-verify:\n\tpod=`kubectl get pod -l function=get-python-deps -o go-template -o custom-columns=:metadata.name --no-headers=true`; \\\n\techo \"Checking updated deps of $$pod\"; \\\n\tkubectl exec -it $$pod pip freeze | grep -q \"twitter==\"\n\nget-python-url-deps:\n\tcd python && tar czf hellowithdeps.tgz hellowithdeps.py hellowithdepshelper.py && cd ..\n\tkubeless function deploy get-python-url-deps --runtime python3.7 --handler hellowithdeps.foo --from-file python/hellowithdeps.tgz --dependencies $(BASE_URL)/examples/python/requirements.txt\n\nget-python-url-deps-verify:\n\tkubeless function call get-python-url-deps |egrep Google\t\n\nget-node-url-zip:\n\tkubeless function deploy get-node-url-zip --runtime nodejs10 --handler index.helloGet --from-file $(BASE_URL)/examples/nodejs/helloFunctions.zip\n\nget-node-url-tar-gz:\n\tkubeless function deploy get-node-url-tar-gz --runtime nodejs10 --handler index.helloGet --from-file $(BASE_URL)/examples/nodejs/helloFunctions.tar.gz\n\nget-node-url-tar-bz2:\n\tkubeless function deploy get-node-url-tar-bz2 --runtime 
nodejs10 --handler index.helloGet --from-file $(BASE_URL)/examples/nodejs/helloFunctions.tar.bz2\n\nget-node-url-tar-xz:\n\tkubeless function deploy get-node-url-tar-xz --runtime nodejs10 --handler index.helloGet --from-file $(BASE_URL)/examples/nodejs/helloFunctions.tar.xz\n\nget-node-url-zip-verify:\n\tkubeless function call get-node-url-zip |egrep hello.world\n\nget-node-url-tar-gz-verify:\n\tkubeless function call get-node-url-tar-gz |egrep hello.world\n\nget-node-url-tar-bz2-verify:\n\tkubeless function call get-node-url-tar-bz2 |egrep hello.world\n\nget-node-url-tar-xz-verify:\n\tkubeless function call get-node-url-tar-xz |egrep hello.world\n\nscheduled-get-python:\n\tkubeless function deploy scheduled-get-python --schedule \"* * * * *\" --runtime python3.7 --handler helloget.foo --from-file python/helloget.py\n\nscheduled-get-python-verify:\n\tnumber=\"1\"; \\\n\ttimeout=\"70\"; \\\n\tfound=false; \\\n\twhile [ $$number -le $$timeout ] ; do \\\n\t\tpod=`kubectl get po -oname -l function=scheduled-get-python`; \\\n\t\tlogs=`kubectl logs $$pod | grep \"GET / HTTP/1.1\\\" 200 11 \\\"\\\"\"`; \\\n    \tif [ \"$$logs\" != \"\" ]; then \\\n\t\t\tfound=true; \\\n\t\t\tbreak; \\\n\t\tfi; \\\n\t\tsleep 1; \\\n\t\tnumber=`expr $$number + 1`; \\\n\tdone; \\\n\t$$found\n\ntimeout-python:\n\t$(eval TMPDIR := $(shell mktemp -d))\n\tprintf 'def foo(event, context):\\n%4swhile 1: pass\\n%4sreturn \"hello world\"\\n' > $(TMPDIR)/hello-loop.py\n\tkubeless function deploy timeout-python --runtime python3.7 --handler helloget.foo  --from-file $(TMPDIR)/hello-loop.py --timeout 3\n\trm -rf $(TMPDIR)\n\ntimeout-python-verify:\n\t$(eval MSG := $(shell kubeless function call timeout-python 2>&1 || true))\n\techo $(MSG) | egrep Request.timeout.exceeded\n\nget-nodejs:\n\tkubeless function deploy get-nodejs --runtime nodejs10 --handler helloget.foo --from-file nodejs/helloget.js\n\nget-nodejs-verify:\n\tkubeless function call get-nodejs |egrep 
hello.world\n\nget-nodejs-custom-port:\n\tkubeless function deploy get-nodejs-custom-port --runtime nodejs10 --handler helloget.foo --from-file nodejs/helloget.js --port 8083\n\nget-nodejs-custom-port-verify:\n\tkubectl get svc get-nodejs-custom-port -o yaml | grep 'targetPort: 8083'\n\tkubeless function call get-nodejs-custom-port |egrep hello.world\n\nget-nodejs-stream:\n\tkubeless function deploy get-nodejs-stream --runtime nodejs10 --handler hellostream.foo --from-file nodejs/hellostream.js --dependencies nodejs/package.json\n\nget-nodejs-stream-verify:\n\tkubeless function call get-nodejs-stream |egrep hello.world\n\ntimeout-nodejs:\n\t$(eval TMPDIR := $(shell mktemp -d))\n\tprintf 'module.exports = { foo: function (event, context) { while(true) {} } }\\n' > $(TMPDIR)/hello-loop.js\n\tkubeless function deploy timeout-nodejs --runtime nodejs10 --handler helloget.foo  --from-file $(TMPDIR)/hello-loop.js --timeout 4\n\trm -rf $(TMPDIR)\n\ntimeout-nodejs-verify:\n\t$(eval MSG := $(shell kubeless function call timeout-nodejs 2>&1 || true))\n\techo $(MSG) | egrep Request.timeout.exceeded\n\nget-nodejs-deps:\n\tkubeless function deploy get-nodejs-deps --runtime nodejs10 --handler helloget.handler --from-file nodejs/hellowithdeps.js --dependencies nodejs/package.json\n\nget-nodejs-deps-verify:\n\tkubeless function call get-nodejs-deps --data '{\"hello\": \"world\"}' | grep -q 'hello.*world.*date.*UTC'\n\nget-nodejs-multi:\n\tcd nodejs; zip helloFunctions.zip *js\n\tkubeless function deploy get-nodejs-multi --runtime nodejs10 --handler index.helloGet --from-file nodejs/helloFunctions.zip\n\trm nodejs/helloFunctions.zip\n\nget-nodejs-multi-verify:\n\tkubeless function call get-nodejs-multi |egrep hello.world\n\nget-go:\n\tkubeless function deploy get-go --runtime go1.14 --handler handler.Foo --from-file golang/helloget.go\n\nget-go-verify:\n\tkubeless function call get-go |egrep Hello.world\n\nget-go-custom-port:\n\tkubeless function deploy get-go-custom-port --runtime 
go1.14 --handler helloget.Foo --from-file golang/helloget.go --port 8083\n\nget-go-custom-port-verify:\n\tkubectl get svc get-go-custom-port -o yaml | grep 'targetPort: 8083'\n\tkubeless function call get-go-custom-port |egrep Hello.world\n\ntimeout-go:\n\t$(eval TMPDIR := $(shell mktemp -d))\n\tprintf 'package kubeless\\nimport \"github.com/kubeless/kubeless/pkg/functions\"\\nfunc Foo(event functions.Event, context functions.Context) (string, error) {\\nfor{\\n}\\nreturn \"\", nil\\n}' > $(TMPDIR)/hello-loop.go\n\tkubeless function deploy timeout-go --runtime go1.14 --handler helloget.Foo  --from-file $(TMPDIR)/hello-loop.go --timeout 4\n\trm -rf $(TMPDIR)\n\ntimeout-go-verify:\n\t$(eval MSG := $(shell kubeless function call timeout-go 2>&1 || true))\n\techo $(MSG) | egrep Request.timeout.exceeded\n\nget-go-deps:\n\tkubeless function deploy get-go-deps --runtime go1.14 --handler helloget.Hello --from-file golang/hellowithdeps.go --dependencies golang/go.mod\n\nget-go-deps-verify:\n\tkubeless function call get-go-deps --data '{\"hello\": \"world\"}'\n\tkubectl logs --tail=1000 -l function=get-go-deps | grep -q 'level=info msg=.*hello.*world'\n\npost-go:\n\tkubeless function deploy post-go --runtime go1.14 --handler hellowithdata.Handler --from-file golang/hellowithdata.go\n\npost-go-verify:\n\tkubeless function call post-go --data '{\"it-s\": \"alive\"}'| egrep \"it.*alive\"\n\t# Verify event context\n\tlogs=`kubectl logs --tail=1000 -l function=post-go`; \\\n\techo $$logs | grep -q \"it.*alive\" && \\\n\techo $$logs | grep -q \"Z\" && \\\n\techo $$logs | grep -q \"application/json\" && \\\n\techo $$logs | grep -q \"cli.kubeless.io\"\n\nget-python-metadata:\n\tkubeless function deploy get-python-metadata --runtime python3.7 --handler helloget.foo --from-file python/helloget.py --env foo:bar,bar=foo --memory 128Mi --label foo:bar,bar=foo,foobar\n\nget-python-metadata-verify:\n\tkubeless function call get-python-metadata |egrep hello.world\n\tkubectl get po -o 
jsonpath='{.items[0].spec.containers[0].env}' -l function=get-python-metadata | grep '\"name\":\"foo\",\"value\":\"bar\"'\n\tkubectl get po -o jsonpath='{.items[0].spec.containers[0].env}' -l function=get-python-metadata | grep '\"name\":\"bar\",\"value\":\"foo\"'\n\tkubectl get po -o jsonpath='{.items[0].metadata.labels}' -l function=get-python-metadata | grep '\"foo\":\"bar\"'\n\tkubectl get po -o jsonpath='{.items[0].metadata.labels}' -l function=get-python-metadata | grep '\"bar\":\"foo\"'\n\tkubectl get po -o jsonpath='{.items[0].metadata.labels}' -l function=get-python-metadata | grep '\"foobar\":\"\"'\n\nget-python-secrets:\n\tkubectl create secret generic test-secret --from-literal=key=MY_KEY || true\n\tkubeless function deploy get-python-secrets --runtime python3.7 --handler helloget.foo --from-file python/helloget.py --secrets test-secret\n\nget-python-secrets-verify:\n\t$(eval pod := $(shell kubectl get pod -l function=get-python-secrets -o go-template -o custom-columns=:metadata.name --no-headers=true))\n\tkubectl exec -it $(pod) cat /test-secret/key | egrep \"MY_KEY\"\n\nget-ruby:\n\tkubeless function deploy get-ruby --runtime ruby2.4 --handler helloget.foo --from-file ruby/helloget.rb\n\nget-ruby-verify:\n\tkubeless function call get-ruby |egrep hello.world\n\nget-ruby-deps:\n\tkubeless function deploy get-ruby-deps --runtime ruby2.4 --handler hellowithdeps.foo --from-file ruby/hellowithdeps.rb --dependencies ruby/Gemfile\n\nget-ruby-deps-verify:\n\tkubeless function call get-ruby-deps |egrep hello.world\n\nget-ruby-custom-port:\n\tkubeless function deploy get-ruby-custom-port --runtime ruby2.4 --handler helloget.foo --from-file ruby/helloget.rb --port 8082\n\nget-ruby-custom-port-verify:\n\tkubectl get svc get-ruby-custom-port -o yaml | grep 'targetPort: 8082'\n\tkubeless function call get-ruby-custom-port |egrep hello.world\n\nget-php:\n\tkubeless function deploy get-php --runtime php7.2 --handler helloget.foo --from-file 
php/helloget.php\n\nget-php-update:\n\t$(eval TMPDIR := $(shell mktemp -d))\n\tprintf '<?php\\n function foo() { return \"hello world updated\"; } \\n' > $(TMPDIR)/hello-updated.php\n\tkubeless function update get-php --from-file $(TMPDIR)/hello-updated.php\n\trm -rf $(TMPDIR)\n\nget-php-update-verify:\n\tkubeless function call get-php | egrep \"hello.world.updated\"\n\nget-php-verify:\n\tkubeless function call get-php | egrep \"hello world\"\n\nget-php-deps:\n\tkubeless function deploy get-php-deps --runtime php7.2 --handler hellowithdeps.foo --from-file php/hellowithdeps.php --dependencies php/composer.json\n\nget-php-deps-verify:\n\tkubeless function call get-php-deps &> /dev/null\n\tkubectl logs --tail=1000 -l function=get-php-deps | egrep \"Hello\"\n\nget-php-deps-update:\n\t$(eval TMPDIR := $(shell mktemp -d))\n\tsed \"s/1\\.23/1\\.20/\" php/composer.json > $(TMPDIR)/composer.json\n\tkubeless function update get-php-deps --dependencies $(TMPDIR)/composer.json\n\nget-php-deps-update-verify:\n\t$(eval pod := $(shell kubectl get pod -l function=get-php-deps -o go-template -o custom-columns=:metadata.name --no-headers=true))\n\tkubectl exec -it $(pod) cat /kubeless/composer.json | egrep \"1.20\"\n\npost-php:\n\tkubeless function deploy post-php --runtime php7.2 --handler hellowithdata.foo --from-file php/hellowithdata.php\n\npost-php-verify:\n\tkubeless function call post-php --data '{\"it-s\": \"alive\"}'| egrep \"it.*alive\"\n\ntimeout-php:\n\t$(eval TMPDIR := $(shell mktemp -d))\n\tprintf '<?php\\n function foo() { while(1) {} } \\n' > $(TMPDIR)/hello-loop.php\n\tkubeless function deploy timeout-php --runtime php7.2 --handler helloget.foo  --from-file $(TMPDIR)/hello-loop.php --timeout 4\n\trm -rf $(TMPDIR)\n\ntimeout-php-verify:\n\t$(eval MSG := $(shell kubeless function call timeout-php 2>&1 || true))\n\techo $(MSG) | egrep Request.timeout.exceeded\n\ntimeout-ruby:\n\t$(eval TMPDIR := $(shell mktemp -d))\n\tprintf 'def foo(event, context)\\n%4swhile true 
do;sleep(1);end\\n%4s\"hello world\"\\nend' > $(TMPDIR)/hello-loop.rb\n\tkubeless function deploy timeout-ruby --runtime ruby2.4 --handler helloget.foo  --from-file $(TMPDIR)/hello-loop.rb --timeout 4\n\trm -rf $(TMPDIR)\n\ntimeout-ruby-verify:\n\t$(eval MSG := $(shell { time kubeless function call timeout-ruby; } 2>&1 || true))\n\techo $(MSG) | egrep Request.timeout.exceeded\n\techo $(MSG) | egrep \"real\\s*0m4.\"\n\nget-dotnetcore:\n\tkubeless function deploy get-dotnetcore --runtime dotnetcore2.0 --handler module.handler --from-file dotnetcore/helloget.cs\n\nget-dotnetcore-verify:\n\tkubeless function call get-dotnetcore |egrep hello.world\n\tkubeless function top --function get-dotnetcore --out yaml |egrep \"Function does not expose metrics\" \n\nget-dotnetcore-dependency:\n\tkubeless function deploy get-dotnetcore-dependency --runtime dotnetcore2.0 --handler module.handler --from-file dotnetcore/dependency-yaml.cs --dependencies dotnetcore/dependency-yaml.csproj\n\nget-dotnetcore-dependency-verify:\n\tkubeless function call get-dotnetcore-dependency |egrep Name:\\ Michael\n\ncustom-get-python:\n\tkubeless function deploy --runtime-image kubeless/get-python-example@sha256:6a14400f14e26d46a971445b7a850af533fe40cb75a67297283bdf536e09ca5e custom-get-python\n\ncustom-get-python-verify:\n\tkubeless function call custom-get-python |egrep hello.world\n\ncustom-get-python-update:\n\tkubeless function update --runtime-image kubeless/get-python-example@sha256:174beab98e6fa454e21121302395375e90a324e9276367296aab0eb5b4aa8922 custom-get-python\n\ncustom-get-python-update-verify:\n\tkubeless function call custom-get-python |egrep hello.world.updated\n\nget: get-python get-nodejs get-python-metadata get-ruby get-ruby-deps get-python-custom-port\n\npost-python:\n\tkubeless function deploy post-python --runtime python3.7 --handler hellowithdata.handler --from-file python/hellowithdata.py\n\npost-python-verify:\n\tkubeless function call post-python --data '{\"it-s\": 
\"alive\"}'|egrep \"it.*alive\"\n\t# Verify event context\n\tlogs=`kubectl logs --tail=1000 -l function=post-python`; \\\n\techo $$logs | grep -q \"it.*alive\" && \\\n\techo $$logs | grep -q \"event-time.*Z\" && \\\n\techo $$logs | grep -q \"event-type.*application/json\" && \\\n\techo $$logs | grep -q \"event-namespace.*cli.kubeless.io\" && \\\n\techo $$logs | grep -q \"event-id.*\"\n\npost-python-custom-port:\n\tkubeless function deploy post-python-custom-port --runtime python3.7 --handler hellowithdata.handler --from-file python/hellowithdata.py --port 8081\n\npost-python-custom-port-verify:\n\tkubectl get svc post-python-custom-port -o yaml | grep 'targetPort: 8081'\n\tkubeless function call post-python-custom-port --data '{\"it-s\": \"alive\"}'|egrep \"it.*alive\"\n\npost-nodejs:\n\tkubeless function deploy post-nodejs --runtime nodejs10 --handler hellowithdata.handler --from-file nodejs/hellowithdata.js\n\npost-nodejs-verify:\n\tkubeless function call post-nodejs --data '{\"it-s\": \"alive\"}'|egrep \"it.*alive\"\n\t# Verify event context\n\tlogs=`kubectl logs --tail=1000 -l function=post-nodejs`; \\\n\techo $$logs | grep -q \"it.*alive\" && \\\n\techo $$logs | grep -q \"event-time.*Z\" && \\\n\techo $$logs | grep -q \"event-type.*application/json\" && \\\n\techo $$logs | grep -q \"event-namespace.*cli.kubeless.io\" && \\\n\techo $$logs | grep -q \"event-id.*\"\n\npost-ruby:\n\tkubeless function deploy post-ruby --runtime ruby2.4 --handler hellowithdata.handler --from-file ruby/hellowithdata.rb\n\npost-ruby-verify:\n\tkubeless function call post-ruby --data '{\"it-s\": \"alive\"}'|egrep \"it.*alive\"\n\t# Verify event context\n\tlogs=`kubectl logs --tail=1000 -l function=post-ruby`; \\\n\techo $$logs | grep -q \"it.*alive\" && \\\n\techo $$logs | grep -q \"event-time.*Z\" && \\\n\techo $$logs | grep -q \"event-type.*application/json\" && \\\n\techo $$logs | grep -q \"event-namespace.*cli.kubeless.io\" && \\\n\techo $$logs | grep -q 
\"event-id.*\"\n\npost-dotnetcore:\n\tkubeless function deploy post-dotnetcore --runtime dotnetcore2.0 --handler module.handler --from-file dotnetcore/hellowithdata.cs\n\npost-dotnetcore-verify:\n\tkubeless function call post-dotnetcore --data '{\"it-s\": \"alive\"}'|egrep \"it.*alive\"\n\npost: post-python post-nodejs post-ruby post-python-custom-port\n\npubsub-python:\n\tkubeless topic create s3-python || true\n\tkubeless function deploy pubsub-python  --runtime python3.7 --handler pubsub.handler --from-file python/hellowithdata.py\n\tkubeless trigger kafka create pubsub-python --function-selector created-by=kubeless,function=pubsub-python --trigger-topic s3-python\n\n# Generate a random string to inject into s3 topic,\n# then \"tail -f\" until it shows (with timeout)\npubsub-python-verify:\n\t$(eval DATA := $(shell mktemp -u -t XXXXXXXX))\n\tkubeless topic publish --topic s3-python --data '{\"payload\":\"$(DATA)\"}'\n\tnumber=\"1\"; \\\n\ttimeout=\"60\"; \\\n\tfound=false; \\\n\twhile [ $$number -le $$timeout ] ; do \\\n\t\tpod=`kubectl get po -oname -l function=pubsub-python`; \\\n\t\tlogs=`kubectl logs $$pod | grep $(DATA)`; \\\n    \tif [ \"$$logs\" != \"\" ]; then \\\n\t\t\tfound=true; \\\n\t\t\tbreak; \\\n\t\tfi; \\\n\t\tsleep 1; \\\n\t\tnumber=`expr $$number + 1`; \\\n\tdone; \\\n\t$$found\n\t# Verify event context\n\tlogs=`kubectl logs --tail=1000 -l function=pubsub-python`; \\\n\techo $$logs | grep -q \"event-time.*UTC\" && \\\n\techo $$logs | grep -q \"event-type.*application/json\" && \\\n\techo $$logs | grep -q \"event-namespace.*kafkatriggers.kubeless.io\" && \\\n\techo $$logs | grep -q \"event-id.*\"\n\npython-nats:\n\tkubeless function deploy python-nats --runtime python3.7 --handler pubsub.handler --from-file python/hellowithdata.py\n\tkubeless trigger nats create python-nats --function-selector created-by=kubeless,function=python-nats --trigger-topic test\n\npython-nats-verify:\n\t$(eval DATA := $(shell mktemp -u -t XXXXXXXX))\n\t$(eval NODEPORT 
:= $(shell kubectl get svc nats -n nats-io -o jsonpath=\"{.spec.ports[0].nodePort}\"))\n\t$(eval MINIKUBE_IP := $(shell minikube ip))\n\tkubeless trigger nats publish --url nats://$(MINIKUBE_IP):$(NODEPORT) --topic test --message '{\"payload\":\"$(DATA)\"}'\n\tnumber=\"1\"; \\\n\ttimeout=\"60\"; \\\n\tfound=false; \\\n\twhile [ $$number -le $$timeout ] ; do \\\n\t\tpod=`kubectl get po -oname -l function=python-nats`; \\\n\t\tlogs=`kubectl logs $$pod | grep $(DATA)`; \\\n\t\tif [ \"$$logs\" != \"\" ]; then \\\n\t\t\tfound=true; \\\n\t\t\tbreak; \\\n\t\tfi; \\\n\t\tsleep 1; \\\n\t\tnumber=`expr $$number + 1`; \\\n\tdone; \\\n\t$$found\n\t# Verify event context\n\tlogs=`kubectl logs --tail=1000 -l function=python-nats`; \\\n\techo $$logs | grep -q \"event-time.*UTC\" && \\\n\techo $$logs | grep -q \"event-type.*application/json\" && \\\n\techo $$logs | grep -q \"event-namespace.*natstriggers.kubeless.io\" && \\\n\techo $$logs | grep -q \"event-id.*\"\n\npython-kinesis:\n\tkubeless function deploy python-kinesis --runtime python3.7 --handler pubsub.handler --from-file python/hellowithdata.py\n\t$(eval NODEPORT := $(shell kubectl get svc kinesis -o jsonpath=\"{.spec.ports[0].nodePort}\"))\n\t$(eval MINIKUBE_IP := $(shell minikube ip))\n\tkubectl create secret generic ec2 --from-literal=aws_access_key_id=kinesalite --from-literal=aws_secret_access_key=kinesalite\n\tkubeless trigger kinesis create-stream --aws-region kinesalite --secret ec2 --endpoint http://$(MINIKUBE_IP):$(NODEPORT) --shard-count 1 --stream-name kubeless-stream\n\tkubeless trigger kinesis create kinesis-trigger --function-name python-kinesis --aws-region kinesalite --shard-id shardId-000000000000 --stream kubeless-stream --secret ec2 --endpoint http://$(MINIKUBE_IP):$(NODEPORT)\n\npython-kinesis-verify:\n\t$(eval DATA := $(shell mktemp -u -t XXXXXXXX))\n\t$(eval NODEPORT := $(shell kubectl get svc kinesis -o jsonpath=\"{.spec.ports[0].nodePort}\"))\n\t$(eval MINIKUBE_IP := $(shell minikube 
ip))\n\tkubeless trigger kinesis publish --aws-region kinesalite --secret ec2  --endpoint http://$(MINIKUBE_IP):$(NODEPORT) --partition-key key1 --stream  kubeless-stream --records '{\"payload\":\"$(DATA)\"}'\n\tnumber=\"1\"; \\\n\ttimeout=\"60\"; \\\n\tfound=false; \\\n\twhile [ $$number -le $$timeout ] ; do \\\n\t\tpod=`kubectl get po -oname -l function=python-kinesis`; \\\n\t\tlogs=`kubectl logs $$pod | grep $(DATA)`; \\\n\t\tif [ \"$$logs\" != \"\" ]; then \\\n\t\t\tfound=true; \\\n\t\t\tbreak; \\\n\t\tfi; \\\n\t\tsleep 1; \\\n\t\tnumber=`expr $$number + 1`; \\\n\tdone; \\\n\t$$found\n\t# Verify event context\n\tlogs=`kubectl logs --tail=1000 -l function=python-kinesis`; \\\n\techo $$logs | grep -q \"event-time.*UTC\" && \\\n\techo $$logs | grep -q \"event-type.*application/json\" && \\\n\techo $$logs | grep -q \"event-namespace.*kinesistriggers.kubeless.io\" && \\\n\techo $$logs | grep -q \"event-id.*\"\n\npython-kinesis-multi-record:\n\tkubeless function deploy python-kinesis-multi-record --runtime python3.7 --handler pubsub.handler --from-file python/hellowithdata.py\n\t$(eval NODEPORT := $(shell kubectl get svc kinesis -o jsonpath=\"{.spec.ports[0].nodePort}\"))\n\t$(eval MINIKUBE_IP := $(shell minikube ip))\n\tkubeless trigger kinesis create kinesis-trigger-mr --function-name python-kinesis-multi-record --aws-region kinesalite --shard-id shardId-000000000000 --stream kubeless-stream --secret ec2 --endpoint http://$(MINIKUBE_IP):$(NODEPORT)\n\npython-kinesis-multi-record-verify:\n\t$(eval DATA1 := $(shell mktemp -u -t XXXXXXXX))\n\t$(eval DATA2 := $(shell mktemp -u -t XXXXXXXX))\n\t$(eval NODEPORT := $(shell kubectl get svc kinesis -o jsonpath=\"{.spec.ports[0].nodePort}\"))\n\t$(eval MINIKUBE_IP := $(shell minikube ip))\n\tkubeless trigger kinesis publish --aws-region kinesalite --secret ec2  --endpoint http://$(MINIKUBE_IP):$(NODEPORT) --partition-key key1 --stream  kubeless-stream --records '{\"payload\":\"$(DATA1)\"}' --records 
'{\"payload\":\"$(DATA2)\"}'\n\tnumber=\"1\"; \\\n\ttimeout=\"60\"; \\\n\tfound=false; \\\n\twhile [ $$number -le $$timeout ] ; do \\\n\t\tpod=`kubectl get po -oname -l function=python-kinesis-multi-record`; \\\n\t\tlogs1=`kubectl logs $$pod | grep $(DATA1)`; \\\n\t\tlogs2=`kubectl logs $$pod | grep $(DATA2)`; \\\n\t\tif [ \"$$logs1\" != \"\" ] && [ \"$$logs2\" != \"\" ]; then \\\n\t\t\tfound=true; \\\n\t\t\tbreak; \\\n\t\tfi; \\\n\t\tsleep 1; \\\n\t\tnumber=`expr $$number + 1`; \\\n\tdone; \\\n\t$$found\n\t# Verify event context\n\tlogs=`kubectl logs --tail=1000 -l function=python-kinesis-multi-record`; \\\n\techo $$logs | grep -q \"event-time.*UTC\" && \\\n\techo $$logs | grep -q \"event-type.*application/json\" && \\\n\techo $$logs | grep -q \"event-namespace.*kinesistriggers.kubeless.io\" && \\\n\techo $$logs | grep -q \"event-id.*\"\n\nnats-python-func1-topic-test:\n\tkubeless function deploy nats-python-func1-topic-test --runtime python3.7 --handler pubsub.handler --from-file python/hellowithdata.py  --label topic=nats-topic-test\n\nnats-python-func2-topic-test:\n\tkubeless function deploy nats-python-func2-topic-test --runtime python3.7 --handler pubsub.handler --from-file python/hellowithdata.py  --label topic=nats-topic-test\n\nnats-python-func-multi-topic:\n\tkubeless function deploy nats-python-func-multi-topic --runtime python3.7 --handler pubsub.handler --from-file python/hellowithdata.py  --label func=nats-python-func-multi-topic\n\nnats-python-trigger-topic-test:\n\tkubeless trigger nats create nats-python-trigger-topic-test --function-selector created-by=kubeless,topic=nats-topic-test --trigger-topic topic-test\n\nnats-python-trigger-topic1:\n\tkubeless trigger nats create nats-python-trigger-topic1 --function-selector created-by=kubeless,func=nats-python-func-multi-topic --trigger-topic topic1\n\nnats-python-trigger-topic2:\n\tkubeless trigger nats create nats-python-trigger-topic2 --function-selector 
created-by=kubeless,func=nats-python-func-multi-topic --trigger-topic topic2\n\nnats-python-func1-topic-test-verify:\n\t$(eval DATA := $(shell mktemp -u -t XXXXXXXX))\n\t$(eval NODEPORT := $(shell kubectl get svc nats -n nats-io -o jsonpath=\"{.spec.ports[0].nodePort}\"))\n\t$(eval MINIKUBE_IP := $(shell minikube ip))\n\tkubeless trigger nats publish --url nats://$(MINIKUBE_IP):$(NODEPORT) --topic topic-test --message '{\"payload\":\"$(DATA)\"}'\n\tnumber=\"1\"; \\\n\ttimeout=\"60\"; \\\n\tfound=false; \\\n\twhile [ $$number -le $$timeout ] ; do \\\n\t\tpod=`kubectl get po -oname -l function=nats-python-func1-topic-test`; \\\n\t\tlogs=`kubectl logs $$pod | grep $(DATA)`; \\\n\t\tif [ \"$$logs\" != \"\" ]; then \\\n\t\t\tfound=true; \\\n\t\t\tbreak; \\\n\t\tfi; \\\n\t\tsleep 1; \\\n\t\tnumber=`expr $$number + 1`; \\\n\tdone; \\\n\t$$found\n\t# Verify event context\n\tlogs=`kubectl logs --tail=1000 -l function=nats-python-func1-topic-test`; \\\n\techo $$logs | grep -q \"event-time.*UTC\" && \\\n\techo $$logs | grep -q \"event-type.*application/json\" && \\\n\techo $$logs | grep -q \"event-namespace.*natstriggers.kubeless.io\" && \\\n\techo $$logs | grep -q \"event-id.*\"\n\nnats-python-func2-topic-test-verify:\n\t$(eval DATA := $(shell mktemp -u -t XXXXXXXX))\n\t$(eval NODEPORT := $(shell kubectl get svc nats -n nats-io -o jsonpath=\"{.spec.ports[0].nodePort}\"))\n\t$(eval MINIKUBE_IP := $(shell minikube ip))\n\tkubeless trigger nats publish --url nats://$(MINIKUBE_IP):$(NODEPORT) --topic topic-test --message '{\"payload\":\"$(DATA)\"}'\n\tnumber=\"1\"; \\\n\ttimeout=\"60\"; \\\n\tfound=false; \\\n\twhile [ $$number -le $$timeout ] ; do \\\n\t\tpod=`kubectl get po -oname -l function=nats-python-func2-topic-test`; \\\n\t\tlogs=`kubectl logs $$pod | grep $(DATA)`; \\\n\t\tif [ \"$$logs\" != \"\" ]; then \\\n\t\t\tfound=true; \\\n\t\t\tbreak; \\\n\t\tfi; \\\n\t\tsleep 1; \\\n\t\tnumber=`expr $$number + 1`; \\\n\tdone; \\\n\t$$found\n\t# Verify event 
context\n\tlogs=`kubectl logs --tail=1000 -l function=nats-python-func2-topic-test`; \\\n\techo $$logs | grep -q \"event-time.*UTC\" && \\\n\techo $$logs | grep -q \"event-type.*application/json\" && \\\n\techo $$logs | grep -q \"event-namespace.*natstriggers.kubeless.io\" && \\\n\techo $$logs | grep -q \"event-id.*\"\n\nnats-python-func-multi-topic-verify:\n\t$(eval DATA := $(shell mktemp -u -t XXXXXXXX))\n\t$(eval NODEPORT := $(shell kubectl get svc nats -n nats-io -o jsonpath=\"{.spec.ports[0].nodePort}\"))\n\t$(eval MINIKUBE_IP := $(shell minikube ip))\n\tkubeless trigger nats publish --url nats://$(MINIKUBE_IP):$(NODEPORT) --topic topic1 --message '{\"payload\":\"$(DATA)\"}'\n\tnumber=\"1\"; \\\n\ttimeout=\"60\"; \\\n\tfound=false; \\\n\twhile [ $$number -le $$timeout ] ; do \\\n\t\tpod=`kubectl get po -oname -l function=nats-python-func-multi-topic`; \\\n\t\tlogs=`kubectl logs $$pod | grep $(DATA)`; \\\n\t\tif [ \"$$logs\" != \"\" ]; then \\\n\t\t\tfound=true; \\\n\t\t\tbreak; \\\n\t\tfi; \\\n\t\tsleep 1; \\\n\t\tnumber=`expr $$number + 1`; \\\n\tdone; \\\n\t$$found\n\t# Verify event context\n\tlogs=`kubectl logs --tail=1000 -l function=nats-python-func-multi-topic`; \\\n\techo $$logs | grep -q \"event-time.*UTC\" && \\\n\techo $$logs | grep -q \"event-type.*application/json\" && \\\n\techo $$logs | grep -q \"event-namespace.*natstriggers.kubeless.io\" && \\\n\techo $$logs | grep -q \"event-id.*\"\n\n\tkubeless trigger nats publish --url nats://$(MINIKUBE_IP):$(NODEPORT) --topic topic2 --message '{\"payload\":\"$(DATA)\"}'\n\tnumber=\"1\"; \\\n\ttimeout=\"60\"; \\\n\tfound=false; \\\n\twhile [ $$number -le $$timeout ] ; do \\\n\t\tpod=`kubectl get po -oname -l function=nats-python-func-multi-topic`; \\\n\t\tlogs=`kubectl logs $$pod | grep $(DATA)`; \\\n\t\tif [ \"$$logs\" != \"\" ]; then \\\n\t\t\tfound=true; \\\n\t\t\tbreak; \\\n\t\tfi; \\\n\t\tsleep 1; \\\n\t\tnumber=`expr $$number + 1`; \\\n\tdone; \\\n\t$$found\n\t# Verify event context\n\tlogs=`kubectl 
logs --tail=1000 -l function=nats-python-func-multi-topic`; \\\n\techo $$logs | grep -q \"event-time.*UTC\" && \\\n\techo $$logs | grep -q \"event-type.*application/json\" && \\\n\techo $$logs | grep -q \"event-namespace.*natstriggers.kubeless.io\" && \\\n\techo $$logs | grep -q \"event-id.*\"\n\nkafka-python-func1-topic-s3-python:\n\tkubeless topic create s3-python || true\n\tkubeless function deploy kafka-python-func1-topic-s3-python --runtime python3.7 --handler pubsub.handler --from-file python/hellowithdata.py --label topic=s3-python\n\nkafka-python-func1-topic-s3-python-verify:\n\t$(eval DATA := $(shell mktemp -u -t XXXXXXXX))\n\tkubeless topic publish --topic s3-python --data '{\"payload\":\"$(DATA)\"}'\n\tnumber=\"1\"; \\\n\ttimeout=\"60\"; \\\n\tfound=false; \\\n\twhile [ $$number -le $$timeout ] ; do \\\n\t\tpod=`kubectl get po -oname -l function=kafka-python-func1-topic-s3-python`; \\\n\t\tlogs=`kubectl logs $$pod | grep $(DATA)`; \\\n\t\tif [ \"$$logs\" != \"\" ]; then \\\n\t\t\tfound=true; \\\n\t\t\tbreak; \\\n\t\tfi; \\\n\t\tsleep 1; \\\n\t\tnumber=`expr $$number + 1`; \\\n\tdone; \\\n\t$$found\n\nkafka-python-func2-topic-s3-python:\n\tkubeless topic create s3-python || true\n\tkubeless function deploy kafka-python-func2-topic-s3-python --runtime python3.7 --handler pubsub.handler --from-file python/hellowithdata.py --label topic=s3-python\n\nkafka-python-func2-topic-s3-python-verify:\n\t$(eval DATA := $(shell mktemp -u -t XXXXXXXX))\n\tkubeless topic publish --topic s3-python --data '{\"payload\":\"$(DATA)\"}'\n\tnumber=\"1\"; \\\n\ttimeout=\"60\"; \\\n\tfound=false; \\\n\twhile [ $$number -le $$timeout ] ; do \\\n\t\tpod=`kubectl get po -oname -l function=kafka-python-func2-topic-s3-python`; \\\n\t\tlogs=`kubectl logs $$pod | grep $(DATA)`; \\\n\t\tif [ \"$$logs\" != \"\" ]; then \\\n\t\t\tfound=true; \\\n\t\t\tbreak; \\\n\t\tfi; \\\n\t\tsleep 1; \\\n\t\tnumber=`expr $$number + 1`; \\\n\tdone; \\\n\t$$found\n\ns3-python-kafka-trigger:\n\tkubeless 
trigger kafka create s3-python-kafka-trigger --function-selector created-by=kubeless,topic=s3-python --trigger-topic s3-python\n\npubsub-python34:\n\tkubeless topic create s3-python34 || true\n\tkubeless function deploy pubsub-python34 --runtime python3.4 --handler pubsub-python.handler --from-file python/hellowithdata34.py\n\tkubeless trigger kafka create pubsub-python34 --function-selector created-by=kubeless,function=pubsub-python34 --trigger-topic s3-python34\n\npubsub-python34-verify:\n\t$(eval DATA := $(shell mktemp -u -t XXXXXXXX))\n\tkubeless topic publish --topic s3-python34 --data '{\"payload\":\"$(DATA)\"}'\n\tnumber=\"1\"; \\\n\ttimeout=\"60\"; \\\n\tfound=false; \\\n\twhile [ $$number -le $$timeout ] ; do \\\n\t\tpod=`kubectl get po -oname -l function=pubsub-python34`; \\\n\t\tlogs=`kubectl logs $$pod | grep $(DATA)`; \\\n    \tif [ \"$$logs\" != \"\" ]; then \\\n\t\t\tfound=true; \\\n\t\t\tbreak; \\\n\t\tfi; \\\n\t\tsleep 1; \\\n\t\tnumber=`expr $$number + 1`; \\\n\tdone; \\\n\t$$found\n\npubsub-python36:\n\tkubeless topic create s3-python36 || true\n\tkubeless function deploy pubsub-python36 --runtime python3.6 --handler pubsub-python.handler --from-file python/pubsub.py\n\tkubeless trigger kafka create pubsub-python36 --function-selector created-by=kubeless,function=pubsub-python36 --trigger-topic s3-python36\n\npubsub-python36-verify:\n\t$(eval DATA := $(shell mktemp -u -t XXXXXXXX))\n\tkubeless topic publish --topic s3-python36 --data '{\"payload\":\"$(DATA)\"}'\n\tnumber=\"1\"; \\\n\ttimeout=\"60\"; \\\n\tfound=false; \\\n\twhile [ $$number -le $$timeout ] ; do \\\n\t\tpod=`kubectl get po -oname -l function=pubsub-python36`; \\\n\t\tlogs=`kubectl logs $$pod | grep $(DATA)`; \\\n    \tif [ \"$$logs\" != \"\" ]; then \\\n\t\t\tfound=true; \\\n\t\t\tbreak; \\\n\t\tfi; \\\n\t\tsleep 1; \\\n\t\tnumber=`expr $$number + 1`; \\\n\tdone; \\\n\t$$found\n\npubsub-nodejs:\n\tkubeless topic create s3-nodejs || true\n\tkubeless function deploy pubsub-nodejs 
--runtime nodejs10 --handler pubsub-nodejs.handler --from-file nodejs/hellowithdata.js\n\tkubeless trigger kafka create pubsub-nodejs --function-selector created-by=kubeless,function=pubsub-nodejs --trigger-topic s3-nodejs\n\npubsub-nodejs-verify:\n\t$(eval DATA := $(shell mktemp -u -t XXXXXXXX))\n\tkubeless topic publish --topic s3-nodejs --data '{\"test\": \"$(DATA)\"}'\n\tnumber=\"1\"; \\\n\ttimeout=\"60\"; \\\n\tfound=false; \\\n\twhile [ $$number -le $$timeout ] ; do \\\n\t\tpod=`kubectl get po -oname -l function=pubsub-nodejs`; \\\n\t\tlogs=`kubectl logs $$pod | grep $(DATA)`; \\\n    \tif [ \"$$logs\" != \"\" ]; then \\\n\t\t\tfound=true; \\\n\t\t\tbreak; \\\n\t\tfi; \\\n\t\tsleep 1; \\\n\t\tnumber=`expr $$number + 1`; \\\n\tdone; \\\n\t$$found\n\npubsub-nodejs-update:\n\tkubeless topic create s3-nodejs-2 || true\n\tkubeless trigger kafka update pubsub-nodejs --trigger-topic s3-nodejs-2\n\npubsub-nodejs-update-verify:\n\t$(eval DATA := $(shell mktemp -u -t XXXXXXXX))\n\tkubeless topic publish --topic s3-nodejs-2  --data '{\"test\": \"$(DATA)\"}'\n\tnumber=\"1\"; \\\n\ttimeout=\"60\"; \\\n\tfound=false; \\\n\twhile [ $$number -le $$timeout ] ; do \\\n\t\tpod=`kubectl get po -oname -l function=pubsub-nodejs`; \\\n\t\tlogs=`kubectl logs $$pod | grep $(DATA)`; \\\n        if [ \"$$logs\" != \"\" ]; then \\\n\t\t\tfound=true; \\\n\t\t\tbreak; \\\n\t\tfi; \\\n\t\tsleep 1; \\\n\t\tnumber=`expr $$number + 1`; \\\n\tdone; \\\n\t$$found\n\npubsub-ruby:\n\tkubeless topic create s3-ruby || true\n\tkubeless function deploy pubsub-ruby --runtime ruby2.4 --handler pubsub-ruby.handler --from-file ruby/hellowithdata.rb\n\tkubeless trigger kafka create pubsub-ruby --function-selector created-by=kubeless,function=pubsub-ruby --trigger-topic s3-ruby\n\npubsub-ruby-verify:\n\t$(eval DATA := $(shell mktemp -u -t XXXXXXXX))\n\tkubeless topic publish --topic s3-ruby --data '{\"payload\":\"$(DATA)\"}'\n\tnumber=\"1\"; \\\n\ttimeout=\"60\"; \\\n\tfound=false; \\\n\twhile [ $$number 
-le $$timeout ] ; do \\\n\t\tpod=`kubectl get po -oname -l function=pubsub-ruby`; \\\n\t\tlogs=`kubectl logs $$pod | grep $(DATA)`; \\\n    \tif [ \"$$logs\" != \"\" ]; then \\\n\t\t\tfound=true; \\\n\t\t\tbreak; \\\n\t\tfi; \\\n\t\tsleep 1; \\\n\t\tnumber=`expr $$number + 1`; \\\n\tdone; \\\n\t$$found\n\npubsub-go:\n\tkubeless topic create s3-go || true\n\tkubeless function deploy pubsub-go --runtime go1.14 --handler pubsub-go.Handler --from-file golang/hellowithdata.go\n\tkubeless trigger kafka create pubsub-go --function-selector created-by=kubeless,function=pubsub-go --trigger-topic s3-go\n\npubsub-go-verify:\n\t$(eval DATA := $(shell mktemp -u -t XXXXXXXX))\n\tkubeless topic publish --topic s3-go --data '{\"payload\":\"$(DATA)\"}'\n\tnumber=\"1\"; \\\n\ttimeout=\"60\"; \\\n\tfound=false; \\\n\twhile [ $$number -le $$timeout ] ; do \\\n\t\tpod=`kubectl get po -oname -l function=pubsub-go`; \\\n\t\tlogs=`kubectl logs $$pod | grep $(DATA)`; \\\n    \tif [ \"$$logs\" != \"\" ]; then \\\n\t\t\tfound=true; \\\n\t\t\tbreak; \\\n\t\tfi; \\\n\t\tsleep 1; \\\n\t\tnumber=`expr $$number + 1`; \\\n\tdone; \\\n\t$$found\n\n\npubsub: pubsub-python pubsub-nodejs pubsub-ruby\n\nget-java:\n\tkubeless function deploy get-java --runtime java1.8 --handler Foo.foo --from-file java/HelloGet.java\n\nget-java-verify:\n\tkubeless function call get-java |egrep Hello.world\n\npost-java:\n\tkubeless function deploy post-java --runtime java1.8  --handler Foo.foo --from-file java/HelloWithData.java\n\npost-java-verify:\n\tkubeless function call post-java --data '{\"its\": \"alive\"}'| egrep \"its.*alive\"\n\nget-java-deps:\n\tkubeless function deploy get-java-deps --runtime java1.8 --handler Hello.sayHello --from-file java/HelloWithDeps.java --dependencies java/pom.xml\n\nget-java-deps-verify:\n\tkubeless function call get-java-deps --data '{\"hello\": \"world\"}'\n\tkubectl logs --tail=1000 -l function=get-java-deps | grep -q '.*Hello.*world! 
Current local time is:'\n\nget-jvm-java:\n\tkubeless function deploy get-jvm-java --runtime jvm1.8 --from-file jvm/java/test-java-jvm.jar --handler io_ino_Handler.sayHello\n\nget-jvm-java-verify:\n\tkubeless function call get-jvm-java | grep \"Hello world\"\n\nget-nodejs-distroless:\n\tkubeless function deploy get-nodejs-distroless --runtime nodejs_distroless8 --handler helloget.foo --from-file nodejs/helloget.js\n\nget-nodejs-distroless-verify:\n\tkubeless function call get-nodejs-distroless |egrep hello.world\n\nget-nodejs-distroless-deps:\n\tkubeless function deploy get-nodejs-distroless-deps --runtime nodejs_distroless8 --handler helloget.handler --from-file nodejs/hellowithdeps.js --dependencies nodejs/package.json\n\nget-nodejs-distroless-deps-verify:\n\tkubeless function call get-nodejs-distroless-deps --data '{\"hello\": \"world\"}' | grep -q 'hello.*world.*date.*UTC'\n\nget-ballerina:\n\tkubeless function deploy get-ballerina --runtime ballerina0.981.0 --from-file ballerina/helloget.bal --handler helloget.foo\n\nget-ballerina-verify:\n\tkubeless function call get-ballerina |egrep Hello.World.Ballerina\n\nget-ballerina-custom-port:\n\tkubeless function deploy get-ballerina-custom-port --runtime ballerina0.981.0 --handler helloget.foo --from-file ballerina/helloget.bal --port 8083\n\nget-ballerina-custom-port-verify:\n\tkubectl get svc get-ballerina-custom-port -o yaml | grep 'targetPort: 8083'\n\tkubeless function call get-ballerina-custom-port |egrep Hello.World.Ballerina\n\nget-ballerina-data:\n\tkubeless function deploy get-ballerina-data --runtime ballerina0.981.0 --from-file ballerina/hellowithdata.bal --handler hellowithdata.foo\n\nget-ballerina-data-verify:\n\tkubeless function call get-ballerina-data --data '{\"hello\":\"world\"}' |egrep hello\n\nget-ballerina-conf:\n\tzip -r -j ballerina/bar.zip ballerina/hello_with_conf/\n\tkubeless function deploy get-ballerina-conf --runtime ballerina0.981.0 --from-file ballerina/bar.zip --handler 
hello_with_conf.bar\n\trm ballerina/bar.zip\n\nget-ballerina-conf-verify:\n\tkubeless function call get-ballerina-conf | egrep john\n"
  },
  {
    "path": "examples/README.md",
    "content": "# Examples\n\nThis directory contains basic examples for kubeless.\n\nSpecifically it contains examples that we can test quickly using the `Makefile`. Some of these examples are run during our integration tests.\n\nCheck the [Makefile](Makefile)\n\nThen run some of the examples like so:\n\n```\nmake post-python\n```\n\nOr a different runtime:\n\n```\nmake post-dotnetcore\n```\n\nOr a PubSub example:\n\n```\nmake pubsub-python\n```\n\n# Looking for more function examples?\n\nYou can find more examples at [https://github.com/kubeless/functions](https://github.com/kubeless/functions)\n"
  },
  {
    "path": "examples/ballerina/hello_with_conf/hello_with_conf.bal",
    "content": "import kubeless/kubeless;\nimport ballerina/io;\nimport ballerina/config;\n\npublic function bar(kubeless:Event event, kubeless:Context context) returns (string|error) {\n    io:println(event);\n    io:println(context);\n    return config:getAsString(\"hello.userid\");\n}\n"
  },
  {
    "path": "examples/ballerina/hello_with_conf/kubeless.toml",
    "content": "[hello]\nuserid=\"john@ballerina.com\"\n"
  },
  {
    "path": "examples/ballerina/helloget.bal",
    "content": "import kubeless/kubeless;\n\npublic function foo(kubeless:Event event, kubeless:Context context) returns (string|error) {\n    return \"Hello World Ballerina\";\n}\n"
  },
  {
    "path": "examples/ballerina/hellowithdata.bal",
    "content": "import kubeless/kubeless;\nimport ballerina/io;\n\npublic function foo(kubeless:Event event, kubeless:Context context) returns (string|error) {\n    io:println(event);\n    io:println(context);\n    return <string>event.data;\n}\n"
  },
  {
    "path": "examples/dotnetcore/dependency-yaml.cs",
    "content": "using System;\r\nusing Kubeless.Functions;\r\nusing YamlDotNet.Serialization;\r\n\r\npublic class module\r\n{\r\n    public string handler(Event k8Event, Context k8Context)\r\n    {\r\n        var person = new Person()\r\n        {\r\n            Name = \"Michael J. Fox\",\r\n            Age = 56\r\n        };\r\n\r\n        var serializer = new SerializerBuilder().Build();\r\n        return serializer.Serialize(person); // yaml\r\n    }\r\n}\r\n\r\npublic class Person\r\n{\r\n    public string Name { get; set; }\r\n    public int Age { get; set; }\r\n}"
  },
  {
    "path": "examples/dotnetcore/dependency-yaml.csproj",
    "content": "<Project Sdk=\"Microsoft.NET.Sdk\">\n\n  <PropertyGroup>\n    <TargetFramework>netstandard2.0</TargetFramework>\n  </PropertyGroup>\n\n  <ItemGroup>\n    <PackageReference Include=\"Kubeless.Functions\" Version=\"0.1.1\" />\n    <PackageReference Include=\"YamlDotNet\" Version=\"4.3.1\" />\n  </ItemGroup>\n\n</Project>\n"
  },
  {
    "path": "examples/dotnetcore/fibonacci.cs",
    "content": "using System;\r\nusing Kubeless.Functions;\r\n\r\npublic class module\r\n{\r\n    public int handler(Event k8Event, Context k8Context)\r\n    {\r\n        var n = int.Parse(k8Event.Data.ToString());\r\n\r\n        return fibonacci(n);\r\n    }\r\n    \r\n    public int fibonacci(int n)\r\n    {\r\n        if ((n == 0) || (n == 1))\r\n            return n;\r\n        return fibonacci(n - 1) + fibonacci(n - 2);\r\n    }\r\n}"
  },
  {
    "path": "examples/dotnetcore/fibonacci.csproj",
    "content": "<Project Sdk=\"Microsoft.NET.Sdk\">\n\n  <PropertyGroup>\n    <TargetFramework>netstandard2.0</TargetFramework>\n  </PropertyGroup>\n\n  <ItemGroup>\n    <PackageReference Include=\"Kubeless.Functions\" Version=\"0.1.1\" />\n  </ItemGroup>\n\n</Project>\n"
  },
  {
    "path": "examples/dotnetcore/helloget.cs",
    "content": "using System;\nusing Kubeless.Functions;\n\npublic class module\n{\n    public string handler(Event k8Event, Context k8Context)\n    {\n        return \"hello world\";\n    }\n}\n"
  },
  {
    "path": "examples/dotnetcore/helloget.csproj",
    "content": "<Project Sdk=\"Microsoft.NET.Sdk\">\n\n  <PropertyGroup>\n    <TargetFramework>netstandard2.0</TargetFramework>\n  </PropertyGroup>\n\n  <ItemGroup>\n    <PackageReference Include=\"Kubeless.Functions\" Version=\"0.1.1\" />\n  </ItemGroup>\n\n</Project>\n"
  },
  {
    "path": "examples/dotnetcore/hellowithdata.cs",
    "content": "using System;\nusing Kubeless.Functions;\n\npublic class module\n{\n    public object handler(Event k8Event, Context k8Context)\n    {\n        return k8Event.Data;\n    }\n}\n"
  },
  {
    "path": "examples/dotnetcore/hellowithdata.csproj",
    "content": "<Project Sdk=\"Microsoft.NET.Sdk\">\n\n  <PropertyGroup>\n    <TargetFramework>netstandard2.0</TargetFramework>\n  </PropertyGroup>\n\n  <ItemGroup>\n    <PackageReference Include=\"Kubeless.Functions\" Version=\"0.1.1\" />\n  </ItemGroup>\n\n</Project>\n"
  },
  {
    "path": "examples/golang/go.mod",
    "content": "module function\n\ngo 1.14\n\nrequire (\n\tgithub.com/sirupsen/logrus v1.6.0\n)\n"
  },
  {
    "path": "examples/golang/helloget.go",
    "content": "package kubeless\n\nimport (\n\t\"github.com/kubeless/kubeless/pkg/functions\"\n)\n\n// Foo sample function\nfunc Foo(event functions.Event, context functions.Context) (string, error) {\n\treturn \"Hello world!\", nil\n}\n"
  },
  {
    "path": "examples/golang/hellowithdata.go",
    "content": "package kubeless\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/kubeless/kubeless/pkg/functions\"\n)\n\n// Handler sample function with data\nfunc Handler(event functions.Event, context functions.Context) (string, error) {\n\tfmt.Println(event)\n\treturn event.Data, nil\n}\n"
  },
  {
    "path": "examples/golang/hellowithdeps.go",
    "content": "package kubeless\n\nimport (\n\t\"github.com/kubeless/kubeless/pkg/functions\"\n\t\"github.com/sirupsen/logrus\"\n)\n\n// Hello sample function with dependencies\nfunc Hello(event functions.Event, context functions.Context) (string, error) {\n\tlogrus.Info(event.Data)\n\treturn \"Hello world!\", nil\n}\n"
  },
  {
    "path": "examples/java/HelloGet.java",
    "content": "package io.kubeless;\n\nimport io.kubeless.Event;\nimport io.kubeless.Context;\n\npublic class Foo {\n    public String foo(io.kubeless.Event event, io.kubeless.Context context) {\n        return \"Hello world!\";\n    }\n}\n"
  },
  {
    "path": "examples/java/HelloWithData.java",
    "content": "package io.kubeless;\n\nimport io.kubeless.Event;\nimport io.kubeless.Context;\n\npublic class Foo {\n    public String foo(io.kubeless.Event event, io.kubeless.Context context) {\n        System.out.println(event.Data);\n        return event.Data;\n    }\n}\n"
  },
  {
    "path": "examples/java/HelloWithDeps.java",
    "content": "package io.kubeless;\n\nimport io.kubeless.Event;\nimport io.kubeless.Context;\n\nimport org.joda.time.LocalTime;\n\npublic class Hello {\n    public String sayHello(io.kubeless.Event event, io.kubeless.Context context) {\n        System.out.println(event.Data);\n        LocalTime currentTime = new LocalTime();\n        return \"Hello world! Current local time is: \" + currentTime;\n    }\n}\n"
  },
  {
    "path": "examples/java/pom.xml",
    "content": "<project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\">\n  <modelVersion>4.0.0</modelVersion>\n  <artifactId>function</artifactId>\n  <name>function</name>\n  <version>1.0-SNAPSHOT</version>\n  <dependencies>\n     <dependency>\n       <groupId>joda-time</groupId>\n       <artifactId>joda-time</artifactId>\n       <version>2.9.2</version>\n     </dependency>\n      <dependency>\n          <groupId>io.kubeless</groupId>\n          <artifactId>params</artifactId>\n          <version>1.0-SNAPSHOT</version>\n      </dependency>\n  </dependencies>\n  <parent>\n    <groupId>io.kubeless</groupId>\n    <artifactId>kubeless</artifactId>\n    <version>1.0-SNAPSHOT</version>\n  </parent>\n</project>\n"
  },
  {
    "path": "examples/jvm/Readme.md",
    "content": "# JVM exampels\nThese are examples to run compiled jvm code in kubeless.\nThey should serve as a template to be able to use other languages."
  },
  {
    "path": "examples/jvm/java/Readme.md",
    "content": "# Java on runtime JVM\n\n`gradle shadowJar` Build the jar with all deps\n\n`kubeless function deploy test --runtime jvm1.8 --from-file build/libs/jvm-test-0.1-all.jar --handler io_ino_Handler.sayHello` The package name use `_` instead of `.` for the path.\n"
  },
  {
    "path": "examples/jvm/java/build.gradle",
    "content": "buildscript {\n    repositories {\n        jcenter()\n    }\n    dependencies {\n        classpath 'com.github.jengelman.gradle.plugins:shadow:2.0.4'\n    }\n}\n\napply plugin: 'java'\napply plugin: 'com.github.johnrengelman.shadow'\nversion = '0.1'\njar {\n    manifest {\n        attributes 'Implementation-Title': 'jvm-test',\n                   'Implementation-Version': version\n    }\n}\n\n\nrepositories {\n    mavenCentral()\n}\n\ndependencies {\n    compile group: 'log4j', name: 'log4j', version: '1.2.17'\n    compile group: 'de.inoio.kubeless', name: 'jvm-runtime', version: '0.1'\n\n    testCompile group: 'junit', name: 'junit', version: '4.+'\n}"
  },
  {
    "path": "examples/jvm/java/src/main/java/io/ino/Handler.java",
    "content": "package io.ino;\n\npublic class Handler {\n        public String sayHello(io.kubeless.Event event, io.kubeless.Context context) {\n            System.out.println(event.toString());\n            return \"Hello world! AFDFCH\";\n        }\n}"
  },
  {
    "path": "examples/jvm/scala/Readme.md",
    "content": "# Scala on runtime JVM\n\n!! WIP the jar-file is to large for the storage backend, you have to pass a url to the jar file.\n\n`sbt assembly` Build the jar with all deps\n\n`kubeless function deploy testscala --runtime jvm1.8 --from-file target/scala-2.12/scala-test.jar --handler de_inoio_Handler.fooBar` The package name use `_` instead of `.` for the path.\n"
  },
  {
    "path": "examples/jvm/scala/build.sbt",
    "content": "\nassemblyJarName in assembly := \"scala-test.jar\"\norganization := \"de.inoio\"\nscalaVersion := \"2.12.1\"\nlibraryDependencies += \"de.inoio.kubeless\" % \"jvm-runtime\" % \"0.1\""
  },
  {
    "path": "examples/jvm/scala/project/assembly.sbt",
    "content": "addSbtPlugin(\"com.eed3si9n\" % \"sbt-assembly\" % \"0.14.7\")\n"
  },
  {
    "path": "examples/jvm/scala/project/build.properties",
    "content": "sbt.version=0.13.15\n"
  },
  {
    "path": "examples/jvm/scala/src/main/scala/de/inoio/Handler.scala",
    "content": "package de.inoio\n\nimport io.kubeless.{Context, Event}\n\nclass Handler {\n  def fooBar(event: Event, context: Context): String = {\n    \"FOO Bar aus Hamburg\"\n  }\n}\n"
  },
  {
    "path": "examples/nodejs/function.yaml",
    "content": "---\napiVersion: kubeless.io/v1beta1\nkind: Function\nmetadata:\n  name: hello\nspec:\n  handler: handler.hello\n  runtime: nodejs6\n  function: |\n      module.exports = {\n        hello: function(event, context) {\n          return 'Hello World'\n        }\n      }\n"
  },
  {
    "path": "examples/nodejs/function1.yaml",
    "content": "---\napiVersion: kubeless.io/v1beta1\nkind: Function\nmetadata:\n  name: hello\nspec:\n  handler: handler.foobar\n  runtime: nodejs8\n  function: |\n      module.exports = {\n        foobar: function (event, context) {\n          return(event.data)\n        }\n      }\n"
  },
  {
    "path": "examples/nodejs/helloget.js",
    "content": "module.exports = {\n  foo: function (event, context) {\n    return 'hello world!';\n  }\n}\n"
  },
  {
    "path": "examples/nodejs/hellostream.js",
    "content": "const from = require('from2');\nconst eos = require('end-of-stream');\n\nfunction fromString(string) {\n    return from(function(size, next) {\n        if (string.length <= 0) return next(null, null);\n\n        const chunk = string.slice(0, size);\n        string = string.slice(size);\n\n        next(null, chunk);\n  });\n}\n\nmodule.exports = {\n    foo: function(event, context) {\n        return new Promise((resolve, reject) => {\n            const {response} = event.extensions;\n            const stream = fromString('hello world!');\n\n            eos(stream, err => err ? reject(err) : resolve(stream));\n\n            response.setHeader('Content-Type', 'text/event-stream; charset=utf-8');\n            stream.pipe(response);\n        });\n    }\n}\n"
  },
  {
    "path": "examples/nodejs/hellowithdata.js",
    "content": "module.exports = {\n  handler: (event, context) => {\n    console.log(event);\n    return event.data;\n  },\n};\n"
  },
  {
    "path": "examples/nodejs/hellowithdeps.js",
    "content": "'use strict';\n\nconst _ = require('lodash');\n\nmodule.exports = {\n    handler: (event, context) => {\n        _.assign(event.data, {date: new Date().toTimeString()})\n        return JSON.stringify(event.data);\n    },\n};\n"
  },
  {
    "path": "examples/nodejs/index.js",
    "content": "'use strict';\n\nmodule.exports = {\n    helloGet: require('./helloget').foo,\n    helloWithData: require('./hellowithdata').handler,\n}\n"
  },
  {
    "path": "examples/nodejs/package.json",
    "content": "{\n    \"name\": \"hellowithdeps\",\n    \"version\": \"0.0.1\",\n    \"dependencies\": {\n        \"end-of-stream\": \"^1.4.1\",\n        \"from2\": \"^2.3.0\",\n        \"lodash\": \"^4.17.5\"\n    }\n}\n"
  },
  {
    "path": "examples/php/composer.json",
    "content": "{\n  \"require\": {\n      \"monolog/monolog\": \"^1.23\"\n  }\n}\n"
  },
  {
    "path": "examples/php/helloget.php",
    "content": "<?php\n\nfunction foo($event, $context) {\n  return \"hello world\";\n}\n\n"
  },
  {
    "path": "examples/php/hellowithdata.php",
    "content": "<?php\n\nfunction foo($event, $context) {\n  return json_encode($event->data);\n}\n\n"
  },
  {
    "path": "examples/php/hellowithdeps.php",
    "content": "<?php\n\nrequire 'vendor/autoload.php';\n\nuse Monolog\\Logger;\nuse Monolog\\Handler\\StreamHandler;\n\nfunction foo($event, $context) {\n  // create a log channel\n  $log = new Logger('name');\n  $log->pushHandler(new StreamHandler(\"php://stdout\", Logger::INFO));\n\n  // add records to the log\n  $log->info('Hello');\n  $log->info('World');\n  return \"hello world\";\n}\n"
  },
  {
    "path": "examples/python/Dockerfile",
    "content": "# Create a custom image with a python function\nFROM kubeless/python@sha256:565bebecb08d9a7b804c588105677a3572f10ff2032cef7727975061a653fb98\nENV FUNC_HANDLER=foo \\\n    MOD_NAME=helloget\nADD helloget.py /\nRUN mkdir -p /kubeless/\nRUN chown 1000:1000 /kubeless\nENTRYPOINT [ \"bash\", \"-c\", \"cp /helloget.py /kubeless/ && python3.7 /kubeless.py\"]\n"
  },
  {
    "path": "examples/python/function.yaml",
    "content": "---\napiVersion: k8s.io/v1\nkind: Function\nmetadata:\n  name: function\nspec:\n  handler: hello.handler\n  runtime: python3.7\n  function: |\n      import json\n      def handler():\n              return \"hello world\"\n"
  },
  {
    "path": "examples/python/function1.yaml",
    "content": "---\napiVersion: kubeless.io/v1beta1\nkind: Function\nmetadata:\n  name: function1\nspec:\n  handler: hello.foobar\n  runtime: python3.7\n  deps: |\n    cowpy\n  function: |\n      import time\n      import random\n      from cowpy import cow\n      def foobar():\n        # NB: delay will be negative and sleep will raise an error\n        # occasionally.  This is a feature for demoing errors.\n        delay = random.normalvariate(0.3, 0.2)\n        time.sleep(delay)\n        msg = \"hello world - with a %0.2fs artificial delay\" % delay\n        c = cow.get_cow()\n        return c().milk(msg)\n"
  },
  {
    "path": "examples/python/helloget.py",
    "content": "def foo(event, context):\n    return \"hello world\"\n"
  },
  {
    "path": "examples/python/hellowithdata.py",
    "content": "def handler(event, context):\n    print(event)\n    return event['data']\n"
  },
  {
    "path": "examples/python/hellowithdeps.py",
    "content": "from hellowithdepshelper import foo"
  },
  {
    "path": "examples/python/hellowithdepshelper.py",
    "content": "from bs4 import BeautifulSoup\nimport urllib.request\n\ndef foo(event, context):\n    page = urllib.request.urlopen(\"https://www.google.com/\").read()\n    soup = BeautifulSoup(page, 'html.parser')\n    return soup.title.string\n"
  },
  {
    "path": "examples/python/requirements.txt",
    "content": "bs4"
  },
  {
    "path": "examples/ruby/Gemfile",
    "content": "source 'https://rubygems.org'\n\ngem 'logging'\n"
  },
  {
    "path": "examples/ruby/function.yaml",
    "content": "---\napiVersion: kubeless.io/v1beta1\nkind: Function\nmetadata:\n  name: function\nspec:\n  handler: test.run\n  runtime: ruby2.4\n  function: |\n    # Obtains the latest Kubeless release published\n    def run(event, context)\n      require \"net/https\"\n      require \"uri\"\n      require \"json\"\n\n      # Fetch release info\n      uri = URI.parse(\"https://api.github.com/repos/kubeless/kubeless/releases\")\n      http = Net::HTTP.new(uri.host, uri.port)\n      request = Net::HTTP::Get.new(uri.request_uri)\n      http.use_ssl = true\n      response = http.request(request)\n\n      # Parse response\n      output = JSON.parse(response.body)\n\n      # Create a Hash for output\n      output_hash = { version: output.first['name'] }\n\n      # Print the stuff (JSON)\n      puts JSON.pretty_generate(output_hash)\n\n      return output_hash[:version]\n    end\n"
  },
  {
    "path": "examples/ruby/helloget.rb",
    "content": "def foo(event, context)\n  \"hello world\"\nend\n"
  },
  {
    "path": "examples/ruby/hellowithdata.rb",
    "content": "def handler(event, context)\n  puts event\n  JSON.generate(event[:data])\nend\n"
  },
  {
    "path": "examples/ruby/hellowithdeps.rb",
    "content": "require 'logging'\n\ndef foo(event, context)\n  logging = Logging.logger(STDOUT)\n  logging.info \"it works!\"\n  \"hello world\"\nend\n"
  },
  {
    "path": "examples/ruby/latest.rb",
    "content": "# Obtains the latest Kubeless release published \ndef handler(event, context)\n  require \"net/https\"\n  require \"uri\"\n  require \"json\"\n\n  # Fetch release info\n  uri = URI.parse(\"https://api.github.com/repos/kubeless/kubeless/releases\")\n  http = Net::HTTP.new(uri.host, uri.port)\n  request = Net::HTTP::Get.new(uri.request_uri)\n  http.use_ssl = true\n  response = http.request(request)\n\n  # Parse response\n  output = JSON.parse(response.body)\n\n  # Create a Hash for output \n  output_hash = { version: output.first['name'] }\n\n  # Print the stuff (JSON)\n  puts JSON.pretty_generate(output_hash)\nend\n"
  },
  {
    "path": "go.mod",
    "content": "module github.com/kubeless/kubeless\n\ngo 1.12\n\nrequire (\n\tgithub.com/Azure/go-autorest v8.0.0+incompatible // indirect\n\tgithub.com/aws/aws-sdk-go v1.16.26\n\tgithub.com/coreos/prometheus-operator v0.0.0-20171201110357-197eb012d973\n\tgithub.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect\n\tgithub.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32\n\tgithub.com/golang/glog v0.0.0-20160126235308-23def4e6c14b\n\tgithub.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect\n\tgithub.com/googleapis/gnostic v0.2.0 // indirect\n\tgithub.com/gophercloud/gophercloud v0.0.0-20190130105114-cc9c99918988 // indirect\n\tgithub.com/gosuri/uitable v0.0.0-20160404203958-36ee7e946282\n\tgithub.com/imdario/mergo v0.3.7\n\tgithub.com/kubeless/cronjob-trigger v1.0.2\n\tgithub.com/kubeless/http-trigger v1.0.0\n\tgithub.com/kubeless/kafka-trigger v1.0.1\n\tgithub.com/kubeless/kinesis-trigger v0.0.0-20180817123215-a548c3d1cbd9\n\tgithub.com/kubeless/nats-trigger v0.0.0-20180817123246-372a5fa547dc\n\tgithub.com/mattn/go-runewidth v0.0.4 // indirect\n\tgithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect\n\tgithub.com/nats-io/gnatsd v1.4.1 // indirect\n\tgithub.com/nats-io/go-nats v1.7.0\n\tgithub.com/nats-io/nkeys v0.0.2 // indirect\n\tgithub.com/nats-io/nuid v1.0.0 // indirect\n\tgithub.com/pkg/errors v0.8.1 // indirect\n\tgithub.com/prometheus/client_golang v0.9.3\n\tgithub.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 // indirect\n\tgithub.com/prometheus/common v0.4.0\n\tgithub.com/robfig/cron v0.0.0-20180505203441-b41be1df6967\n\tgithub.com/sirupsen/logrus v1.2.0\n\tgithub.com/spf13/cobra v1.1.1\n\tgolang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d // indirect\n\tgolang.org/x/net v0.0.0-20190620200207-3b0461eec859\n\tgopkg.in/yaml.v2 v2.4.0 // indirect\n\tk8s.io/api v0.0.0-20180308224125-73d903622b73\n\tk8s.io/apiextensions-apiserver v0.0.0-20180327033742-750feebe2038\n\tk8s.io/apimachinery 
v0.0.0-20180228050457-302974c03f7e\n\tk8s.io/client-go v7.0.0+incompatible\n)\n"
  },
  {
    "path": "go.sum",
    "content": "cloud.google.com/go v0.0.0-20160913182117-3b1ae45394a2/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ncloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ncloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ncloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ncloud.google.com/go v0.35.1 h1:LMe/Btq0Eijsc97JyBwMc0KMXOe0orqAMdg7/EkywN8=\ncloud.google.com/go v0.35.1/go.mod h1:wfjPZNvXCBYESy3fIynybskMP48KVPrjSPCnXiK7Prg=\ncloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=\ncloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=\ncloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=\ncloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=\ncloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y=\ncloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=\ncloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=\ncloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=\ncloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=\ncloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=\ncloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=\ndmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=\ndmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=\ndmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=\ndmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod 
h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=\ndmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=\ngit.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=\ngithub.com/Azure/go-autorest v8.0.0+incompatible h1:lgmv/yX7Zgt1TJEYG8DHCqc0zw5FkYevByNVIm77JNM=\ngithub.com/Azure/go-autorest v8.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=\ngithub.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=\ngithub.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=\ngithub.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=\ngithub.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=\ngithub.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=\ngithub.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=\ngithub.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=\ngithub.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=\ngithub.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=\ngithub.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=\ngithub.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=\ngithub.com/aws/aws-sdk-go v1.16.26 h1:GWkl3rkRO/JGRTWoLLIqwf7AWC4/W/1hMOUZqmX0js4=\ngithub.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=\ngithub.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 
h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=\ngithub.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=\ngithub.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=\ngithub.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=\ngithub.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=\ngithub.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=\ngithub.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=\ngithub.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=\ngithub.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=\ngithub.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=\ngithub.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=\ngithub.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=\ngithub.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=\ngithub.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=\ngithub.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=\ngithub.com/coreos/prometheus-operator v0.0.0-20171201110357-197eb012d973 h1:7a78CgFQnnKoQomLoxGgKMaUp7QO9amd/IrifrECbmY=\ngithub.com/coreos/prometheus-operator v0.0.0-20171201110357-197eb012d973/go.mod h1:SO+r5yZUacDFPKHfPoUjI3hMsH+ZUdiuNNhuSq3WoSg=\ngithub.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=\ngithub.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=\ngithub.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=\ngithub.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=\ngithub.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=\ngithub.com/emicklei/go-restful-swagger12 v0.0.0-20170208215640-dcef7f557305/go.mod h1:qr0VowGBT4CS4Q8vFF8BSeKz34PuqKGxs/L0IAQA9DQ=\ngithub.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=\ngithub.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=\ngithub.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=\ngithub.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=\ngithub.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=\ngithub.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=\ngithub.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew=\ngithub.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I=\ngithub.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=\ngithub.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=\ngithub.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=\ngithub.com/go-logfmt/logfmt v0.3.0/go.mod 
h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=\ngithub.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=\ngithub.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=\ngithub.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=\ngithub.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=\ngithub.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=\ngithub.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=\ngithub.com/gogo/protobuf v0.0.0-20170330071051-c0656edd0d9e/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=\ngithub.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=\ngithub.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI=\ngithub.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=\ngithub.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=\ngithub.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=\ngithub.com/golang/glog v0.0.0-20141105023935-44145f04b68c/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=\ngithub.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=\ngithub.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=\ngithub.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=\ngithub.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=\ngithub.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=\ngithub.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=\ngithub.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=\ngithub.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=\ngithub.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=\ngithub.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=\ngithub.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=\ngithub.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=\ngithub.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=\ngithub.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=\ngithub.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck=\ngithub.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=\ngithub.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=\ngithub.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=\ngithub.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod 
h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=\ngithub.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=\ngithub.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=\ngithub.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=\ngithub.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=\ngithub.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=\ngithub.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=\ngithub.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=\ngithub.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=\ngithub.com/gophercloud/gophercloud v0.0.0-20190130105114-cc9c99918988 h1:fajr0WpQtCjYtwtH5zivs/sXvMcPcT/ebx+HdyD11NA=\ngithub.com/gophercloud/gophercloud v0.0.0-20190130105114-cc9c99918988/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=\ngithub.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=\ngithub.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=\ngithub.com/gosuri/uitable v0.0.0-20160404203958-36ee7e946282 h1:KFqmdzEPbU7Uck2tn50t+HQXZNVkxe8M9qRb/ZoSHaE=\ngithub.com/gosuri/uitable v0.0.0-20160404203958-36ee7e946282/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=\ngithub.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=\ngithub.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=\ngithub.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=\ngithub.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod 
h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=\ngithub.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=\ngithub.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=\ngithub.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=\ngithub.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=\ngithub.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=\ngithub.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=\ngithub.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=\ngithub.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=\ngithub.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=\ngithub.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=\ngithub.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=\ngithub.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=\ngithub.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=\ngithub.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=\ngithub.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=\ngithub.com/hashicorp/golang-lru v0.0.0-20160207214719-a0d98a5f2880/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=\ngithub.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=\ngithub.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=\ngithub.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=\ngithub.com/hashicorp/golang-lru v0.5.1/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=\ngithub.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=\ngithub.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=\ngithub.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=\ngithub.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=\ngithub.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=\ngithub.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c h1:kQWxfPIHVLbgLzphqk3QUflDy9QdksZR4ygR807bpy0=\ngithub.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=\ngithub.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=\ngithub.com/imdario/mergo v0.0.0-20180119215619-163f41321a19/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=\ngithub.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=\ngithub.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=\ngithub.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=\ngithub.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=\ngithub.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=\ngithub.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=\ngithub.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=\ngithub.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=\ngithub.com/json-iterator/go v0.0.0-20170829155851-36b14963da70/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=\ngithub.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE=\ngithub.com/json-iterator/go v1.1.5/go.mod 
h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=\ngithub.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=\ngithub.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=\ngithub.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=\ngithub.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=\ngithub.com/juju/ratelimit v0.0.0-20170523012141-5b9ff8664717/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=\ngithub.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=\ngithub.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=\ngithub.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=\ngithub.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=\ngithub.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=\ngithub.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=\ngithub.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=\ngithub.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=\ngithub.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=\ngithub.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=\ngithub.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=\ngithub.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=\ngithub.com/kubeless/cronjob-trigger v1.0.2 h1:/hAkCMY7dTeP8oPo2lmPWmuEdQVcydPmUc10EfwGYaQ=\ngithub.com/kubeless/cronjob-trigger v1.0.2/go.mod h1:Ktn0pfVcg2EG6XoV7MNBlsiyKm/RyUu87oRqdpMR1qM=\ngithub.com/kubeless/http-trigger v1.0.0 h1:CciPHVu1Rf8oi67GOdMmhySILHYxxQDndiDDm+VoYfw=\ngithub.com/kubeless/http-trigger v1.0.0/go.mod 
h1:a3DdjXl1CXccRLyiM4BeYpwW4Pt6q2viEm0mI6Wvaps=\ngithub.com/kubeless/kafka-trigger v1.0.1 h1:XcKCe92i/+hww8fb2gNNAgCBKQNzuing9h97d1G8Jeg=\ngithub.com/kubeless/kafka-trigger v1.0.1/go.mod h1:giDA+x4a/T6o0vWhHvZkas6N4B/cMjOv7fb3hnorMUI=\ngithub.com/kubeless/kinesis-trigger v0.0.0-20180817123215-a548c3d1cbd9 h1:D+VuPkR46FGkP2dvH49fTF0dF/+Kz98H3Wy9BD3+ZGg=\ngithub.com/kubeless/kinesis-trigger v0.0.0-20180817123215-a548c3d1cbd9/go.mod h1:Zz4cU6vaCS71yy+DpAx3/Y2HeV0RsJ3f+/5WCSeYq24=\ngithub.com/kubeless/kubeless v1.0.0-alpha.6/go.mod h1:eBSqNpFBgiemDH1gmDcIndBDbGgoZJobww4ZaFK9N1k=\ngithub.com/kubeless/nats-trigger v0.0.0-20180817123246-372a5fa547dc h1:64KDKAkb6xOt+Os36M9nblvBdWpMKNsQdc542wurU0E=\ngithub.com/kubeless/nats-trigger v0.0.0-20180817123246-372a5fa547dc/go.mod h1:VgX8QhZAcW/DUMyMZbdfodjzLfZCZwSxd2q8XKNi1rs=\ngithub.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=\ngithub.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=\ngithub.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=\ngithub.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=\ngithub.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=\ngithub.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=\ngithub.com/matttproud/golang_protobuf_extensions v0.0.0-20150406173934-fc2b8d3a73c4/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=\ngithub.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=\ngithub.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=\ngithub.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=\ngithub.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=\ngithub.com/mitchellh/cli v1.0.0/go.mod 
h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=\ngithub.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=\ngithub.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=\ngithub.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=\ngithub.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=\ngithub.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=\ngithub.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=\ngithub.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=\ngithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=\ngithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=\ngithub.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=\ngithub.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=\ngithub.com/nats-io/gnatsd v1.4.1 h1:RconcfDeWpKCD6QIIwiVFcvForlXpWeJP7i5/lDLy44=\ngithub.com/nats-io/gnatsd v1.4.1/go.mod h1:nqco77VO78hLCJpIcVfygDP2rPGfsEHkGTUk94uh5DQ=\ngithub.com/nats-io/go-nats v1.7.0 h1:oQOfHcLr8hb43QG8yeVyY2jtarIaTjOv41CGdF3tTvQ=\ngithub.com/nats-io/go-nats v1.7.0/go.mod h1:+t7RHT5ApZebkrQdnn6AhQJmhJJiKAvJUio1PiiCtj0=\ngithub.com/nats-io/nkeys v0.0.2 h1:+qM7QpgXnvDDixitZtQUBDY9w/s9mu1ghS+JIbsrx6M=\ngithub.com/nats-io/nkeys v0.0.2/go.mod 
h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4=\ngithub.com/nats-io/nuid v1.0.0 h1:44QGdhbiANq8ZCbUkdn6W5bqtg+mHuDE4wOUuxxndFs=\ngithub.com/nats-io/nuid v1.0.0/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=\ngithub.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=\ngithub.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=\ngithub.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=\ngithub.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=\ngithub.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=\ngithub.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=\ngithub.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=\ngithub.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=\ngithub.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=\ngithub.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=\ngithub.com/pkg/errors v0.0.0-20180311214515-816c9085562c/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=\ngithub.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=\ngithub.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=\ngithub.com/prometheus/client_golang v0.9.1 
h1:K47Rk0v/fkEfwfQet2KWhscE0cJzjgCCDBG2KHZoVno=\ngithub.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=\ngithub.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8=\ngithub.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=\ngithub.com/prometheus/client_model v0.0.0-20150212101744-fa8ad6fec335/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=\ngithub.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=\ngithub.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=\ngithub.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/common v0.0.0-20170427095455-13ba4ddd0caa/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=\ngithub.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=\ngithub.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=\ngithub.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU=\ngithub.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=\ngithub.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM=\ngithub.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=\ngithub.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=\ngithub.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=\ngithub.com/prometheus/procfs v0.0.0-20190129233650-316cf8ccfec5 h1:Etei0Wx6pooT/DeOKcGTr1M/01ggz95Ajq8BBwCOKBU=\ngithub.com/prometheus/procfs v0.0.0-20190129233650-316cf8ccfec5/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=\ngithub.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY=\ngithub.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=\ngithub.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=\ngithub.com/robfig/cron v0.0.0-20180505203441-b41be1df6967 h1:x7xEyJDP7Hv3LVgvWhzioQqbC/KtuUhTigKlH/8ehhE=\ngithub.com/robfig/cron v0.0.0-20180505203441-b41be1df6967/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=\ngithub.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=\ngithub.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=\ngithub.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=\ngithub.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=\ngithub.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=\ngithub.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=\ngithub.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=\ngithub.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=\ngithub.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=\ngithub.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=\ngithub.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=\ngithub.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=\ngithub.com/shurcooL/gofontwoff 
v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=\ngithub.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=\ngithub.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=\ngithub.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=\ngithub.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=\ngithub.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=\ngithub.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=\ngithub.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=\ngithub.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=\ngithub.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=\ngithub.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=\ngithub.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=\ngithub.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=\ngithub.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=\ngithub.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=\ngithub.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=\ngithub.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod 
h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=\ngithub.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=\ngithub.com/sirupsen/logrus v0.0.0-20180129181852-768a92a02685/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=\ngithub.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=\ngithub.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=\ngithub.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=\ngithub.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=\ngithub.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=\ngithub.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=\ngithub.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=\ngithub.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=\ngithub.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=\ngithub.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=\ngithub.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=\ngithub.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=\ngithub.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=\ngithub.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=\ngithub.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=\ngithub.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=\ngithub.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=\ngithub.com/spf13/pflag v1.0.3 
h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=\ngithub.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=\ngithub.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=\ngithub.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=\ngithub.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=\ngithub.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=\ngithub.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=\ngithub.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=\ngithub.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=\ngithub.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=\ngithub.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=\ngithub.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=\ngo.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=\ngo.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=\ngo.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=\ngo.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=\ngo.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=\ngo.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=\ngo.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=\ngo4.org v0.0.0-20180809161055-417644f6feb5/go.mod 
h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=\ngolang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=\ngolang.org/x/crypto v0.0.0-20170825220121-81e90905daef/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=\ngolang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=\ngolang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=\ngolang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=\ngolang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=\ngolang.org/x/crypto v0.0.0-20190130090550-b01c7a725664 h1:YbZJ76lQ1BqNhVe7dKTSB67wDrc2VPRR75IyGyyPDX8=\ngolang.org/x/crypto v0.0.0-20190130090550-b01c7a725664/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU=\ngolang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=\ngolang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=\ngolang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=\ngolang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod 
h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=\ngolang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=\ngolang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=\ngolang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=\ngolang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=\ngolang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=\ngolang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=\ngolang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net 
v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3 h1:ulvT7fqt0yHWzpJwI57MezWnYDVpCAYBVuYst/L+fAY=\ngolang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=\ngolang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=\ngolang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/oauth2 v0.0.0-20170412232759-a6bd8cefa181/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=\ngolang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=\ngolang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=\ngolang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=\ngolang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1 h1:VeAkjQVzKLmu+JnFcK96TPbkuaTIqwGGAzQ9hgwPjVg=\ngolang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=\ngolang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=\ngolang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=\ngolang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190130150945-aca44879d564 h1:o6ENHFwwr1TZ9CUPQcfo1HGvLP1OPsPOTB7xCIOPNmU=\ngolang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc=\ngolang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To=\ngolang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=\ngolang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=\ngolang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20181108054448-85acf8d2951c 
h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=\ngolang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=\ngolang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=\ngolang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngoogle.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=\ngoogle.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=\ngoogle.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=\ngoogle.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=\ngoogle.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=\ngoogle.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=\ngoogle.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=\ngoogle.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=\ngoogle.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=\ngoogle.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=\ngoogle.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=\ngoogle.golang.org/genproto 
v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=\ngoogle.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=\ngoogle.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=\ngoogle.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=\ngoogle.golang.org/genproto v0.0.0-20190122154452-ba6ebe99b011/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=\ngoogle.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=\ngoogle.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=\ngoogle.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=\ngoogle.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=\ngoogle.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=\ngoogle.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=\ngoogle.golang.org/grpc v1.21.1/go.mod 
h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=\ngopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=\ngopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=\ngopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=\ngopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=\ngopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=\ngopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=\ngopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=\ngopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=\ngopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=\ngopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=\ngopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=\ngopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=\ngopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=\ngopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=\ngopkg.in/yaml.v2 v2.2.8/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=\ngopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=\ngrpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=\nhonnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=\nk8s.io/api v0.0.0-20180103175015-389dfa299845/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=\nk8s.io/api v0.0.0-20180308224125-73d903622b73 h1:5Z+PFfTIOXwKmOhQtZ0WBykbpGBBOuvbDx2YNAqIoYc=\nk8s.io/api v0.0.0-20180308224125-73d903622b73/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=\nk8s.io/apiextensions-apiserver v0.0.0-20180103181712-d0becfa6529e/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE=\nk8s.io/apiextensions-apiserver v0.0.0-20180327033742-750feebe2038 h1:VcfogrrvSU1RneMsMUOMf+1o5fN+SFcSrMw3I/yv3LU=\nk8s.io/apiextensions-apiserver v0.0.0-20180327033742-750feebe2038/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE=\nk8s.io/apimachinery v0.0.0-20180103174757-bc110fd540ab/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=\nk8s.io/apimachinery v0.0.0-20180228050457-302974c03f7e h1:CsgbEA8905OlpVLNKWD4GacPex50kFbqhotVNPew+dU=\nk8s.io/apimachinery v0.0.0-20180228050457-302974c03f7e/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=\nk8s.io/client-go v5.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=\nk8s.io/client-go v7.0.0+incompatible 
h1:kiH+Y6hn+pc78QS/mtBfMJAMIIaWevHi++JvOGEEQp4=\nk8s.io/client-go v7.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=\nk8s.io/kube-openapi v0.0.0-20170830100654-868f2f29720b/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=\nrsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=\nsourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=\nsourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=\n"
  },
  {
    "path": "hack/boilerplate.go.txt",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n"
  },
  {
    "path": "hack/update-codegen.sh",
    "content": "#!/bin/bash\n\n# Copyright 2017 The Kubernetes Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -o errexit\nset -o nounset\nset -o pipefail\n\nSCRIPT_ROOT=$(dirname ${BASH_SOURCE})/..\nCODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}\n\n# generate the code with:\n# --output-base    because this script should also be able to run inside the vendor dir of\n#                  k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir\n#                  instead of the $GOPATH directly. For normal projects this can be dropped.\n\n### Workaround for issue: https://github.com/kubernetes/code-generator/issues/6\nmkdir -p ${GOPATH}/src/k8s.io/kubernetes/hack/boilerplate \ncp ${SCRIPT_ROOT}/hack/boilerplate.go.txt ${GOPATH}/src/k8s.io/kubernetes/hack/boilerplate/\n\n${CODEGEN_PKG}/generate-groups.sh \"deepcopy,client,informer,lister\" \\\n  github.com/kubeless/kubeless/pkg/client github.com/kubeless/kubeless/pkg/apis \\\n  kubeless:v1beta1 \n"
  },
  {
    "path": "kubeless-non-rbac.jsonnet",
    "content": "local k = import \"ksonnet.beta.1/k.libsonnet\";\nlocal runtimesSrc = import \"runtimes.jsonnet\";\n\nlocal objectMeta = k.core.v1.objectMeta;\nlocal deployment = k.apps.v1beta1.deployment;\nlocal container = k.core.v1.container;\nlocal service = k.core.v1.service;\nlocal serviceAccount = k.core.v1.serviceAccount;\nlocal configMap = k.core.v1.configMap;\n\nlocal namespace = \"kubeless\";\nlocal controller_account_name = \"controller-acct\";\n\nlocal controllerEnv = [\n  {\n    name: \"KUBELESS_INGRESS_ENABLED\",\n    valueFrom: {configMapKeyRef: {\"name\": \"kubeless-config\", key: \"ingress-enabled\"}}\n  },\n  {\n    name: \"KUBELESS_SERVICE_TYPE\",\n    valueFrom: {configMapKeyRef: {\"name\": \"kubeless-config\", key: \"service-type\"}}\n    },\n  {\n     name: \"KUBELESS_NAMESPACE\",\n     valueFrom: {fieldRef: {fieldPath: \"metadata.namespace\"}}\n   },\n   {\n     name: \"KUBELESS_CONFIG\",\n     value: \"kubeless-config\"\n   },\n];\n\nlocal functionControllerContainer =\n  container.default(\"kubeless-function-controller\", \"kubeless/function-controller:latest\") +\n  container.imagePullPolicy(\"IfNotPresent\") +\n  container.env(controllerEnv);\n\nlocal httpTriggerControllerContainer =\n  container.default(\"http-trigger-controller\", \"kubeless/http-trigger-controller:v1.0.3\") +\n  container.imagePullPolicy(\"IfNotPresent\") +\n  container.env(controllerEnv);\n\nlocal cronjobTriggerContainer =\n  container.default(\"cronjob-trigger-controller\", \"kubeless/cronjob-trigger-controller:v1.0.3\") +\n  container.imagePullPolicy(\"IfNotPresent\") +\n  container.env(controllerEnv);\n\nlocal kubelessLabel = {kubeless: \"controller\"};\n\nlocal controllerAccount =\n  serviceAccount.default(controller_account_name, namespace);\n\nlocal controllerDeployment =\n  deployment.default(\"kubeless-controller-manager\", [functionControllerContainer, httpTriggerControllerContainer, cronjobTriggerContainer], namespace) +\n  {apiVersion: \"apps/v1\"} +\n  
{metadata+:{labels: kubelessLabel}} +\n  {spec+: {selector: {matchLabels: kubelessLabel}}} +\n  {spec+: {template+: {spec+: {serviceAccountName: controllerAccount.metadata.name}}}} +\n  {spec+: {template+: {metadata: {labels: kubelessLabel}}}};\n\nlocal crd = [\n  {\n    apiVersion: \"apiextensions.k8s.io/v1beta1\",\n    kind: \"CustomResourceDefinition\",\n    metadata: objectMeta.name(\"functions.kubeless.io\"),\n    spec: {group: \"kubeless.io\", version: \"v1beta1\", scope: \"Namespaced\", names: {plural: \"functions\", singular: \"function\", kind: \"Function\"}},\n  },\n  {\n    apiVersion: \"apiextensions.k8s.io/v1beta1\",\n    kind: \"CustomResourceDefinition\",\n    metadata: objectMeta.name(\"httptriggers.kubeless.io\"),\n    spec: {group: \"kubeless.io\", version: \"v1beta1\", scope: \"Namespaced\", names: {plural: \"httptriggers\", singular: \"httptrigger\", kind: \"HTTPTrigger\"}},\n  },\n  {\n    apiVersion: \"apiextensions.k8s.io/v1beta1\",\n    kind: \"CustomResourceDefinition\",\n    metadata: objectMeta.name(\"cronjobtriggers.kubeless.io\"),\n    spec: {group: \"kubeless.io\", version: \"v1beta1\", scope: \"Namespaced\", names: {plural: \"cronjobtriggers\", singular: \"cronjobtrigger\", kind: \"CronJobTrigger\"}},\n  }\n];\n\nlocal deploymentConfig = '{}';\n\nlocal kubelessConfig  = configMap.default(\"kubeless-config\", namespace) +\n    configMap.data({\"ingress-enabled\": \"false\"}) +\n    configMap.data({\"service-type\": \"ClusterIP\"})+\n    configMap.data({\"deployment\": std.toString(deploymentConfig)})+\n    configMap.data({\"runtime-images\": std.toString(runtimesSrc)})+\n    configMap.data({\"enable-build-step\": \"false\"})+\n    configMap.data({\"function-registry-tls-verify\": \"true\"})+\n    configMap.data({\"provision-image\": \"kubeless/unzip@sha256:e867f9b366ffb1a25f14baf83438db426ced4f7add56137b7300d32507229b5a\"})+\n    configMap.data({\"provision-image-secret\": \"\"})+\n    configMap.data({\"builder-image\": 
\"kubeless/function-image-builder:latest\"})+\n    configMap.data({\"builder-image-secret\": \"\"});\n\n{\n  controllerAccount: k.util.prune(controllerAccount),\n  controller: k.util.prune(controllerDeployment),\n  crd: k.util.prune(crd),\n  cfg: k.util.prune(kubelessConfig),\n}\n"
  },
  {
    "path": "kubeless-openshift.jsonnet",
    "content": "# Builds on kubeless.ksonnet to produce a deployable manifest on OpenShift 1.5\n# Modifies apiVersion for kubeless-controller Deployment to extensions/v1beta1\n# Modifies ClusterRole and ClusterRoleBinding apiVersions to v1\nlocal k = import \"ksonnet.beta.1/k.libsonnet\";\nlocal kubeless = import \"kubeless.jsonnet\";\n\nlocal config = kubeless.cfg + k.core.v1.configMap.data({\"deployment\":'{\"spec\":{\"template\":{\"spec\":{\"securityContext\":{}}}}}'});\n\nkubeless + {\n  controller: kubeless.controller + { apiVersion: \"extensions/v1beta1\" },\n  controllerClusterRole: kubeless.controllerClusterRole + { apiVersion: \"v1\" },\n  controllerClusterRoleBinding: kubeless.controllerClusterRoleBinding + { apiVersion: \"v1\" },\n  cfg: config,\n}\n"
  },
  {
    "path": "kubeless.jsonnet",
    "content": "# Add RBAC role and binding on top of kubeless.jsonnet, to allow\n# kubeless controller to deploy/update/etc functions on any namespace\nlocal k = import \"ksonnet.beta.1/k.libsonnet\";\nlocal objectMeta = k.core.v1.objectMeta;\n\nlocal kubeless = import \"kubeless-non-rbac.jsonnet\";\nlocal controller_account = kubeless.controller_account;\nlocal controller_roles = [\n  {\n    apiGroups: [\"\"],\n    resources: [\"services\", \"configmaps\"],\n    verbs: [\"create\", \"get\", \"delete\", \"list\", \"update\", \"patch\"],\n  },\n  {\n    apiGroups: [\"apps\", \"extensions\"],\n    resources: [\"deployments\"],\n    verbs: [\"create\", \"get\", \"delete\", \"list\", \"update\", \"patch\"],\n  },\n  {\n    apiGroups: [\"\"],\n    resources: [\"pods\"],\n    verbs: [\"list\", \"delete\"],\n  },\n  {\n    apiGroups: [\"\"],\n    resources: [\"secrets\"],\n    resourceNames: [\"kubeless-registry-credentials\"],\n    verbs: [\"get\"],\n  },\n  {\n    apiGroups: [\"kubeless.io\"],\n    resources: [\"functions\", \"httptriggers\", \"cronjobtriggers\"],\n    verbs: [\"get\", \"list\", \"watch\", \"update\", \"delete\"],\n  },\n  {\n    apiGroups: [\"batch\"],\n    resources: [\"cronjobs\", \"jobs\"],\n    verbs: [\"create\", \"get\", \"delete\", \"deletecollection\", \"list\", \"update\", \"patch\"],\n  },\n  {\n    apiGroups: [\"autoscaling\"],\n    resources: [\"horizontalpodautoscalers\"],\n    verbs: [\"create\", \"get\", \"delete\", \"list\", \"update\", \"patch\"],\n  },\n  {\n    apiGroups: [\"apiextensions.k8s.io\"],\n    resources: [\"customresourcedefinitions\"],\n    verbs: [\"get\", \"list\"],\n  },\n  {\n    apiGroups: [\"monitoring.coreos.com\"],\n    resources: [\"alertmanagers\", \"prometheuses\", \"servicemonitors\"],\n    verbs: [\"*\"],\n  },\n  {\n    apiGroups: [\"extensions\"],\n    resources: [\"ingresses\"],\n    verbs: [\"create\", \"get\", \"list\", \"update\", \"delete\"],\n  },\n];\n\nlocal controllerAccount = 
kubeless.controllerAccount;\n\nlocal clusterRole(name, rules) = {\n    apiVersion: \"rbac.authorization.k8s.io/v1beta1\",\n    kind: \"ClusterRole\",\n    metadata: objectMeta.name(name),\n    rules: rules,\n};\n\nlocal clusterRoleBinding(name, role, subjects) = {\n    apiVersion: \"rbac.authorization.k8s.io/v1beta1\",\n    kind: \"ClusterRoleBinding\",\n    metadata: objectMeta.name(name),\n    subjects: [{kind: s.kind, namespace: s.metadata.namespace, name: s.metadata.name} for s in subjects],\n    roleRef: {kind: role.kind, apiGroup: \"rbac.authorization.k8s.io\", name: role.metadata.name},\n};\n\nlocal controllerClusterRole = clusterRole(\n  \"kubeless-controller-deployer\", controller_roles);\n\nlocal controllerClusterRoleBinding = clusterRoleBinding(\n  \"kubeless-controller-deployer\", controllerClusterRole, [controllerAccount]\n);\n\nkubeless + {\n  controllerClusterRole: controllerClusterRole,\n  controllerClusterRoleBinding: controllerClusterRoleBinding,\n}\n"
  },
  {
    "path": "manifests/README.md",
    "content": "# Collection of manifests for development\n\n**NOTE: TO INSTALL KUBELESS USE A RELEASED MANIFEST AT https://github.com/kubeless/kubeless/releases**\n\nIn this folder you can find several manifests that you can deploy to extend the base functionality of Kubeless.\n"
  },
  {
    "path": "manifests/autoscaling/custom-metrics.yaml",
    "content": "kind: Namespace\napiVersion: v1\nmetadata:\n  name: custom-metrics\n---\nkind: ServiceAccount\napiVersion: v1\nmetadata:\n  name: custom-metrics-apiserver\n  namespace: custom-metrics\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n  name: custom-metrics:system:auth-delegator\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: system:auth-delegator\nsubjects:\n- kind: ServiceAccount\n  name: custom-metrics-apiserver\n  namespace: custom-metrics\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: RoleBinding\nmetadata:\n  name: custom-metrics-auth-reader\n  namespace: kube-system\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: Role\n  name: extension-apiserver-authentication-reader\nsubjects:\n- kind: ServiceAccount\n  name: custom-metrics-apiserver\n  namespace: custom-metrics\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n  name: custom-metrics-read\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - namespaces\n  - pods\n  - services\n  verbs:\n  - get\n  - list\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n  name: custom-metrics-read\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: custom-metrics-read\nsubjects:\n- kind: ServiceAccount\n  name: custom-metrics-apiserver\n  namespace: custom-metrics\n---\napiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n  name: custom-metrics-apiserver\n  namespace: custom-metrics\n  labels:\n    app: custom-metrics-apiserver\nspec:\n  replicas: 1\n  template:\n    metadata:\n      name: custom-metrics-apiserver\n      labels:\n        app: custom-metrics-apiserver\n    spec:\n      serviceAccountName: custom-metrics-apiserver\n      containers:\n      - name: custom-metrics-server\n        image: luxas/k8s-prometheus-adapter\n        args:\n        - --prometheus-url=http://sample-metrics-prom.default.svc:9090\n      
  - --metrics-relist-interval=30s\n        - --rate-interval=60s\n        - --v=10\n        - --logtostderr=true\n        ports:\n        - containerPort: 443\n        securityContext:\n          runAsUser: 0\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: api\n  namespace: custom-metrics\nspec:\n  ports:\n  - port: 443\n    targetPort: 443\n  selector:\n    app: custom-metrics-apiserver\n---\napiVersion: apiregistration.k8s.io/v1beta1\nkind: APIService\nmetadata:\n  name: v1alpha1.custom-metrics.metrics.k8s.io\nspec:\n  insecureSkipTLSVerify: true\n  group: custom-metrics.metrics.k8s.io\n  groupPriorityMinimum: 1000\n  versionPriority: 5\n  service:\n    name: api\n    namespace: custom-metrics\n  version: v1alpha1\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n  name: custom-metrics-server-resources\nrules:\n- apiGroups:\n  - custom-metrics.metrics.k8s.io\n  resources: [\"*\"]\n  verbs: [\"*\"]\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n  name: hpa-controller-custom-metrics\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: custom-metrics-server-resources\nsubjects:\n- kind: ServiceAccount\n  name: horizontal-pod-autoscaler\n  namespace: kube-system\n"
  },
  {
    "path": "manifests/autoscaling/prometheus-operator.yaml",
    "content": "apiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n  name: prometheus-operator\nrules:\n- apiGroups:\n  - extensions\n  resources:\n  - thirdpartyresources\n  verbs:\n  - create\n- apiGroups:\n  - monitoring.coreos.com\n  resources:\n  - alertmanagers\n  - prometheuses\n  - servicemonitors\n  verbs:\n  - \"*\"\n- apiGroups:\n  - apps\n  resources:\n  - statefulsets\n  verbs: [\"*\"]\n- apiGroups: [\"\"]\n  resources:\n  - configmaps\n  - secrets\n  verbs: [\"*\"]\n- apiGroups: [\"\"]\n  resources:\n  - pods\n  verbs: [\"list\", \"delete\"]\n- apiGroups: [\"\"]\n  resources:\n  - services\n  - endpoints\n  verbs: [\"get\", \"create\", \"update\"]\n- apiGroups: [\"\"]\n  resources:\n  - nodes\n  verbs: [\"list\", \"watch\"]\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: prometheus-operator\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n  name: prometheus-operator\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: prometheus-operator\nsubjects:\n- kind: ServiceAccount\n  name: prometheus-operator\n  namespace: default\n---\napiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n  name: prometheus-operator\n  labels:\n    operator: prometheus\nspec:\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        operator: prometheus\n    spec:\n      serviceAccountName: prometheus-operator\n      containers:\n       - name: prometheus-operator\n         image: luxas/prometheus-operator:v0.10.1\n         resources:\n           requests:\n             cpu: 100m\n             memory: 50Mi\n           limits:\n             cpu: 200m\n             memory: 100Mi\n"
  },
  {
    "path": "manifests/autoscaling/sample-metrics-app.yaml",
    "content": "apiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n  labels:\n    app: sample-metrics-app\n  name: sample-metrics-app\nspec:\n  replicas: 2\n  template:\n    metadata:\n      labels:\n        app: sample-metrics-app\n    spec:\n      containers:\n      - image: luxas/autoscale-demo:v0.1.2\n        name: sample-metrics-app\n        ports:\n        - name: web\n          containerPort: 8080\n        readinessProbe:\n          httpGet:\n            path: /\n            port: 8080\n          initialDelaySeconds: 3\n          periodSeconds: 5\n        livenessProbe:\n          httpGet:\n            path: /\n            port: 8080\n          initialDelaySeconds: 3\n          periodSeconds: 5\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: sample-metrics-app\n  labels:\n    app: sample-metrics-app\nspec:\n  ports:\n  - name: web\n    port: 80\n    targetPort: 8080\n  selector:\n    app: sample-metrics-app\n---\napiVersion: monitoring.coreos.com/v1alpha1\nkind: ServiceMonitor\nmetadata:\n  name: sample-metrics-app\n  labels:\n    service-monitor: function\nspec:\n  selector:\n    matchLabels:\n      app: sample-metrics-app\n  endpoints:\n  - port: web\n---\nkind: HorizontalPodAutoscaler\napiVersion: autoscaling/v2alpha1\nmetadata:\n  name: sample-metrics-app-hpa\nspec:\n  scaleTargetRef:\n    kind: Deployment\n    name: sample-metrics-app\n  minReplicas: 2\n  maxReplicas: 10\n  metrics:\n  - type: Object\n    object:\n      target:\n        kind: Service\n        name: sample-metrics-app\n      metricName: http_requests\n      targetValue: 100\n"
  },
  {
    "path": "manifests/autoscaling/sample-prometheus-instance.yaml",
    "content": "apiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRole\nmetadata:\n  name: prometheus\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - nodes\n  - services\n  - endpoints\n  - pods\n  verbs:\n  - get\n  - list\n  - watch\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: prometheus\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n  name: prometheus\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: prometheus\nsubjects:\n- kind: ServiceAccount\n  name: prometheus\n  namespace: default\n---\napiVersion: monitoring.coreos.com/v1alpha1\nkind: Prometheus\nmetadata:\n  name: sample-metrics-prom\n  labels:\n    app: sample-metrics-prom\n    prometheus: sample-metrics-prom\nspec:\n  replicas: 1\n  baseImage: prom/prometheus\n  version: v1.7.1\n  serviceAccountName: prometheus\n  serviceMonitorSelector:\n    matchLabels:\n      service-monitor: function\n  resources:\n    requests:\n      memory: 300Mi\n  #storage:\n  #  resources:\n  #    requests:\n  #      storage: 3Gi\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: sample-metrics-prom\n  labels:\n    app: sample-metrics-prom\n    prometheus: sample-metrics-prom\nspec:\n  type: NodePort\n  ports:\n  - name: web\n    nodePort: 30999\n    port: 9090\n    targetPort: web\n  selector:\n    prometheus: sample-metrics-prom\n"
  },
  {
    "path": "manifests/kinesis/kinesalite.yaml",
    "content": "---\napiVersion: v1\nkind: Service\nmetadata:\n  annotations:\n  name: kinesis\n  labels:\n    app: kinesis\nspec:\n  type: NodePort\n  ports:\n  - port: 4567\n  selector:\n    app: kinesis\n---\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n  name: kinesis\nspec:\n  replicas: 1 \n  template:\n    metadata:\n      labels:\n        app: kinesis\n    spec:\n      containers:\n      - name: kinesis\n        image: saikocat/kinesalite:1.11.5\n        ports:\n        - containerPort: 4567\n        args:\n        - --port=4567\n"
  },
  {
    "path": "manifests/monitoring/grafana-configmap.yaml",
    "content": "apiVersion: v1\ndata:\n  grafana-net-2-dashboard.json: |\n    {\n      \"__inputs\": [{\n        \"name\": \"DS_PROMETHEUS\",\n        \"label\": \"Prometheus\",\n        \"description\": \"\",\n        \"type\": \"datasource\",\n        \"pluginId\": \"prometheus\",\n        \"pluginName\": \"Prometheus\"\n      }],\n      \"__requires\": [{\n        \"type\": \"panel\",\n        \"id\": \"singlestat\",\n        \"name\": \"Singlestat\",\n        \"version\": \"\"\n      }, {\n        \"type\": \"panel\",\n        \"id\": \"text\",\n        \"name\": \"Text\",\n        \"version\": \"\"\n      }, {\n        \"type\": \"panel\",\n        \"id\": \"graph\",\n        \"name\": \"Graph\",\n        \"version\": \"\"\n      }, {\n        \"type\": \"grafana\",\n        \"id\": \"grafana\",\n        \"name\": \"Grafana\",\n        \"version\": \"3.1.0\"\n      }, {\n        \"type\": \"datasource\",\n        \"id\": \"prometheus\",\n        \"name\": \"Prometheus\",\n        \"version\": \"1.0.0\"\n      }],\n      \"id\": null,\n      \"title\": \"Prometheus Stats\",\n      \"tags\": [],\n      \"style\": \"dark\",\n      \"timezone\": \"browser\",\n      \"editable\": true,\n      \"hideControls\": true,\n      \"sharedCrosshair\": false,\n      \"rows\": [{\n        \"collapse\": false,\n        \"editable\": true,\n        \"height\": 178,\n        \"panels\": [{\n          \"cacheTimeout\": null,\n          \"colorBackground\": false,\n          \"colorValue\": false,\n          \"colors\": [\"rgba(245, 54, 54, 0.9)\", \"rgba(237, 129, 40, 0.89)\", \"rgba(50, 172, 45, 0.97)\"],\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"decimals\": 1,\n          \"editable\": true,\n          \"error\": false,\n          \"format\": \"s\",\n          \"id\": 5,\n          \"interval\": null,\n          \"links\": [],\n          \"maxDataPoints\": 100,\n          \"nullPointMode\": \"connected\",\n          \"nullText\": null,\n          
\"postfix\": \"\",\n          \"postfixFontSize\": \"50%\",\n          \"prefix\": \"\",\n          \"prefixFontSize\": \"50%\",\n          \"span\": 3,\n          \"sparkline\": {\n            \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n            \"full\": false,\n            \"lineColor\": \"rgb(31, 120, 193)\",\n            \"show\": false\n          },\n          \"targets\": [{\n            \"expr\": \"(time() - process_start_time_seconds{job=\\\"prometheus\\\"})\",\n            \"intervalFactor\": 2,\n            \"refId\": \"A\",\n            \"step\": 4\n          }],\n          \"thresholds\": \"\",\n          \"title\": \"Uptime\",\n          \"type\": \"singlestat\",\n          \"valueFontSize\": \"80%\",\n          \"valueMaps\": [{\n            \"op\": \"=\",\n            \"text\": \"N/A\",\n            \"value\": \"null\"\n          }],\n          \"valueName\": \"current\",\n          \"mappingTypes\": [{\n            \"name\": \"value to text\",\n            \"value\": 1\n          }, {\n            \"name\": \"range to text\",\n            \"value\": 2\n          }],\n          \"rangeMaps\": [{\n            \"from\": \"null\",\n            \"to\": \"null\",\n            \"text\": \"N/A\"\n          }],\n          \"mappingType\": 1,\n          \"gauge\": {\n            \"show\": false,\n            \"minValue\": 0,\n            \"maxValue\": 100,\n            \"thresholdMarkers\": true,\n            \"thresholdLabels\": false\n          }\n        }, {\n          \"cacheTimeout\": null,\n          \"colorBackground\": false,\n          \"colorValue\": false,\n          \"colors\": [\"rgba(50, 172, 45, 0.97)\", \"rgba(237, 129, 40, 0.89)\", \"rgba(245, 54, 54, 0.9)\"],\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"editable\": true,\n          \"error\": false,\n          \"format\": \"none\",\n          \"id\": 6,\n          \"interval\": null,\n          \"links\": [],\n          \"maxDataPoints\": 100,\n          
\"nullPointMode\": \"connected\",\n          \"nullText\": null,\n          \"postfix\": \"\",\n          \"postfixFontSize\": \"50%\",\n          \"prefix\": \"\",\n          \"prefixFontSize\": \"50%\",\n          \"span\": 3,\n          \"sparkline\": {\n            \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n            \"full\": false,\n            \"lineColor\": \"rgb(31, 120, 193)\",\n            \"show\": true\n          },\n          \"targets\": [{\n            \"expr\": \"prometheus_local_storage_memory_series\",\n            \"intervalFactor\": 2,\n            \"refId\": \"A\",\n            \"step\": 4\n          }],\n          \"thresholds\": \"1,5\",\n          \"title\": \"Local Storage Memory Series\",\n          \"type\": \"singlestat\",\n          \"valueFontSize\": \"70%\",\n          \"valueMaps\": [],\n          \"valueName\": \"current\",\n          \"mappingTypes\": [{\n            \"name\": \"value to text\",\n            \"value\": 1\n          }, {\n            \"name\": \"range to text\",\n            \"value\": 2\n          }],\n          \"rangeMaps\": [{\n            \"from\": \"null\",\n            \"to\": \"null\",\n            \"text\": \"N/A\"\n          }],\n          \"mappingType\": 1,\n          \"gauge\": {\n            \"show\": false,\n            \"minValue\": 0,\n            \"maxValue\": 100,\n            \"thresholdMarkers\": true,\n            \"thresholdLabels\": false\n          }\n        }, {\n          \"cacheTimeout\": null,\n          \"colorBackground\": false,\n          \"colorValue\": true,\n          \"colors\": [\"rgba(50, 172, 45, 0.97)\", \"rgba(237, 129, 40, 0.89)\", \"rgba(245, 54, 54, 0.9)\"],\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"editable\": true,\n          \"error\": false,\n          \"format\": \"none\",\n          \"id\": 7,\n          \"interval\": null,\n          \"links\": [],\n          \"maxDataPoints\": 100,\n          \"nullPointMode\": \"connected\",\n          
\"nullText\": null,\n          \"postfix\": \"\",\n          \"postfixFontSize\": \"50%\",\n          \"prefix\": \"\",\n          \"prefixFontSize\": \"50%\",\n          \"span\": 3,\n          \"sparkline\": {\n            \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n            \"full\": false,\n            \"lineColor\": \"rgb(31, 120, 193)\",\n            \"show\": true\n          },\n          \"targets\": [{\n            \"expr\": \"prometheus_local_storage_indexing_queue_length\",\n            \"intervalFactor\": 2,\n            \"refId\": \"A\",\n            \"step\": 4\n          }],\n          \"thresholds\": \"500,4000\",\n          \"title\": \"Interal Storage Queue Length\",\n          \"type\": \"singlestat\",\n          \"valueFontSize\": \"70%\",\n          \"valueMaps\": [{\n            \"op\": \"=\",\n            \"text\": \"Empty\",\n            \"value\": \"0\"\n          }],\n          \"valueName\": \"current\",\n          \"mappingTypes\": [{\n            \"name\": \"value to text\",\n            \"value\": 1\n          }, {\n            \"name\": \"range to text\",\n            \"value\": 2\n          }],\n          \"rangeMaps\": [{\n            \"from\": \"null\",\n            \"to\": \"null\",\n            \"text\": \"N/A\"\n          }],\n          \"mappingType\": 1,\n          \"gauge\": {\n            \"show\": false,\n            \"minValue\": 0,\n            \"maxValue\": 100,\n            \"thresholdMarkers\": true,\n            \"thresholdLabels\": false\n          }\n        }, {\n          \"content\": \"<img src=\\\"http://prometheus.io/assets/prometheus_logo_grey.svg\\\" alt=\\\"Prometheus logo\\\" style=\\\"height: 40px;\\\">\\n<span style=\\\"font-family: 'Open Sans', 'Helvetica Neue', Helvetica; font-size: 25px;vertical-align: text-top;color: #bbbfc2;margin-left: 10px;\\\">Prometheus</span>\\n\\n<p style=\\\"margin-top: 10px;\\\">You're using Prometheus, an open-source systems monitoring and alerting toolkit originally 
built at SoundCloud. For more information, check out the <a href=\\\"http://www.grafana.org/\\\">Grafana</a> and <a href=\\\"http://prometheus.io/\\\">Prometheus</a> projects.</p>\",\n          \"editable\": true,\n          \"error\": false,\n          \"id\": 9,\n          \"links\": [],\n          \"mode\": \"html\",\n          \"span\": 3,\n          \"style\": {},\n          \"title\": \"\",\n          \"transparent\": true,\n          \"type\": \"text\"\n        }],\n        \"title\": \"New row\"\n      }, {\n        \"collapse\": false,\n        \"editable\": true,\n        \"height\": 227,\n        \"panels\": [{\n          \"aliasColors\": {\n            \"prometheus\": \"#C15C17\",\n            \"{instance=\\\"localhost:9090\\\",job=\\\"prometheus\\\"}\": \"#C15C17\"\n          },\n          \"bars\": false,\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"editable\": true,\n          \"error\": false,\n          \"fill\": 1,\n          \"grid\": {\n            \"threshold1\": null,\n            \"threshold1Color\": \"rgba(216, 200, 27, 0.27)\",\n            \"threshold2\": null,\n            \"threshold2Color\": \"rgba(234, 112, 112, 0.22)\"\n          },\n          \"id\": 3,\n          \"legend\": {\n            \"avg\": false,\n            \"current\": false,\n            \"max\": false,\n            \"min\": false,\n            \"show\": true,\n            \"total\": false,\n            \"values\": false\n          },\n          \"lines\": true,\n          \"linewidth\": 2,\n          \"links\": [],\n          \"nullPointMode\": \"connected\",\n          \"percentage\": false,\n          \"pointradius\": 2,\n          \"points\": false,\n          \"renderer\": \"flot\",\n          \"seriesOverrides\": [],\n          \"span\": 9,\n          \"stack\": false,\n          \"steppedLine\": false,\n          \"targets\": [{\n            \"expr\": \"rate(prometheus_local_storage_ingested_samples_total[5m])\",\n            \"interval\": 
\"\",\n            \"intervalFactor\": 2,\n            \"legendFormat\": \"{{job}}\",\n            \"metric\": \"\",\n            \"refId\": \"A\",\n            \"step\": 2\n          }],\n          \"timeFrom\": null,\n          \"timeShift\": null,\n          \"title\": \"Samples ingested (rate-5m)\",\n          \"tooltip\": {\n            \"shared\": true,\n            \"value_type\": \"cumulative\",\n            \"ordering\": \"alphabetical\",\n            \"msResolution\": false\n          },\n          \"type\": \"graph\",\n          \"yaxes\": [{\n            \"show\": true,\n            \"min\": null,\n            \"max\": null,\n            \"logBase\": 1,\n            \"format\": \"short\"\n          }, {\n            \"show\": true,\n            \"min\": null,\n            \"max\": null,\n            \"logBase\": 1,\n            \"format\": \"short\"\n          }],\n          \"xaxis\": {\n            \"show\": true\n          }\n        }, {\n          \"content\": \"#### Samples Ingested\\nThis graph displays the count of samples ingested by the Prometheus server, as measured over the last 5 minutes, per time series in the range vector. When troubleshooting an issue on IRC or Github, this is often the first stat requested by the Prometheus team. 
\",\n          \"editable\": true,\n          \"error\": false,\n          \"id\": 8,\n          \"links\": [],\n          \"mode\": \"markdown\",\n          \"span\": 2.995914043583536,\n          \"style\": {},\n          \"title\": \"\",\n          \"transparent\": true,\n          \"type\": \"text\"\n        }],\n        \"title\": \"New row\"\n      }, {\n        \"collapse\": false,\n        \"editable\": true,\n        \"height\": \"250px\",\n        \"panels\": [{\n          \"aliasColors\": {\n            \"prometheus\": \"#F9BA8F\",\n            \"{instance=\\\"localhost:9090\\\",interval=\\\"5s\\\",job=\\\"prometheus\\\"}\": \"#F9BA8F\"\n          },\n          \"bars\": false,\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"editable\": true,\n          \"error\": false,\n          \"fill\": 1,\n          \"grid\": {\n            \"threshold1\": null,\n            \"threshold1Color\": \"rgba(216, 200, 27, 0.27)\",\n            \"threshold2\": null,\n            \"threshold2Color\": \"rgba(234, 112, 112, 0.22)\"\n          },\n          \"id\": 2,\n          \"legend\": {\n            \"avg\": false,\n            \"current\": false,\n            \"max\": false,\n            \"min\": false,\n            \"show\": true,\n            \"total\": false,\n            \"values\": false\n          },\n          \"lines\": true,\n          \"linewidth\": 2,\n          \"links\": [],\n          \"nullPointMode\": \"connected\",\n          \"percentage\": false,\n          \"pointradius\": 5,\n          \"points\": false,\n          \"renderer\": \"flot\",\n          \"seriesOverrides\": [],\n          \"span\": 5,\n          \"stack\": false,\n          \"steppedLine\": false,\n          \"targets\": [{\n            \"expr\": \"rate(prometheus_target_interval_length_seconds_count[5m])\",\n            \"intervalFactor\": 2,\n            \"legendFormat\": \"{{job}}\",\n            \"refId\": \"A\",\n            \"step\": 2\n          }],\n          
\"timeFrom\": null,\n          \"timeShift\": null,\n          \"title\": \"Target Scrapes (last 5m)\",\n          \"tooltip\": {\n            \"shared\": true,\n            \"value_type\": \"cumulative\",\n            \"ordering\": \"alphabetical\",\n            \"msResolution\": false\n          },\n          \"type\": \"graph\",\n          \"yaxes\": [{\n            \"show\": true,\n            \"min\": null,\n            \"max\": null,\n            \"logBase\": 1,\n            \"format\": \"short\"\n          }, {\n            \"show\": true,\n            \"min\": null,\n            \"max\": null,\n            \"logBase\": 1,\n            \"format\": \"short\"\n          }],\n          \"xaxis\": {\n            \"show\": true\n          }\n        }, {\n          \"aliasColors\": {},\n          \"bars\": false,\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"editable\": true,\n          \"error\": false,\n          \"fill\": 1,\n          \"grid\": {\n            \"threshold1\": null,\n            \"threshold1Color\": \"rgba(216, 200, 27, 0.27)\",\n            \"threshold2\": null,\n            \"threshold2Color\": \"rgba(234, 112, 112, 0.22)\"\n          },\n          \"id\": 14,\n          \"legend\": {\n            \"avg\": false,\n            \"current\": false,\n            \"max\": false,\n            \"min\": false,\n            \"show\": true,\n            \"total\": false,\n            \"values\": false\n          },\n          \"lines\": true,\n          \"linewidth\": 2,\n          \"links\": [],\n          \"nullPointMode\": \"connected\",\n          \"percentage\": false,\n          \"pointradius\": 5,\n          \"points\": false,\n          \"renderer\": \"flot\",\n          \"seriesOverrides\": [],\n          \"span\": 4,\n          \"stack\": false,\n          \"steppedLine\": false,\n          \"targets\": [{\n            \"expr\": \"prometheus_target_interval_length_seconds{quantile!=\\\"0.01\\\", quantile!=\\\"0.05\\\"}\",\n    
        \"interval\": \"\",\n            \"intervalFactor\": 2,\n            \"legendFormat\": \"{{quantile}} ({{interval}})\",\n            \"metric\": \"\",\n            \"refId\": \"A\",\n            \"step\": 2\n          }],\n          \"timeFrom\": null,\n          \"timeShift\": null,\n          \"title\": \"Scrape Duration\",\n          \"tooltip\": {\n            \"shared\": true,\n            \"value_type\": \"cumulative\",\n            \"ordering\": \"alphabetical\",\n            \"msResolution\": false\n          },\n          \"type\": \"graph\",\n          \"yaxes\": [{\n            \"show\": true,\n            \"min\": null,\n            \"max\": null,\n            \"logBase\": 1,\n            \"format\": \"short\"\n          }, {\n            \"show\": true,\n            \"min\": null,\n            \"max\": null,\n            \"logBase\": 1,\n            \"format\": \"short\"\n          }],\n          \"xaxis\": {\n            \"show\": true\n          }\n        }, {\n          \"content\": \"#### Scrapes\\nPrometheus scrapes metrics from instrumented jobs, either directly or via an intermediary push gateway for short-lived jobs. Target scrapes will show how frequently targets are scraped, as measured over the last 5 minutes, per time series in the range vector. Scrape Duration will show how long the scrapes are taking, with percentiles available as series. 
\",\n          \"editable\": true,\n          \"error\": false,\n          \"id\": 11,\n          \"links\": [],\n          \"mode\": \"markdown\",\n          \"span\": 3,\n          \"style\": {},\n          \"title\": \"\",\n          \"transparent\": true,\n          \"type\": \"text\"\n        }],\n        \"title\": \"New row\"\n      }, {\n        \"collapse\": false,\n        \"editable\": true,\n        \"height\": \"250px\",\n        \"panels\": [{\n          \"aliasColors\": {},\n          \"bars\": false,\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"decimals\": null,\n          \"editable\": true,\n          \"error\": false,\n          \"fill\": 1,\n          \"grid\": {\n            \"threshold1\": null,\n            \"threshold1Color\": \"rgba(216, 200, 27, 0.27)\",\n            \"threshold2\": null,\n            \"threshold2Color\": \"rgba(234, 112, 112, 0.22)\"\n          },\n          \"id\": 12,\n          \"legend\": {\n            \"alignAsTable\": false,\n            \"avg\": false,\n            \"current\": false,\n            \"hideEmpty\": true,\n            \"max\": false,\n            \"min\": false,\n            \"show\": true,\n            \"total\": false,\n            \"values\": false\n          },\n          \"lines\": true,\n          \"linewidth\": 2,\n          \"links\": [],\n          \"nullPointMode\": \"connected\",\n          \"percentage\": false,\n          \"pointradius\": 5,\n          \"points\": false,\n          \"renderer\": \"flot\",\n          \"seriesOverrides\": [],\n          \"span\": 9,\n          \"stack\": false,\n          \"steppedLine\": false,\n          \"targets\": [{\n            \"expr\": \"prometheus_evaluator_duration_milliseconds{quantile!=\\\"0.01\\\", quantile!=\\\"0.05\\\"}\",\n            \"interval\": \"\",\n            \"intervalFactor\": 2,\n            \"legendFormat\": \"{{quantile}}\",\n            \"refId\": \"A\",\n            \"step\": 2\n          }],\n          
\"timeFrom\": null,\n          \"timeShift\": null,\n          \"title\": \"Rule Eval Duration\",\n          \"tooltip\": {\n            \"shared\": true,\n            \"value_type\": \"cumulative\",\n            \"ordering\": \"alphabetical\",\n            \"msResolution\": false\n          },\n          \"type\": \"graph\",\n          \"yaxes\": [{\n            \"show\": true,\n            \"min\": null,\n            \"max\": null,\n            \"logBase\": 1,\n            \"format\": \"percentunit\",\n            \"label\": \"\"\n          }, {\n            \"show\": true,\n            \"min\": null,\n            \"max\": null,\n            \"logBase\": 1,\n            \"format\": \"short\"\n          }],\n          \"xaxis\": {\n            \"show\": true\n          }\n        }, {\n          \"content\": \"#### Rule Evaluation Duration\\nThis graph panel plots the duration for all evaluations to execute. The 50th percentile, 90th percentile and 99th percentile are shown as three separate series to help identify outliers that may be skewing the data.\",\n          \"editable\": true,\n          \"error\": false,\n          \"id\": 15,\n          \"links\": [],\n          \"mode\": \"markdown\",\n          \"span\": 3,\n          \"style\": {},\n          \"title\": \"\",\n          \"transparent\": true,\n          \"type\": \"text\"\n        }],\n        \"title\": \"New row\"\n      }],\n      \"time\": {\n        \"from\": \"now-5m\",\n        \"to\": \"now\"\n      },\n      \"timepicker\": {\n        \"now\": true,\n        \"refresh_intervals\": [\"5s\", \"10s\", \"30s\", \"1m\", \"5m\", \"15m\", \"30m\", \"1h\", \"2h\", \"1d\"],\n        \"time_options\": [\"5m\", \"15m\", \"1h\", \"6h\", \"12h\", \"24h\", \"2d\", \"7d\", \"30d\"]\n      },\n      \"templating\": {\n        \"list\": []\n      },\n      \"annotations\": {\n        \"list\": []\n      },\n      \"refresh\": false,\n      \"schemaVersion\": 12,\n      \"version\": 0,\n      \"links\": [{\n 
       \"icon\": \"info\",\n        \"tags\": [],\n        \"targetBlank\": true,\n        \"title\": \"Grafana Docs\",\n        \"tooltip\": \"\",\n        \"type\": \"link\",\n        \"url\": \"http://www.grafana.org/docs\"\n      }, {\n        \"icon\": \"info\",\n        \"tags\": [],\n        \"targetBlank\": true,\n        \"title\": \"Prometheus Docs\",\n        \"type\": \"link\",\n        \"url\": \"http://prometheus.io/docs/introduction/overview/\"\n      }],\n      \"gnetId\": 2,\n      \"description\": \"The  official, pre-built Prometheus Stats Dashboard.\"\n    }\n  grafana-net-737-dashboard.json: |\n    {\n      \"__inputs\": [{\n        \"name\": \"DS_PROMETHEUS\",\n        \"label\": \"prometheus\",\n        \"description\": \"\",\n        \"type\": \"datasource\",\n        \"pluginId\": \"prometheus\",\n        \"pluginName\": \"Prometheus\"\n      }],\n      \"__requires\": [{\n        \"type\": \"panel\",\n        \"id\": \"singlestat\",\n        \"name\": \"Singlestat\",\n        \"version\": \"\"\n      }, {\n        \"type\": \"panel\",\n        \"id\": \"graph\",\n        \"name\": \"Graph\",\n        \"version\": \"\"\n      }, {\n        \"type\": \"grafana\",\n        \"id\": \"grafana\",\n        \"name\": \"Grafana\",\n        \"version\": \"3.1.0\"\n      }, {\n        \"type\": \"datasource\",\n        \"id\": \"prometheus\",\n        \"name\": \"Prometheus\",\n        \"version\": \"1.0.0\"\n      }],\n      \"id\": null,\n      \"title\": \"Kubernetes Pod Resources\",\n      \"description\": \"Shows resource usage of Kubernetes pods.\",\n      \"tags\": [\n        \"kubernetes\"\n      ],\n      \"style\": \"dark\",\n      \"timezone\": \"browser\",\n      \"editable\": true,\n      \"hideControls\": false,\n      \"sharedCrosshair\": false,\n      \"rows\": [{\n        \"collapse\": false,\n        \"editable\": true,\n        \"height\": \"250px\",\n        \"panels\": [{\n          \"cacheTimeout\": null,\n          
\"colorBackground\": false,\n          \"colorValue\": true,\n          \"colors\": [\n            \"rgba(50, 172, 45, 0.97)\",\n            \"rgba(237, 129, 40, 0.89)\",\n            \"rgba(245, 54, 54, 0.9)\"\n          ],\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"editable\": true,\n          \"error\": false,\n          \"format\": \"percent\",\n          \"gauge\": {\n            \"maxValue\": 100,\n            \"minValue\": 0,\n            \"show\": true,\n            \"thresholdLabels\": false,\n            \"thresholdMarkers\": true\n          },\n          \"height\": \"180px\",\n          \"id\": 4,\n          \"interval\": null,\n          \"isNew\": true,\n          \"links\": [],\n          \"mappingType\": 1,\n          \"mappingTypes\": [{\n            \"name\": \"value to text\",\n            \"value\": 1\n          }, {\n            \"name\": \"range to text\",\n            \"value\": 2\n          }],\n          \"maxDataPoints\": 100,\n          \"nullPointMode\": \"connected\",\n          \"nullText\": null,\n          \"postfix\": \"\",\n          \"postfixFontSize\": \"50%\",\n          \"prefix\": \"\",\n          \"prefixFontSize\": \"50%\",\n          \"rangeMaps\": [{\n            \"from\": \"null\",\n            \"text\": \"N/A\",\n            \"to\": \"null\"\n          }],\n          \"span\": 4,\n          \"sparkline\": {\n            \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n            \"full\": false,\n            \"lineColor\": \"rgb(31, 120, 193)\",\n            \"show\": false\n          },\n          \"targets\": [{\n            \"expr\": \"sum (container_memory_working_set_bytes{id=\\\"/\\\",instance=~\\\"^$instance$\\\"}) / sum (machine_memory_bytes{instance=~\\\"^$instance$\\\"}) * 100\",\n            \"interval\": \"\",\n            \"intervalFactor\": 2,\n            \"legendFormat\": \"\",\n            \"refId\": \"A\",\n            \"step\": 2\n          }],\n          \"thresholds\": \"65, 90\",\n  
        \"timeFrom\": \"1m\",\n          \"timeShift\": null,\n          \"title\": \"Memory Working Set\",\n          \"transparent\": false,\n          \"type\": \"singlestat\",\n          \"valueFontSize\": \"80%\",\n          \"valueMaps\": [{\n            \"op\": \"=\",\n            \"text\": \"N/A\",\n            \"value\": \"null\"\n          }],\n          \"valueName\": \"current\"\n        }, {\n          \"cacheTimeout\": null,\n          \"colorBackground\": false,\n          \"colorValue\": true,\n          \"colors\": [\n            \"rgba(50, 172, 45, 0.97)\",\n            \"rgba(237, 129, 40, 0.89)\",\n            \"rgba(245, 54, 54, 0.9)\"\n          ],\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"decimals\": 2,\n          \"editable\": true,\n          \"error\": false,\n          \"format\": \"percent\",\n          \"gauge\": {\n            \"maxValue\": 100,\n            \"minValue\": 0,\n            \"show\": true,\n            \"thresholdLabels\": false,\n            \"thresholdMarkers\": true\n          },\n          \"height\": \"180px\",\n          \"id\": 6,\n          \"interval\": null,\n          \"isNew\": true,\n          \"links\": [],\n          \"mappingType\": 1,\n          \"mappingTypes\": [{\n            \"name\": \"value to text\",\n            \"value\": 1\n          }, {\n            \"name\": \"range to text\",\n            \"value\": 2\n          }],\n          \"maxDataPoints\": 100,\n          \"nullPointMode\": \"connected\",\n          \"nullText\": null,\n          \"postfix\": \"\",\n          \"postfixFontSize\": \"50%\",\n          \"prefix\": \"\",\n          \"prefixFontSize\": \"50%\",\n          \"rangeMaps\": [{\n            \"from\": \"null\",\n            \"text\": \"N/A\",\n            \"to\": \"null\"\n          }],\n          \"span\": 4,\n          \"sparkline\": {\n            \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n            \"full\": false,\n            \"lineColor\": 
\"rgb(31, 120, 193)\",\n            \"show\": false\n          },\n          \"targets\": [{\n            \"expr\": \"sum(rate(container_cpu_usage_seconds_total{id=\\\"/\\\",instance=~\\\"^$instance$\\\"}[1m])) / sum (machine_cpu_cores{instance=~\\\"^$instance$\\\"}) * 100\",\n            \"interval\": \"10s\",\n            \"intervalFactor\": 1,\n            \"refId\": \"A\",\n            \"step\": 10\n          }],\n          \"thresholds\": \"65, 90\",\n          \"timeFrom\": \"1m\",\n          \"timeShift\": null,\n          \"title\": \"Cpu Usage\",\n          \"type\": \"singlestat\",\n          \"valueFontSize\": \"80%\",\n          \"valueMaps\": [{\n            \"op\": \"=\",\n            \"text\": \"N/A\",\n            \"value\": \"null\"\n          }],\n          \"valueName\": \"current\"\n        }, {\n          \"cacheTimeout\": null,\n          \"colorBackground\": false,\n          \"colorValue\": true,\n          \"colors\": [\n            \"rgba(50, 172, 45, 0.97)\",\n            \"rgba(237, 129, 40, 0.89)\",\n            \"rgba(245, 54, 54, 0.9)\"\n          ],\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"decimals\": 2,\n          \"editable\": true,\n          \"error\": false,\n          \"format\": \"percent\",\n          \"gauge\": {\n            \"maxValue\": 100,\n            \"minValue\": 0,\n            \"show\": true,\n            \"thresholdLabels\": false,\n            \"thresholdMarkers\": true\n          },\n          \"height\": \"180px\",\n          \"id\": 7,\n          \"interval\": null,\n          \"isNew\": true,\n          \"links\": [],\n          \"mappingType\": 1,\n          \"mappingTypes\": [{\n            \"name\": \"value to text\",\n            \"value\": 1\n          }, {\n            \"name\": \"range to text\",\n            \"value\": 2\n          }],\n          \"maxDataPoints\": 100,\n          \"nullPointMode\": \"connected\",\n          \"nullText\": null,\n          \"postfix\": \"\",\n      
    \"postfixFontSize\": \"50%\",\n          \"prefix\": \"\",\n          \"prefixFontSize\": \"50%\",\n          \"rangeMaps\": [{\n            \"from\": \"null\",\n            \"text\": \"N/A\",\n            \"to\": \"null\"\n          }],\n          \"span\": 4,\n          \"sparkline\": {\n            \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n            \"full\": false,\n            \"lineColor\": \"rgb(31, 120, 193)\",\n            \"show\": false\n          },\n          \"targets\": [{\n            \"expr\": \"sum(container_fs_usage_bytes{id=\\\"/\\\",instance=~\\\"^$instance$\\\"}) / sum(container_fs_limit_bytes{id=\\\"/\\\",instance=~\\\"^$instance$\\\"}) * 100\",\n            \"interval\": \"10s\",\n            \"intervalFactor\": 1,\n            \"legendFormat\": \"\",\n            \"metric\": \"\",\n            \"refId\": \"A\",\n            \"step\": 10\n          }],\n          \"thresholds\": \"65, 90\",\n          \"timeFrom\": \"1m\",\n          \"timeShift\": null,\n          \"title\": \"Filesystem Usage\",\n          \"type\": \"singlestat\",\n          \"valueFontSize\": \"80%\",\n          \"valueMaps\": [{\n            \"op\": \"=\",\n            \"text\": \"N/A\",\n            \"value\": \"null\"\n          }],\n          \"valueName\": \"current\"\n        }, {\n          \"cacheTimeout\": null,\n          \"colorBackground\": false,\n          \"colorValue\": false,\n          \"colors\": [\n            \"rgba(50, 172, 45, 0.97)\",\n            \"rgba(237, 129, 40, 0.89)\",\n            \"rgba(245, 54, 54, 0.9)\"\n          ],\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"decimals\": 2,\n          \"editable\": true,\n          \"error\": false,\n          \"format\": \"bytes\",\n          \"gauge\": {\n            \"maxValue\": 100,\n            \"minValue\": 0,\n            \"show\": false,\n            \"thresholdLabels\": false,\n            \"thresholdMarkers\": true\n          },\n          \"height\": \"1px\",\n 
         \"hideTimeOverride\": true,\n          \"id\": 9,\n          \"interval\": null,\n          \"isNew\": true,\n          \"links\": [],\n          \"mappingType\": 1,\n          \"mappingTypes\": [{\n            \"name\": \"value to text\",\n            \"value\": 1\n          }, {\n            \"name\": \"range to text\",\n            \"value\": 2\n          }],\n          \"maxDataPoints\": 100,\n          \"nullPointMode\": \"connected\",\n          \"nullText\": null,\n          \"postfix\": \"\",\n          \"postfixFontSize\": \"20%\",\n          \"prefix\": \"\",\n          \"prefixFontSize\": \"20%\",\n          \"rangeMaps\": [{\n            \"from\": \"null\",\n            \"text\": \"N/A\",\n            \"to\": \"null\"\n          }],\n          \"span\": 2,\n          \"sparkline\": {\n            \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n            \"full\": false,\n            \"lineColor\": \"rgb(31, 120, 193)\",\n            \"show\": false\n          },\n          \"targets\": [{\n            \"expr\": \"sum(container_memory_working_set_bytes{id=\\\"/\\\",instance=~\\\"^$instance$\\\"})\",\n            \"interval\": \"10s\",\n            \"intervalFactor\": 1,\n            \"refId\": \"A\",\n            \"step\": 10\n          }],\n          \"thresholds\": \"\",\n          \"timeFrom\": \"1m\",\n          \"title\": \"Used\",\n          \"type\": \"singlestat\",\n          \"valueFontSize\": \"50%\",\n          \"valueMaps\": [{\n            \"op\": \"=\",\n            \"text\": \"N/A\",\n            \"value\": \"null\"\n          }],\n          \"valueName\": \"current\"\n        }, {\n          \"cacheTimeout\": null,\n          \"colorBackground\": false,\n          \"colorValue\": false,\n          \"colors\": [\n            \"rgba(50, 172, 45, 0.97)\",\n            \"rgba(237, 129, 40, 0.89)\",\n            \"rgba(245, 54, 54, 0.9)\"\n          ],\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"decimals\": 2,\n      
    \"editable\": true,\n          \"error\": false,\n          \"format\": \"bytes\",\n          \"gauge\": {\n            \"maxValue\": 100,\n            \"minValue\": 0,\n            \"show\": false,\n            \"thresholdLabels\": false,\n            \"thresholdMarkers\": true\n          },\n          \"height\": \"1px\",\n          \"hideTimeOverride\": true,\n          \"id\": 10,\n          \"interval\": null,\n          \"isNew\": true,\n          \"links\": [],\n          \"mappingType\": 1,\n          \"mappingTypes\": [{\n            \"name\": \"value to text\",\n            \"value\": 1\n          }, {\n            \"name\": \"range to text\",\n            \"value\": 2\n          }],\n          \"maxDataPoints\": 100,\n          \"nullPointMode\": \"connected\",\n          \"nullText\": null,\n          \"postfix\": \"\",\n          \"postfixFontSize\": \"50%\",\n          \"prefix\": \"\",\n          \"prefixFontSize\": \"50%\",\n          \"rangeMaps\": [{\n            \"from\": \"null\",\n            \"text\": \"N/A\",\n            \"to\": \"null\"\n          }],\n          \"span\": 2,\n          \"sparkline\": {\n            \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n            \"full\": false,\n            \"lineColor\": \"rgb(31, 120, 193)\",\n            \"show\": false\n          },\n          \"targets\": [{\n            \"expr\": \"sum (machine_memory_bytes{instance=~\\\"^$instance$\\\"})\",\n            \"interval\": \"10s\",\n            \"intervalFactor\": 1,\n            \"refId\": \"A\",\n            \"step\": 10\n          }],\n          \"thresholds\": \"\",\n          \"timeFrom\": \"1m\",\n          \"title\": \"Total\",\n          \"type\": \"singlestat\",\n          \"valueFontSize\": \"50%\",\n          \"valueMaps\": [{\n            \"op\": \"=\",\n            \"text\": \"N/A\",\n            \"value\": \"null\"\n          }],\n          \"valueName\": \"current\"\n        }, {\n          \"cacheTimeout\": null,\n          
\"colorBackground\": false,\n          \"colorValue\": false,\n          \"colors\": [\n            \"rgba(50, 172, 45, 0.97)\",\n            \"rgba(237, 129, 40, 0.89)\",\n            \"rgba(245, 54, 54, 0.9)\"\n          ],\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"decimals\": 2,\n          \"editable\": true,\n          \"error\": false,\n          \"format\": \"none\",\n          \"gauge\": {\n            \"maxValue\": 100,\n            \"minValue\": 0,\n            \"show\": false,\n            \"thresholdLabels\": false,\n            \"thresholdMarkers\": true\n          },\n          \"height\": \"1px\",\n          \"hideTimeOverride\": true,\n          \"id\": 11,\n          \"interval\": null,\n          \"isNew\": true,\n          \"links\": [],\n          \"mappingType\": 1,\n          \"mappingTypes\": [{\n            \"name\": \"value to text\",\n            \"value\": 1\n          }, {\n            \"name\": \"range to text\",\n            \"value\": 2\n          }],\n          \"maxDataPoints\": 100,\n          \"nullPointMode\": \"connected\",\n          \"nullText\": null,\n          \"postfix\": \" cores\",\n          \"postfixFontSize\": \"30%\",\n          \"prefix\": \"\",\n          \"prefixFontSize\": \"50%\",\n          \"rangeMaps\": [{\n            \"from\": \"null\",\n            \"text\": \"N/A\",\n            \"to\": \"null\"\n          }],\n          \"span\": 2,\n          \"sparkline\": {\n            \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n            \"full\": false,\n            \"lineColor\": \"rgb(31, 120, 193)\",\n            \"show\": false\n          },\n          \"targets\": [{\n            \"expr\": \"sum (rate (container_cpu_usage_seconds_total{id=\\\"/\\\",instance=~\\\"^$instance$\\\"}[1m]))\",\n            \"interval\": \"10s\",\n            \"intervalFactor\": 1,\n            \"refId\": \"A\",\n            \"step\": 10\n          }],\n          \"thresholds\": \"\",\n          \"timeFrom\": 
\"1m\",\n          \"timeShift\": null,\n          \"title\": \"Used\",\n          \"type\": \"singlestat\",\n          \"valueFontSize\": \"50%\",\n          \"valueMaps\": [{\n            \"op\": \"=\",\n            \"text\": \"N/A\",\n            \"value\": \"null\"\n          }],\n          \"valueName\": \"current\"\n        }, {\n          \"cacheTimeout\": null,\n          \"colorBackground\": false,\n          \"colorValue\": false,\n          \"colors\": [\n            \"rgba(50, 172, 45, 0.97)\",\n            \"rgba(237, 129, 40, 0.89)\",\n            \"rgba(245, 54, 54, 0.9)\"\n          ],\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"decimals\": 2,\n          \"editable\": true,\n          \"error\": false,\n          \"format\": \"none\",\n          \"gauge\": {\n            \"maxValue\": 100,\n            \"minValue\": 0,\n            \"show\": false,\n            \"thresholdLabels\": false,\n            \"thresholdMarkers\": true\n          },\n          \"height\": \"1px\",\n          \"hideTimeOverride\": true,\n          \"id\": 12,\n          \"interval\": null,\n          \"isNew\": true,\n          \"links\": [],\n          \"mappingType\": 1,\n          \"mappingTypes\": [{\n            \"name\": \"value to text\",\n            \"value\": 1\n          }, {\n            \"name\": \"range to text\",\n            \"value\": 2\n          }],\n          \"maxDataPoints\": 100,\n          \"nullPointMode\": \"connected\",\n          \"nullText\": null,\n          \"postfix\": \" cores\",\n          \"postfixFontSize\": \"30%\",\n          \"prefix\": \"\",\n          \"prefixFontSize\": \"50%\",\n          \"rangeMaps\": [{\n            \"from\": \"null\",\n            \"text\": \"N/A\",\n            \"to\": \"null\"\n          }],\n          \"span\": 2,\n          \"sparkline\": {\n            \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n            \"full\": false,\n            \"lineColor\": \"rgb(31, 120, 193)\",\n            
\"show\": false\n          },\n          \"targets\": [{\n            \"expr\": \"sum (machine_cpu_cores{instance=~\\\"^$instance$\\\"})\",\n            \"interval\": \"10s\",\n            \"intervalFactor\": 1,\n            \"refId\": \"A\",\n            \"step\": 10\n          }],\n          \"thresholds\": \"\",\n          \"timeFrom\": \"1m\",\n          \"title\": \"Total\",\n          \"type\": \"singlestat\",\n          \"valueFontSize\": \"50%\",\n          \"valueMaps\": [{\n            \"op\": \"=\",\n            \"text\": \"N/A\",\n            \"value\": \"null\"\n          }],\n          \"valueName\": \"current\"\n        }, {\n          \"cacheTimeout\": null,\n          \"colorBackground\": false,\n          \"colorValue\": false,\n          \"colors\": [\n            \"rgba(50, 172, 45, 0.97)\",\n            \"rgba(237, 129, 40, 0.89)\",\n            \"rgba(245, 54, 54, 0.9)\"\n          ],\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"decimals\": 2,\n          \"editable\": true,\n          \"error\": false,\n          \"format\": \"bytes\",\n          \"gauge\": {\n            \"maxValue\": 100,\n            \"minValue\": 0,\n            \"show\": false,\n            \"thresholdLabels\": false,\n            \"thresholdMarkers\": true\n          },\n          \"height\": \"1px\",\n          \"hideTimeOverride\": true,\n          \"id\": 13,\n          \"interval\": null,\n          \"isNew\": true,\n          \"links\": [],\n          \"mappingType\": 1,\n          \"mappingTypes\": [{\n            \"name\": \"value to text\",\n            \"value\": 1\n          }, {\n            \"name\": \"range to text\",\n            \"value\": 2\n          }],\n          \"maxDataPoints\": 100,\n          \"nullPointMode\": \"connected\",\n          \"nullText\": null,\n          \"postfix\": \"\",\n          \"postfixFontSize\": \"50%\",\n          \"prefix\": \"\",\n          \"prefixFontSize\": \"50%\",\n          \"rangeMaps\": [{\n        
    \"from\": \"null\",\n            \"text\": \"N/A\",\n            \"to\": \"null\"\n          }],\n          \"span\": 2,\n          \"sparkline\": {\n            \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n            \"full\": false,\n            \"lineColor\": \"rgb(31, 120, 193)\",\n            \"show\": false\n          },\n          \"targets\": [{\n            \"expr\": \"sum(container_fs_usage_bytes{id=\\\"/\\\",instance=~\\\"^$instance$\\\"})\",\n            \"interval\": \"10s\",\n            \"intervalFactor\": 1,\n            \"refId\": \"A\",\n            \"step\": 10\n          }],\n          \"thresholds\": \"\",\n          \"timeFrom\": \"1m\",\n          \"title\": \"Used\",\n          \"type\": \"singlestat\",\n          \"valueFontSize\": \"50%\",\n          \"valueMaps\": [{\n            \"op\": \"=\",\n            \"text\": \"N/A\",\n            \"value\": \"null\"\n          }],\n          \"valueName\": \"current\"\n        }, {\n          \"cacheTimeout\": null,\n          \"colorBackground\": false,\n          \"colorValue\": false,\n          \"colors\": [\n            \"rgba(50, 172, 45, 0.97)\",\n            \"rgba(237, 129, 40, 0.89)\",\n            \"rgba(245, 54, 54, 0.9)\"\n          ],\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"decimals\": 2,\n          \"editable\": true,\n          \"error\": false,\n          \"format\": \"bytes\",\n          \"gauge\": {\n            \"maxValue\": 100,\n            \"minValue\": 0,\n            \"show\": false,\n            \"thresholdLabels\": false,\n            \"thresholdMarkers\": true\n          },\n          \"height\": \"1px\",\n          \"hideTimeOverride\": true,\n          \"id\": 14,\n          \"interval\": null,\n          \"isNew\": true,\n          \"links\": [],\n          \"mappingType\": 1,\n          \"mappingTypes\": [{\n            \"name\": \"value to text\",\n            \"value\": 1\n          }, {\n            \"name\": \"range to text\",\n      
      \"value\": 2\n          }],\n          \"maxDataPoints\": 100,\n          \"nullPointMode\": \"connected\",\n          \"nullText\": null,\n          \"postfix\": \"\",\n          \"postfixFontSize\": \"50%\",\n          \"prefix\": \"\",\n          \"prefixFontSize\": \"50%\",\n          \"rangeMaps\": [{\n            \"from\": \"null\",\n            \"text\": \"N/A\",\n            \"to\": \"null\"\n          }],\n          \"span\": 2,\n          \"sparkline\": {\n            \"fillColor\": \"rgba(31, 118, 189, 0.18)\",\n            \"full\": false,\n            \"lineColor\": \"rgb(31, 120, 193)\",\n            \"show\": false\n          },\n          \"targets\": [{\n            \"expr\": \"sum (container_fs_limit_bytes{id=\\\"/\\\",instance=~\\\"^$instance$\\\"})\",\n            \"interval\": \"10s\",\n            \"intervalFactor\": 1,\n            \"refId\": \"A\",\n            \"step\": 10\n          }],\n          \"thresholds\": \"\",\n          \"timeFrom\": \"1m\",\n          \"title\": \"Total\",\n          \"type\": \"singlestat\",\n          \"valueFontSize\": \"50%\",\n          \"valueMaps\": [{\n            \"op\": \"=\",\n            \"text\": \"N/A\",\n            \"value\": \"null\"\n          }],\n          \"valueName\": \"current\"\n        }, {\n          \"aliasColors\": {},\n          \"bars\": false,\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"decimals\": 2,\n          \"editable\": true,\n          \"error\": false,\n          \"fill\": 1,\n          \"grid\": {\n            \"threshold1\": null,\n            \"threshold1Color\": \"rgba(216, 200, 27, 0.27)\",\n            \"threshold2\": null,\n            \"threshold2Color\": \"rgba(234, 112, 112, 0.22)\",\n            \"thresholdLine\": false\n          },\n          \"height\": \"200px\",\n          \"id\": 32,\n          \"isNew\": true,\n          \"legend\": {\n            \"alignAsTable\": true,\n            \"avg\": true,\n            \"current\": true,\n 
           \"max\": false,\n            \"min\": false,\n            \"rightSide\": true,\n            \"show\": true,\n            \"sideWidth\": 200,\n            \"sort\": \"current\",\n            \"sortDesc\": true,\n            \"total\": false,\n            \"values\": true\n          },\n          \"lines\": true,\n          \"linewidth\": 2,\n          \"links\": [],\n          \"nullPointMode\": \"connected\",\n          \"percentage\": false,\n          \"pointradius\": 5,\n          \"points\": false,\n          \"renderer\": \"flot\",\n          \"seriesOverrides\": [],\n          \"span\": 12,\n          \"stack\": false,\n          \"steppedLine\": false,\n          \"targets\": [{\n            \"expr\": \"sum(rate(container_network_receive_bytes_total{instance=~\\\"^$instance$\\\",namespace=~\\\"^$namespace$\\\"}[1m]))\",\n            \"interval\": \"\",\n            \"intervalFactor\": 2,\n            \"legendFormat\": \"receive\",\n            \"metric\": \"network\",\n            \"refId\": \"A\",\n            \"step\": 240\n          }, {\n            \"expr\": \"- sum(rate(container_network_transmit_bytes_total{instance=~\\\"^$instance$\\\",namespace=~\\\"^$namespace$\\\"}[1m]))\",\n            \"interval\": \"\",\n            \"intervalFactor\": 2,\n            \"legendFormat\": \"transmit\",\n            \"metric\": \"network\",\n            \"refId\": \"B\",\n            \"step\": 240\n          }],\n          \"timeFrom\": null,\n          \"timeShift\": null,\n          \"title\": \"Network\",\n          \"tooltip\": {\n            \"msResolution\": false,\n            \"shared\": true,\n            \"sort\": 0,\n            \"value_type\": \"cumulative\"\n          },\n          \"transparent\": false,\n          \"type\": \"graph\",\n          \"xaxis\": {\n            \"show\": true\n          },\n          \"yaxes\": [{\n            \"format\": \"Bps\",\n            \"label\": \"transmit / receive\",\n            \"logBase\": 1,\n      
      \"max\": null,\n            \"min\": null,\n            \"show\": true\n          }, {\n            \"format\": \"Bps\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": false\n          }]\n        }],\n        \"showTitle\": true,\n        \"title\": \"all pods\"\n      }, {\n        \"collapse\": false,\n        \"editable\": true,\n        \"height\": \"250px\",\n        \"panels\": [{\n          \"aliasColors\": {},\n          \"bars\": false,\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"decimals\": 3,\n          \"editable\": true,\n          \"error\": false,\n          \"fill\": 0,\n          \"grid\": {\n            \"threshold1\": null,\n            \"threshold1Color\": \"rgba(216, 200, 27, 0.27)\",\n            \"threshold2\": null,\n            \"threshold2Color\": \"rgba(234, 112, 112, 0.22)\"\n          },\n          \"height\": \"\",\n          \"id\": 17,\n          \"isNew\": true,\n          \"legend\": {\n            \"alignAsTable\": true,\n            \"avg\": true,\n            \"current\": true,\n            \"hideEmpty\": true,\n            \"hideZero\": true,\n            \"max\": false,\n            \"min\": false,\n            \"rightSide\": true,\n            \"show\": true,\n            \"sideWidth\": null,\n            \"sort\": \"current\",\n            \"sortDesc\": true,\n            \"total\": false,\n            \"values\": true\n          },\n          \"lines\": true,\n          \"linewidth\": 2,\n          \"links\": [],\n          \"nullPointMode\": \"connected\",\n          \"percentage\": false,\n          \"pointradius\": 5,\n          \"points\": false,\n          \"renderer\": \"flot\",\n          \"seriesOverrides\": [],\n          \"span\": 12,\n          \"stack\": false,\n          \"steppedLine\": false,\n          \"targets\": [{\n            \"expr\": 
\"sum(rate(container_cpu_usage_seconds_total{image!=\\\"\\\",name=~\\\"^k8s_.*\\\",instance=~\\\"^$instance$\\\",namespace=~\\\"^$namespace$\\\"}[1m])) by (pod_name)\",\n            \"interval\": \"\",\n            \"intervalFactor\": 2,\n            \"legendFormat\": \"{{ pod_name }}\",\n            \"metric\": \"container_cpu\",\n            \"refId\": \"A\",\n            \"step\": 240\n          }],\n          \"timeFrom\": null,\n          \"timeShift\": null,\n          \"title\": \"Cpu Usage\",\n          \"tooltip\": {\n            \"msResolution\": true,\n            \"shared\": false,\n            \"sort\": 2,\n            \"value_type\": \"cumulative\"\n          },\n          \"transparent\": false,\n          \"type\": \"graph\",\n          \"xaxis\": {\n            \"show\": true\n          },\n          \"yaxes\": [{\n            \"format\": \"none\",\n            \"label\": \"cores\",\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": true\n          }, {\n            \"format\": \"short\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": false\n          }]\n        }, {\n          \"aliasColors\": {},\n          \"bars\": false,\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"decimals\": 2,\n          \"editable\": true,\n          \"error\": false,\n          \"fill\": 0,\n          \"grid\": {\n            \"threshold1\": null,\n            \"threshold1Color\": \"rgba(216, 200, 27, 0.27)\",\n            \"threshold2\": null,\n            \"threshold2Color\": \"rgba(234, 112, 112, 0.22)\"\n          },\n          \"id\": 33,\n          \"isNew\": true,\n          \"legend\": {\n            \"alignAsTable\": true,\n            \"avg\": true,\n            \"current\": true,\n            \"hideEmpty\": true,\n            \"hideZero\": true,\n            \"max\": false,\n            \"min\": 
false,\n            \"rightSide\": true,\n            \"show\": true,\n            \"sideWidth\": null,\n            \"sort\": \"current\",\n            \"sortDesc\": true,\n            \"total\": false,\n            \"values\": true\n          },\n          \"lines\": true,\n          \"linewidth\": 2,\n          \"links\": [],\n          \"nullPointMode\": \"null\",\n          \"percentage\": false,\n          \"pointradius\": 5,\n          \"points\": false,\n          \"renderer\": \"flot\",\n          \"seriesOverrides\": [],\n          \"span\": 12,\n          \"stack\": false,\n          \"steppedLine\": false,\n          \"targets\": [{\n            \"expr\": \"sum (container_memory_working_set_bytes{image!=\\\"\\\",name=~\\\"^k8s_.*\\\",instance=~\\\"^$instance$\\\",namespace=~\\\"^$namespace$\\\"}) by (pod_name)\",\n            \"interval\": \"\",\n            \"intervalFactor\": 2,\n            \"legendFormat\": \"{{ pod_name }}\",\n            \"metric\": \"\",\n            \"refId\": \"A\",\n            \"step\": 240\n          }],\n          \"timeFrom\": null,\n          \"timeShift\": null,\n          \"title\": \"Memory Working Set\",\n          \"tooltip\": {\n            \"msResolution\": false,\n            \"shared\": false,\n            \"sort\": 2,\n            \"value_type\": \"cumulative\"\n          },\n          \"type\": \"graph\",\n          \"xaxis\": {\n            \"show\": true\n          },\n          \"yaxes\": [{\n            \"format\": \"bytes\",\n            \"label\": \"used\",\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": true\n          }, {\n            \"format\": \"short\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": false\n          }]\n        }, {\n          \"aliasColors\": {},\n          \"bars\": false,\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          
\"decimals\": 2,\n          \"editable\": true,\n          \"error\": false,\n          \"fill\": 1,\n          \"grid\": {\n            \"threshold1\": null,\n            \"threshold1Color\": \"rgba(216, 200, 27, 0.27)\",\n            \"threshold2\": null,\n            \"threshold2Color\": \"rgba(234, 112, 112, 0.22)\"\n          },\n          \"id\": 16,\n          \"isNew\": true,\n          \"legend\": {\n            \"alignAsTable\": true,\n            \"avg\": true,\n            \"current\": true,\n            \"hideEmpty\": true,\n            \"hideZero\": true,\n            \"max\": false,\n            \"min\": false,\n            \"rightSide\": true,\n            \"show\": true,\n            \"sideWidth\": 200,\n            \"sort\": \"avg\",\n            \"sortDesc\": true,\n            \"total\": false,\n            \"values\": true\n          },\n          \"lines\": true,\n          \"linewidth\": 2,\n          \"links\": [],\n          \"nullPointMode\": \"null\",\n          \"percentage\": false,\n          \"pointradius\": 5,\n          \"points\": false,\n          \"renderer\": \"flot\",\n          \"seriesOverrides\": [],\n          \"span\": 12,\n          \"stack\": false,\n          \"steppedLine\": false,\n          \"targets\": [{\n            \"expr\": \"sum (rate (container_network_receive_bytes_total{image!=\\\"\\\",name=~\\\"^k8s_.*\\\",instance=~\\\"^$instance$\\\",namespace=~\\\"^$namespace$\\\"}[1m])) by (pod_name)\",\n            \"interval\": \"\",\n            \"intervalFactor\": 2,\n            \"legendFormat\": \"{{ pod_name }} < in\",\n            \"metric\": \"network\",\n            \"refId\": \"A\",\n            \"step\": 240\n          }, {\n            \"expr\": \"- sum (rate (container_network_transmit_bytes_total{image!=\\\"\\\",name=~\\\"^k8s_.*\\\",instance=~\\\"^$instance$\\\",namespace=~\\\"^$namespace$\\\"}[1m])) by (pod_name)\",\n            \"interval\": \"\",\n            \"intervalFactor\": 2,\n            
\"legendFormat\": \"{{ pod_name }} > out\",\n            \"metric\": \"network\",\n            \"refId\": \"B\",\n            \"step\": 240\n          }],\n          \"timeFrom\": null,\n          \"timeShift\": null,\n          \"title\": \"Network\",\n          \"tooltip\": {\n            \"msResolution\": false,\n            \"shared\": false,\n            \"sort\": 2,\n            \"value_type\": \"cumulative\"\n          },\n          \"type\": \"graph\",\n          \"xaxis\": {\n            \"show\": true\n          },\n          \"yaxes\": [{\n            \"format\": \"Bps\",\n            \"label\": \"transmit / receive\",\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": true\n          }, {\n            \"format\": \"short\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": false\n          }]\n        }, {\n          \"aliasColors\": {},\n          \"bars\": false,\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"decimals\": 2,\n          \"editable\": true,\n          \"error\": false,\n          \"fill\": 1,\n          \"grid\": {\n            \"threshold1\": null,\n            \"threshold1Color\": \"rgba(216, 200, 27, 0.27)\",\n            \"threshold2\": null,\n            \"threshold2Color\": \"rgba(234, 112, 112, 0.22)\"\n          },\n          \"id\": 34,\n          \"isNew\": true,\n          \"legend\": {\n            \"alignAsTable\": true,\n            \"avg\": true,\n            \"current\": true,\n            \"hideEmpty\": true,\n            \"hideZero\": true,\n            \"max\": false,\n            \"min\": false,\n            \"rightSide\": true,\n            \"show\": true,\n            \"sideWidth\": 200,\n            \"sort\": \"current\",\n            \"sortDesc\": true,\n            \"total\": false,\n            \"values\": true\n          },\n          \"lines\": true,\n  
        \"linewidth\": 2,\n          \"links\": [],\n          \"nullPointMode\": \"null\",\n          \"percentage\": false,\n          \"pointradius\": 5,\n          \"points\": false,\n          \"renderer\": \"flot\",\n          \"seriesOverrides\": [],\n          \"span\": 12,\n          \"stack\": false,\n          \"steppedLine\": false,\n          \"targets\": [{\n            \"expr\": \"sum(container_fs_usage_bytes{image!=\\\"\\\",name=~\\\"^k8s_.*\\\",instance=~\\\"^$instance$\\\",namespace=~\\\"^$namespace$\\\"}) by (pod_name)\",\n            \"interval\": \"\",\n            \"intervalFactor\": 2,\n            \"legendFormat\": \"{{ pod_name }}\",\n            \"metric\": \"network\",\n            \"refId\": \"A\",\n            \"step\": 240\n          }],\n          \"timeFrom\": null,\n          \"timeShift\": null,\n          \"title\": \"Filesystem\",\n          \"tooltip\": {\n            \"msResolution\": false,\n            \"shared\": false,\n            \"sort\": 2,\n            \"value_type\": \"cumulative\"\n          },\n          \"type\": \"graph\",\n          \"xaxis\": {\n            \"show\": true\n          },\n          \"yaxes\": [{\n            \"format\": \"bytes\",\n            \"label\": \"used\",\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": true\n          }, {\n            \"format\": \"short\",\n            \"label\": null,\n            \"logBase\": 1,\n            \"max\": null,\n            \"min\": null,\n            \"show\": false\n          }]\n        }],\n        \"showTitle\": true,\n        \"title\": \"each pod\"\n      }],\n      \"time\": {\n        \"from\": \"now-3d\",\n        \"to\": \"now\"\n      },\n      \"timepicker\": {\n        \"refresh_intervals\": [\n          \"5s\",\n          \"10s\",\n          \"30s\",\n          \"1m\",\n          \"5m\",\n          \"15m\",\n          \"30m\",\n          \"1h\",\n          \"2h\",\n          
\"1d\"\n        ],\n        \"time_options\": [\n          \"5m\",\n          \"15m\",\n          \"1h\",\n          \"6h\",\n          \"12h\",\n          \"24h\",\n          \"2d\",\n          \"7d\",\n          \"30d\"\n        ]\n      },\n      \"templating\": {\n        \"list\": [{\n          \"allValue\": \".*\",\n          \"current\": {},\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"hide\": 0,\n          \"includeAll\": true,\n          \"label\": \"Instance\",\n          \"multi\": false,\n          \"name\": \"instance\",\n          \"options\": [],\n          \"query\": \"label_values(instance)\",\n          \"refresh\": 1,\n          \"regex\": \"\",\n          \"type\": \"query\"\n        }, {\n          \"current\": {},\n          \"datasource\": \"${DS_PROMETHEUS}\",\n          \"hide\": 0,\n          \"includeAll\": true,\n          \"label\": \"Namespace\",\n          \"multi\": true,\n          \"name\": \"namespace\",\n          \"options\": [],\n          \"query\": \"label_values(namespace)\",\n          \"refresh\": 1,\n          \"regex\": \"\",\n          \"type\": \"query\"\n        }]\n      },\n      \"annotations\": {\n        \"list\": []\n      },\n      \"refresh\": false,\n      \"schemaVersion\": 12,\n      \"version\": 8,\n      \"links\": [],\n      \"gnetId\": 737\n    }\n  prometheus-datasource.json: |\n    {\n      \"name\": \"prometheus\",\n      \"type\": \"prometheus\",\n      \"url\": \"http://prometheus:9090\",\n      \"access\": \"proxy\",\n      \"basicAuth\": false\n    }\nkind: ConfigMap\nmetadata:\n  creationTimestamp: null\n  name: grafana-import-dashboards\n  namespace: monitoring\n"
  },
  {
    "path": "manifests/monitoring/grafana-deployment.yaml",
    "content": "apiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n  name: grafana-core\n  namespace: monitoring\n  labels:\n    app: grafana\n    component: core\nspec:\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: grafana\n        component: core\n    spec:\n      containers:\n      - image: grafana/grafana:3.1.1\n        name: grafana-core\n        # env:\n        resources:\n          # keep request = limit to keep this container in guaranteed class\n          limits:\n            cpu: 100m\n            memory: 100Mi\n          requests:\n            cpu: 100m\n            memory: 100Mi\n        env:\n          # The following env variables set up basic auth with the default admin user and admin password.\n          - name: GF_AUTH_BASIC_ENABLED\n            value: \"true\"\n          - name: GF_AUTH_ANONYMOUS_ENABLED\n            value: \"false\"\n          # - name: GF_AUTH_ANONYMOUS_ORG_ROLE\n          #   value: Admin\n          # does not really work, because of template variables in exported dashboards:\n          # - name: GF_DASHBOARDS_JSON_ENABLED\n          #   value: \"true\"\n        volumeMounts:\n        - name: grafana-persistent-storage\n          mountPath: /var\n      volumes:\n      - name: grafana-persistent-storage\n        emptyDir: {}\n"
  },
  {
    "path": "manifests/monitoring/grafana-job.yaml",
    "content": "apiVersion: batch/v1\nkind: Job\nmetadata:\n  name: grafana-import-dashboards\n  namespace: monitoring\n  labels:\n    app: grafana\n    component: import-dashboards\nspec:\n  template:\n    metadata:\n      name: grafana-import-dashboards\n      labels:\n        app: grafana\n        component: import-dashboards\n    spec:\n      containers:\n      - name: grafana-import-dashboards\n        image: giantswarm/tiny-tools\n        command: [\"/bin/sh\", \"-c\"]\n        workingDir: /opt/grafana-import-dashboards\n        args:\n          # FIXME use kubernetes probe instead of \"until curl\"\n          - >\n            until $(curl --silent --fail --show-error --output /dev/null http://admin:admin@grafana:3000/api/datasources); do\n              printf '.' ; sleep 1 ;\n            done ;\n            for file in *-datasource.json ; do\n              if [ -e \"$file\" ] ; then\n                echo \"importing $file\" &&\n                curl --silent --fail --show-error \\\n                  --request POST http://admin:admin@grafana:3000/api/datasources \\\n                  --header \"Content-Type: application/json\" \\\n                  --data-binary \"@$file\" ;\n                echo \"\" ;\n              fi\n            done ;\n            for file in *-dashboard.json ; do\n              if [ -e \"$file\" ] ; then\n                echo \"importing $file\" &&\n                cat \"$file\" \\\n                | xargs -0 printf '{\"dashboard\":%s,\"overwrite\":true,\"inputs\":[{\"name\":\"DS_PROMETHEUS\",\"type\":\"datasource\",\"pluginId\":\"prometheus\",\"value\":\"prometheus\"}]}' \\\n                | jq -c '.' 
\\\n                | curl --silent --fail --show-error \\\n                  --request POST http://admin:admin@grafana:3000/api/dashboards/import \\\n                  --header \"Content-Type: application/json\" \\\n                  --data-binary \"@-\" ;\n                echo \"\" ;\n              fi\n            done\n\n        volumeMounts:\n        - name: config-volume\n          mountPath: /opt/grafana-import-dashboards\n      restartPolicy: Never\n      volumes:\n      - name: config-volume\n        configMap:\n          name: grafana-import-dashboards\n"
  },
  {
    "path": "manifests/monitoring/grafana-service.yaml",
    "content": "apiVersion: v1\nkind: Service\nmetadata:\n  name: grafana\n  namespace: monitoring\n  labels:\n    app: grafana\n    component: core\nspec:\n  type: NodePort\n  ports:\n    - port: 3000\n  selector:\n    app: grafana\n    component: core\n"
  },
  {
    "path": "manifests/monitoring/prometheus.yaml",
    "content": "apiVersion: v1\nkind: Namespace\nmetadata:\n  name: monitoring\n---\nkind: ConfigMap\nmetadata:\n  name: prometheus-config\n  namespace: monitoring\napiVersion: v1\ndata:\n  prometheus.yml: |-\n    global:\n      scrape_interval: 30s\n      scrape_timeout: 30s\n    scrape_configs:\n    - job_name: 'kubernetes-cluster'\n      scheme: https\n      tls_config:\n        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n      kubernetes_sd_configs:\n      - api_servers:\n        - 'https://kubernetes.default.svc'\n        in_cluster: true\n        role: apiserver\n    - job_name: 'kubernetes-nodes'\n      scheme: https\n      tls_config:\n        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n        insecure_skip_verify: true\n      kubernetes_sd_configs:\n      - api_servers:\n        - 'https://kubernetes.default.svc'\n        in_cluster: true\n        role: node\n      relabel_configs:\n      - action: labelmap\n        regex: __meta_kubernetes_node_label_(.+)\n    - job_name: 'kubernetes-service-endpoints'\n      scheme: http\n      kubernetes_sd_configs:\n      - api_servers:\n        - 'https://kubernetes.default.svc'\n        in_cluster: true\n        role: endpoint\n      relabel_configs:\n      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]\n        action: keep\n        regex: true\n      - source_labels: [__meta_kubernetes_service_namespace]\n        action: replace\n        target_label: kubernetes_namespace\n      - source_labels: [__meta_kubernetes_service_name]\n        action: replace\n        target_label: kubernetes_name\n    - job_name: 'kubernetes-services'\n      scheme: http\n      kubernetes_sd_configs:\n      - api_servers:\n        - 'https://kubernetes.default.svc'\n        in_cluster: true\n        role: service\n      relabel_configs:\n      - source_labels: 
[__meta_kubernetes_service_annotation_prometheus_io_scrape]\n        action: keep\n        regex: true\n      - source_labels: [__meta_kubernetes_service_namespace]\n        target_label: kubernetes_namespace\n      - source_labels: [__meta_kubernetes_service_name]\n        target_label: kubernetes_name\n    - job_name: 'kubernetes-pods'\n      scheme: http\n      kubernetes_sd_configs:\n      - api_servers:\n        - 'https://kubernetes.default.svc'\n        in_cluster: true\n        role: pod\n      relabel_configs:\n      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]\n        action: keep\n        regex: true\n      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]\n        action: replace\n        target_label: __metrics_path__\n        regex: (.+)\n      - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]\n        action: replace\n        regex: (.+):(?:\\d+);(\\d+)\n        replacement: ${1}:${2}\n        target_label: __address__\n      - action: labelmap\n        regex: __meta_kubernetes_pod_label_(.+)\n      - source_labels: [__meta_kubernetes_pod_namespace]\n        action: replace\n        target_label: kubernetes_namespace\n      - source_labels: [__meta_kubernetes_pod_name]\n        action: replace\n        target_label: kubernetes_pod_name\n      - source_labels: [__meta_kubernetes_pod_node_name]\n        action: replace\n        target_label: kubernetes_pod_node_name\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: prometheus\n  namespace: monitoring\nspec:\n  ports:\n  - port: 9090\n    protocol: TCP\n    targetPort: 9090\n  selector:\n    name: prometheus\n  type: NodePort\n---\napiVersion: extensions/v1beta1\nkind: Deployment\nmetadata:\n  labels:\n    name: prometheus\n  name: prometheus\n  namespace: monitoring\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      name: prometheus\n  strategy:\n    rollingUpdate:\n      maxSurge: 1\n      
maxUnavailable: 1\n    type: RollingUpdate\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        name: prometheus\n      annotations:\n        prometheus.io/scrape: \"true\"\n        prometheus.io/port: \"9090\"\n    spec:\n      containers:\n      - args:\n        - -config.file=/etc/prometheus/prometheus.yml\n        - -storage.local.path=/prometheus\n        - -storage.local.retention=24h\n        command:\n        - /bin/prometheus\n        image: quay.io/prometheus/prometheus:v1.1.3\n        imagePullPolicy: IfNotPresent\n        name: prometheus\n        ports:\n        - containerPort: 9090\n          protocol: TCP\n        resources:\n          limits:\n            cpu: 500m\n            memory: 2500Mi\n          requests:\n            cpu: 100m\n            memory: 100Mi\n        volumeMounts:\n        - mountPath: /prometheus\n          name: data\n        - mountPath: /etc/prometheus\n          name: config-volume\n      restartPolicy: Always\n      securityContext: {}\n      terminationGracePeriodSeconds: 30\n      volumes:\n      - emptyDir: {}\n        name: data\n      - configMap:\n          name: prometheus-config\n        name: config-volume\n"
  },
  {
    "path": "manifests/nats/nats-cluster.yaml",
    "content": "apiVersion: \"nats.io/v1alpha2\"\nkind: \"NatsCluster\"\nmetadata:\n  name: \"nats\"\nspec:\n  size: 2\n  version: \"1.1.0\"\n"
  },
  {
    "path": "manifests/ui/README.md",
    "content": "# Kubeless UI\n\nYou can find the latest manifest for deploying the UI in the releases page of the kubeless-ui repository:\nhttps://github.com/kubeless/kubeless-ui/releases\n"
  },
  {
    "path": "pkg/apis/kubeless/register.go",
    "content": "package kubeless\n\nconst (\n\t// GroupName is ApiGroup for the Kubeless API\n\tGroupName = \"kubeless.io\"\n)\n"
  },
  {
    "path": "pkg/apis/kubeless/v1beta1/doc.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// +k8s:deepcopy-gen=package\n\n// Package v1beta1 is the v1beta1 version of the Kubeless API\n// +groupName=kubeless.io\npackage v1beta1\n"
  },
  {
    "path": "pkg/apis/kubeless/v1beta1/function.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage v1beta1\n\nimport (\n\tappsv1 \"k8s.io/api/apps/v1\"\n\t\"k8s.io/api/autoscaling/v2beta1\"\n\tv1 \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\n// +genclient\n// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n\n// Function object\ntype Function struct {\n\tmetav1.TypeMeta   `json:\",inline\"`\n\tmetav1.ObjectMeta `json:\"metadata\"`\n\tSpec              FunctionSpec `json:\"spec\"`\n}\n\n// FunctionSpec contains func specification\ntype FunctionSpec struct {\n\tHandler                 string                          `json:\"handler\"`               // Function handler: \"file.function\"\n\tFunction                string                          `json:\"function\"`              // Function file content or URL of the function\n\tFunctionContentType     string                          `json:\"function-content-type\"` // Function file content type (plain text, url, base64, zip or compressedtar)\n\tChecksum                string                          `json:\"checksum\"`              // Checksum of the file\n\tRuntime                 string                          `json:\"runtime\"`               // Function runtime to use\n\tTimeout                 string                          `json:\"timeout\"`               // Maximum timeout for the function to complete its execution\n\tDeps                    string           
               `json:\"deps\"`                  // Function dependencies\n\tDeployment              appsv1.Deployment               `json:\"deployment\" protobuf:\"bytes,3,opt,name=template\"`\n\tServiceSpec             v1.ServiceSpec                  `json:\"service\"`\n\tHorizontalPodAutoscaler v2beta1.HorizontalPodAutoscaler `json:\"horizontalPodAutoscaler\" protobuf:\"bytes,3,opt,name=horizontalPodAutoscaler\"`\n}\n\n// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n\n// FunctionList contains map of functions\ntype FunctionList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata\"`\n\n\t// Items is a list of third party objects\n\tItems []*Function `json:\"items\"`\n}\n"
  },
  {
    "path": "pkg/apis/kubeless/v1beta1/register.go",
    "content": "package v1beta1\n\nimport (\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\n\tkubeless \"github.com/kubeless/kubeless/pkg/apis/kubeless\"\n)\n\n// SchemeGroupVersion is group version used to register these objects\nvar SchemeGroupVersion = schema.GroupVersion{Group: kubeless.GroupName, Version: \"v1beta1\"}\n\n// Kind takes an unqualified kind and returns back a Group qualified GroupKind\nfunc Kind(kind string) schema.GroupKind {\n\treturn SchemeGroupVersion.WithKind(kind).GroupKind()\n}\n\n// Resource takes an unqualified resource and returns a Group qualified GroupResource\nfunc Resource(resource string) schema.GroupResource {\n\treturn SchemeGroupVersion.WithResource(resource).GroupResource()\n}\n\nvar (\n\t// SchemeBuilder collects the scheme builder functions for the Kubeless API\n\tSchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)\n\n\t// AddToScheme applies the SchemeBuilder functions to a specified scheme\n\tAddToScheme = SchemeBuilder.AddToScheme\n)\n\n// Adds the list of known types to Scheme.\nfunc addKnownTypes(scheme *runtime.Scheme) error {\n\tscheme.AddKnownTypes(SchemeGroupVersion,\n\t\t&Function{},\n\t\t&FunctionList{},\n\t)\n\tmetav1.AddToGroupVersion(scheme, SchemeGroupVersion)\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/apis/kubeless/v1beta1/zz_generated.deepcopy.go",
    "content": "// +build !ignore_autogenerated\n\n/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// This file was autogenerated by deepcopy-gen. Do not edit it manually!\n\npackage v1beta1\n\nimport (\n\truntime \"k8s.io/apimachinery/pkg/runtime\"\n)\n\n// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.\nfunc (in *Function) DeepCopyInto(out *Function) {\n\t*out = *in\n\tout.TypeMeta = in.TypeMeta\n\tin.ObjectMeta.DeepCopyInto(&out.ObjectMeta)\n\tin.Spec.DeepCopyInto(&out.Spec)\n\treturn\n}\n\n// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Function.\nfunc (in *Function) DeepCopy() *Function {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Function)\n\tin.DeepCopyInto(out)\n\treturn out\n}\n\n// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.\nfunc (in *Function) DeepCopyObject() runtime.Object {\n\tif c := in.DeepCopy(); c != nil {\n\t\treturn c\n\t}\n\treturn nil\n}\n\n// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil.\nfunc (in *FunctionList) DeepCopyInto(out *FunctionList) {\n\t*out = *in\n\tout.TypeMeta = in.TypeMeta\n\tout.ListMeta = in.ListMeta\n\tif in.Items != nil {\n\t\tin, out := &in.Items, &out.Items\n\t\t*out = make([]*Function, len(*in))\n\t\tfor i := range *in {\n\t\t\tif (*in)[i] == nil {\n\t\t\t\t(*out)[i] = nil\n\t\t\t} else {\n\t\t\t\t(*out)[i] = new(Function)\n\t\t\t\t(*in)[i].DeepCopyInto((*out)[i])\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionList.\nfunc (in *FunctionList) DeepCopy() *FunctionList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(FunctionList)\n\tin.DeepCopyInto(out)\n\treturn out\n}\n\n// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.\nfunc (in *FunctionList) DeepCopyObject() runtime.Object {\n\tif c := in.DeepCopy(); c != nil {\n\t\treturn c\n\t}\n\treturn nil\n}\n\n// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.\nfunc (in *FunctionSpec) DeepCopyInto(out *FunctionSpec) {\n\t*out = *in\n\tin.Deployment.DeepCopyInto(&out.Deployment)\n\tin.ServiceSpec.DeepCopyInto(&out.ServiceSpec)\n\tin.HorizontalPodAutoscaler.DeepCopyInto(&out.HorizontalPodAutoscaler)\n\treturn\n}\n\n// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FunctionSpec.\nfunc (in *FunctionSpec) DeepCopy() *FunctionSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(FunctionSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}\n"
  },
  {
    "path": "pkg/client/clientset/versioned/clientset.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\npackage versioned\n\nimport (\n\tglog \"github.com/golang/glog\"\n\tkubelessv1beta1 \"github.com/kubeless/kubeless/pkg/client/clientset/versioned/typed/kubeless/v1beta1\"\n\tdiscovery \"k8s.io/client-go/discovery\"\n\trest \"k8s.io/client-go/rest\"\n\tflowcontrol \"k8s.io/client-go/util/flowcontrol\"\n)\n\ntype Interface interface {\n\tDiscovery() discovery.DiscoveryInterface\n\tKubelessV1beta1() kubelessv1beta1.KubelessV1beta1Interface\n\t// Deprecated: please explicitly pick a version if possible.\n\tKubeless() kubelessv1beta1.KubelessV1beta1Interface\n}\n\n// Clientset contains the clients for groups. 
Each group has exactly one\n// version included in a Clientset.\ntype Clientset struct {\n\t*discovery.DiscoveryClient\n\tkubelessV1beta1 *kubelessv1beta1.KubelessV1beta1Client\n}\n\n// KubelessV1beta1 retrieves the KubelessV1beta1Client\nfunc (c *Clientset) KubelessV1beta1() kubelessv1beta1.KubelessV1beta1Interface {\n\treturn c.kubelessV1beta1\n}\n\n// Deprecated: Kubeless retrieves the default version of KubelessClient.\n// Please explicitly pick a version.\nfunc (c *Clientset) Kubeless() kubelessv1beta1.KubelessV1beta1Interface {\n\treturn c.kubelessV1beta1\n}\n\n// Discovery retrieves the DiscoveryClient\nfunc (c *Clientset) Discovery() discovery.DiscoveryInterface {\n\tif c == nil {\n\t\treturn nil\n\t}\n\treturn c.DiscoveryClient\n}\n\n// NewForConfig creates a new Clientset for the given config.\nfunc NewForConfig(c *rest.Config) (*Clientset, error) {\n\tconfigShallowCopy := *c\n\tif configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {\n\t\tconfigShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)\n\t}\n\tvar cs Clientset\n\tvar err error\n\tcs.kubelessV1beta1, err = kubelessv1beta1.NewForConfig(&configShallowCopy)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)\n\tif err != nil {\n\t\tglog.Errorf(\"failed to create the DiscoveryClient: %v\", err)\n\t\treturn nil, err\n\t}\n\treturn &cs, nil\n}\n\n// NewForConfigOrDie creates a new Clientset for the given config and\n// panics if there is an error in the config.\nfunc NewForConfigOrDie(c *rest.Config) *Clientset {\n\tvar cs Clientset\n\tcs.kubelessV1beta1 = kubelessv1beta1.NewForConfigOrDie(c)\n\n\tcs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)\n\treturn &cs\n}\n\n// New creates a new Clientset for the given RESTClient.\nfunc New(c rest.Interface) *Clientset {\n\tvar cs Clientset\n\tcs.kubelessV1beta1 = 
kubelessv1beta1.New(c)\n\n\tcs.DiscoveryClient = discovery.NewDiscoveryClient(c)\n\treturn &cs\n}\n"
  },
  {
    "path": "pkg/client/clientset/versioned/doc.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n// This package has the automatically generated clientset.\npackage versioned\n"
  },
  {
    "path": "pkg/client/clientset/versioned/fake/clientset_generated.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\npackage fake\n\nimport (\n\tclientset \"github.com/kubeless/kubeless/pkg/client/clientset/versioned\"\n\tkubelessv1beta1 \"github.com/kubeless/kubeless/pkg/client/clientset/versioned/typed/kubeless/v1beta1\"\n\tfakekubelessv1beta1 \"github.com/kubeless/kubeless/pkg/client/clientset/versioned/typed/kubeless/v1beta1/fake\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/watch\"\n\t\"k8s.io/client-go/discovery\"\n\tfakediscovery \"k8s.io/client-go/discovery/fake\"\n\t\"k8s.io/client-go/testing\"\n)\n\n// NewSimpleClientset returns a clientset that will respond with the provided objects.\n// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,\n// without applying any validations and/or defaults. 
It shouldn't be considered a replacement\n// for a real clientset and is mostly useful in simple unit tests.\nfunc NewSimpleClientset(objects ...runtime.Object) *Clientset {\n\to := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())\n\tfor _, obj := range objects {\n\t\tif err := o.Add(obj); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tfakePtr := testing.Fake{}\n\tfakePtr.AddReactor(\"*\", \"*\", testing.ObjectReaction(o))\n\tfakePtr.AddWatchReactor(\"*\", testing.DefaultWatchReactor(watch.NewFake(), nil))\n\n\treturn &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}}\n}\n\n// Clientset implements clientset.Interface. Meant to be embedded into a\n// struct to get a default implementation. This makes faking out just the method\n// you want to test easier.\ntype Clientset struct {\n\ttesting.Fake\n\tdiscovery *fakediscovery.FakeDiscovery\n}\n\nfunc (c *Clientset) Discovery() discovery.DiscoveryInterface {\n\treturn c.discovery\n}\n\nvar _ clientset.Interface = &Clientset{}\n\n// KubelessV1beta1 retrieves the KubelessV1beta1Client\nfunc (c *Clientset) KubelessV1beta1() kubelessv1beta1.KubelessV1beta1Interface {\n\treturn &fakekubelessv1beta1.FakeKubelessV1beta1{Fake: &c.Fake}\n}\n\n// Kubeless retrieves the KubelessV1beta1Client\nfunc (c *Clientset) Kubeless() kubelessv1beta1.KubelessV1beta1Interface {\n\treturn &fakekubelessv1beta1.FakeKubelessV1beta1{Fake: &c.Fake}\n}\n"
  },
  {
    "path": "pkg/client/clientset/versioned/fake/doc.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n// This package has the automatically generated fake clientset.\npackage fake\n"
  },
  {
    "path": "pkg/client/clientset/versioned/fake/register.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\npackage fake\n\nimport (\n\tkubelessv1beta1 \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\truntime \"k8s.io/apimachinery/pkg/runtime\"\n\tschema \"k8s.io/apimachinery/pkg/runtime/schema\"\n\tserializer \"k8s.io/apimachinery/pkg/runtime/serializer\"\n)\n\nvar scheme = runtime.NewScheme()\nvar codecs = serializer.NewCodecFactory(scheme)\nvar parameterCodec = runtime.NewParameterCodec(scheme)\n\nfunc init() {\n\tv1.AddToGroupVersion(scheme, schema.GroupVersion{Version: \"v1\"})\n\tAddToScheme(scheme)\n}\n\n// AddToScheme adds all types of this clientset into the given scheme. This allows composition\n// of clientsets, like in:\n//\n//   import (\n//     \"k8s.io/client-go/kubernetes\"\n//     clientsetscheme \"k8s.io/client-go/kubernetes/scheme\"\n//     aggregatorclientsetscheme \"k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme\"\n//   )\n//\n//   kclientset, _ := kubernetes.NewForConfig(c)\n//   aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)\n//\n// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types\n// correctly.\nfunc AddToScheme(scheme *runtime.Scheme) {\n\tkubelessv1beta1.AddToScheme(scheme)\n\n}\n"
  },
  {
    "path": "pkg/client/clientset/versioned/scheme/doc.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n// This package contains the scheme of the automatically generated clientset.\npackage scheme\n"
  },
  {
    "path": "pkg/client/clientset/versioned/scheme/register.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\npackage scheme\n\nimport (\n\tkubelessv1beta1 \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\truntime \"k8s.io/apimachinery/pkg/runtime\"\n\tschema \"k8s.io/apimachinery/pkg/runtime/schema\"\n\tserializer \"k8s.io/apimachinery/pkg/runtime/serializer\"\n)\n\nvar Scheme = runtime.NewScheme()\nvar Codecs = serializer.NewCodecFactory(Scheme)\nvar ParameterCodec = runtime.NewParameterCodec(Scheme)\n\nfunc init() {\n\tv1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: \"v1\"})\n\tAddToScheme(Scheme)\n}\n\n// AddToScheme adds all types of this clientset into the given scheme. This allows composition\n// of clientsets, like in:\n//\n//   import (\n//     \"k8s.io/client-go/kubernetes\"\n//     clientsetscheme \"k8s.io/client-go/kubernetes/scheme\"\n//     aggregatorclientsetscheme \"k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme\"\n//   )\n//\n//   kclientset, _ := kubernetes.NewForConfig(c)\n//   aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)\n//\n// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types\n// correctly.\nfunc AddToScheme(scheme *runtime.Scheme) {\n\tkubelessv1beta1.AddToScheme(scheme)\n\n}\n"
  },
  {
    "path": "pkg/client/clientset/versioned/typed/kubeless/v1beta1/doc.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n// This package has the automatically generated typed clients.\npackage v1beta1\n"
  },
  {
    "path": "pkg/client/clientset/versioned/typed/kubeless/v1beta1/fake/doc.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n// Package fake has the automatically generated clients.\npackage fake\n"
  },
  {
    "path": "pkg/client/clientset/versioned/typed/kubeless/v1beta1/fake/fake_function.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\npackage fake\n\nimport (\n\tv1beta1 \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\tlabels \"k8s.io/apimachinery/pkg/labels\"\n\tschema \"k8s.io/apimachinery/pkg/runtime/schema\"\n\ttypes \"k8s.io/apimachinery/pkg/types\"\n\twatch \"k8s.io/apimachinery/pkg/watch\"\n\ttesting \"k8s.io/client-go/testing\"\n)\n\n// FakeFunctions implements FunctionInterface\ntype FakeFunctions struct {\n\tFake *FakeKubelessV1beta1\n\tns   string\n}\n\nvar functionsResource = schema.GroupVersionResource{Group: \"kubeless.io\", Version: \"v1beta1\", Resource: \"functions\"}\n\nvar functionsKind = schema.GroupVersionKind{Group: \"kubeless.io\", Version: \"v1beta1\", Kind: \"Function\"}\n\n// Get takes name of the function, and returns the corresponding function object, and an error if there is any.\nfunc (c *FakeFunctions) Get(name string, options v1.GetOptions) (result *v1beta1.Function, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewGetAction(functionsResource, c.ns, name), &v1beta1.Function{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1beta1.Function), err\n}\n\n// List takes label and field selectors, and returns the list of Functions that match those selectors.\nfunc (c *FakeFunctions) List(opts v1.ListOptions) (result *v1beta1.FunctionList, err error) {\n\tobj, err := 
c.Fake.\n\t\tInvokes(testing.NewListAction(functionsResource, functionsKind, c.ns, opts), &v1beta1.FunctionList{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\n\tlabel, _, _ := testing.ExtractFromListOptions(opts)\n\tif label == nil {\n\t\tlabel = labels.Everything()\n\t}\n\tlist := &v1beta1.FunctionList{}\n\tfor _, item := range obj.(*v1beta1.FunctionList).Items {\n\t\tif label.Matches(labels.Set(item.Labels)) {\n\t\t\tlist.Items = append(list.Items, item)\n\t\t}\n\t}\n\treturn list, err\n}\n\n// Watch returns a watch.Interface that watches the requested functions.\nfunc (c *FakeFunctions) Watch(opts v1.ListOptions) (watch.Interface, error) {\n\treturn c.Fake.\n\t\tInvokesWatch(testing.NewWatchAction(functionsResource, c.ns, opts))\n\n}\n\n// Create takes the representation of a function and creates it.  Returns the server's representation of the function, and an error, if there is any.\nfunc (c *FakeFunctions) Create(function *v1beta1.Function) (result *v1beta1.Function, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewCreateAction(functionsResource, c.ns, function), &v1beta1.Function{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1beta1.Function), err\n}\n\n// Update takes the representation of a function and updates it. Returns the server's representation of the function, and an error, if there is any.\nfunc (c *FakeFunctions) Update(function *v1beta1.Function) (result *v1beta1.Function, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewUpdateAction(functionsResource, c.ns, function), &v1beta1.Function{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1beta1.Function), err\n}\n\n// Delete takes name of the function and deletes it. 
Returns an error if one occurs.\nfunc (c *FakeFunctions) Delete(name string, options *v1.DeleteOptions) error {\n\t_, err := c.Fake.\n\t\tInvokes(testing.NewDeleteAction(functionsResource, c.ns, name), &v1beta1.Function{})\n\n\treturn err\n}\n\n// DeleteCollection deletes a collection of objects.\nfunc (c *FakeFunctions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {\n\taction := testing.NewDeleteCollectionAction(functionsResource, c.ns, listOptions)\n\n\t_, err := c.Fake.Invokes(action, &v1beta1.FunctionList{})\n\treturn err\n}\n\n// Patch applies the patch and returns the patched function.\nfunc (c *FakeFunctions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Function, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewPatchSubresourceAction(functionsResource, c.ns, name, data, subresources...), &v1beta1.Function{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*v1beta1.Function), err\n}\n"
  },
  {
    "path": "pkg/client/clientset/versioned/typed/kubeless/v1beta1/fake/fake_kubeless_client.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\npackage fake\n\nimport (\n\tv1beta1 \"github.com/kubeless/kubeless/pkg/client/clientset/versioned/typed/kubeless/v1beta1\"\n\trest \"k8s.io/client-go/rest\"\n\ttesting \"k8s.io/client-go/testing\"\n)\n\ntype FakeKubelessV1beta1 struct {\n\t*testing.Fake\n}\n\nfunc (c *FakeKubelessV1beta1) Functions(namespace string) v1beta1.FunctionInterface {\n\treturn &FakeFunctions{c, namespace}\n}\n\n// RESTClient returns a RESTClient that is used to communicate\n// with API server by this client implementation.\nfunc (c *FakeKubelessV1beta1) RESTClient() rest.Interface {\n\tvar ret *rest.RESTClient\n\treturn ret\n}\n"
  },
  {
    "path": "pkg/client/clientset/versioned/typed/kubeless/v1beta1/function.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\npackage v1beta1\n\nimport (\n\tv1beta1 \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\tscheme \"github.com/kubeless/kubeless/pkg/client/clientset/versioned/scheme\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\ttypes \"k8s.io/apimachinery/pkg/types\"\n\twatch \"k8s.io/apimachinery/pkg/watch\"\n\trest \"k8s.io/client-go/rest\"\n)\n\n// FunctionsGetter has a method to return a FunctionInterface.\n// A group's client should implement this interface.\ntype FunctionsGetter interface {\n\tFunctions(namespace string) FunctionInterface\n}\n\n// FunctionInterface has methods to work with Function resources.\ntype FunctionInterface interface {\n\tCreate(*v1beta1.Function) (*v1beta1.Function, error)\n\tUpdate(*v1beta1.Function) (*v1beta1.Function, error)\n\tDelete(name string, options *v1.DeleteOptions) error\n\tDeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error\n\tGet(name string, options v1.GetOptions) (*v1beta1.Function, error)\n\tList(opts v1.ListOptions) (*v1beta1.FunctionList, error)\n\tWatch(opts v1.ListOptions) (watch.Interface, error)\n\tPatch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Function, err error)\n\tFunctionExpansion\n}\n\n// functions implements FunctionInterface\ntype functions struct {\n\tclient rest.Interface\n\tns     string\n}\n\n// newFunctions returns a Functions\nfunc 
newFunctions(c *KubelessV1beta1Client, namespace string) *functions {\n\treturn &functions{\n\t\tclient: c.RESTClient(),\n\t\tns:     namespace,\n\t}\n}\n\n// Get takes name of the function, and returns the corresponding function object, and an error if there is any.\nfunc (c *functions) Get(name string, options v1.GetOptions) (result *v1beta1.Function, err error) {\n\tresult = &v1beta1.Function{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"functions\").\n\t\tName(name).\n\t\tVersionedParams(&options, scheme.ParameterCodec).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}\n\n// List takes label and field selectors, and returns the list of Functions that match those selectors.\nfunc (c *functions) List(opts v1.ListOptions) (result *v1beta1.FunctionList, err error) {\n\tresult = &v1beta1.FunctionList{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"functions\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}\n\n// Watch returns a watch.Interface that watches the requested functions.\nfunc (c *functions) Watch(opts v1.ListOptions) (watch.Interface, error) {\n\topts.Watch = true\n\treturn c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"functions\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tWatch()\n}\n\n// Create takes the representation of a function and creates it.  Returns the server's representation of the function, and an error, if there is any.\nfunc (c *functions) Create(function *v1beta1.Function) (result *v1beta1.Function, err error) {\n\tresult = &v1beta1.Function{}\n\terr = c.client.Post().\n\t\tNamespace(c.ns).\n\t\tResource(\"functions\").\n\t\tBody(function).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}\n\n// Update takes the representation of a function and updates it. 
Returns the server's representation of the function, and an error, if there is any.\nfunc (c *functions) Update(function *v1beta1.Function) (result *v1beta1.Function, err error) {\n\tresult = &v1beta1.Function{}\n\terr = c.client.Put().\n\t\tNamespace(c.ns).\n\t\tResource(\"functions\").\n\t\tName(function.Name).\n\t\tBody(function).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}\n\n// Delete takes name of the function and deletes it. Returns an error if one occurs.\nfunc (c *functions) Delete(name string, options *v1.DeleteOptions) error {\n\treturn c.client.Delete().\n\t\tNamespace(c.ns).\n\t\tResource(\"functions\").\n\t\tName(name).\n\t\tBody(options).\n\t\tDo().\n\t\tError()\n}\n\n// DeleteCollection deletes a collection of objects.\nfunc (c *functions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {\n\treturn c.client.Delete().\n\t\tNamespace(c.ns).\n\t\tResource(\"functions\").\n\t\tVersionedParams(&listOptions, scheme.ParameterCodec).\n\t\tBody(options).\n\t\tDo().\n\t\tError()\n}\n\n// Patch applies the patch and returns the patched function.\nfunc (c *functions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Function, err error) {\n\tresult = &v1beta1.Function{}\n\terr = c.client.Patch(pt).\n\t\tNamespace(c.ns).\n\t\tResource(\"functions\").\n\t\tSubResource(subresources...).\n\t\tName(name).\n\t\tBody(data).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}\n"
  },
  {
    "path": "pkg/client/clientset/versioned/typed/kubeless/v1beta1/generated_expansion.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\npackage v1beta1\n\ntype FunctionExpansion interface{}\n"
  },
  {
    "path": "pkg/client/clientset/versioned/typed/kubeless/v1beta1/kubeless_client.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\npackage v1beta1\n\nimport (\n\tv1beta1 \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\t\"github.com/kubeless/kubeless/pkg/client/clientset/versioned/scheme\"\n\tserializer \"k8s.io/apimachinery/pkg/runtime/serializer\"\n\trest \"k8s.io/client-go/rest\"\n)\n\ntype KubelessV1beta1Interface interface {\n\tRESTClient() rest.Interface\n\tFunctionsGetter\n}\n\n// KubelessV1beta1Client is used to interact with features provided by the kubeless.io group.\ntype KubelessV1beta1Client struct {\n\trestClient rest.Interface\n}\n\nfunc (c *KubelessV1beta1Client) Functions(namespace string) FunctionInterface {\n\treturn newFunctions(c, namespace)\n}\n\n// NewForConfig creates a new KubelessV1beta1Client for the given config.\nfunc NewForConfig(c *rest.Config) (*KubelessV1beta1Client, error) {\n\tconfig := *c\n\tif err := setConfigDefaults(&config); err != nil {\n\t\treturn nil, err\n\t}\n\tclient, err := rest.RESTClientFor(&config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &KubelessV1beta1Client{client}, nil\n}\n\n// NewForConfigOrDie creates a new KubelessV1beta1Client for the given config and\n// panics if there is an error in the config.\nfunc NewForConfigOrDie(c *rest.Config) *KubelessV1beta1Client {\n\tclient, err := NewForConfig(c)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn client\n}\n\n// New creates a new KubelessV1beta1Client for the given 
RESTClient.\nfunc New(c rest.Interface) *KubelessV1beta1Client {\n\treturn &KubelessV1beta1Client{c}\n}\n\nfunc setConfigDefaults(config *rest.Config) error {\n\tgv := v1beta1.SchemeGroupVersion\n\tconfig.GroupVersion = &gv\n\tconfig.APIPath = \"/apis\"\n\tconfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}\n\n\tif config.UserAgent == \"\" {\n\t\tconfig.UserAgent = rest.DefaultKubernetesUserAgent()\n\t}\n\n\treturn nil\n}\n\n// RESTClient returns a RESTClient that is used to communicate\n// with API server by this client implementation.\nfunc (c *KubelessV1beta1Client) RESTClient() rest.Interface {\n\tif c == nil {\n\t\treturn nil\n\t}\n\treturn c.restClient\n}\n"
  },
  {
    "path": "pkg/client/informers/externalversions/factory.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// This file was automatically generated by informer-gen\n\npackage externalversions\n\nimport (\n\treflect \"reflect\"\n\tsync \"sync\"\n\ttime \"time\"\n\n\tversioned \"github.com/kubeless/kubeless/pkg/client/clientset/versioned\"\n\tinternalinterfaces \"github.com/kubeless/kubeless/pkg/client/informers/externalversions/internalinterfaces\"\n\tkubeless \"github.com/kubeless/kubeless/pkg/client/informers/externalversions/kubeless\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\truntime \"k8s.io/apimachinery/pkg/runtime\"\n\tschema \"k8s.io/apimachinery/pkg/runtime/schema\"\n\tcache \"k8s.io/client-go/tools/cache\"\n)\n\ntype sharedInformerFactory struct {\n\tclient           versioned.Interface\n\tnamespace        string\n\ttweakListOptions internalinterfaces.TweakListOptionsFunc\n\tlock             sync.Mutex\n\tdefaultResync    time.Duration\n\n\tinformers map[reflect.Type]cache.SharedIndexInformer\n\t// startedInformers is used for tracking which informers have been started.\n\t// This allows Start() to be called multiple times safely.\n\tstartedInformers map[reflect.Type]bool\n}\n\n// NewSharedInformerFactory constructs a new instance of sharedInformerFactory\nfunc NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {\n\treturn NewFilteredSharedInformerFactory(client, defaultResync, v1.NamespaceAll, nil)\n}\n\n// 
NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.\n// Listers obtained via this SharedInformerFactory will be subject to the same filters\n// as specified here.\nfunc NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {\n\treturn &sharedInformerFactory{\n\t\tclient:           client,\n\t\tnamespace:        namespace,\n\t\ttweakListOptions: tweakListOptions,\n\t\tdefaultResync:    defaultResync,\n\t\tinformers:        make(map[reflect.Type]cache.SharedIndexInformer),\n\t\tstartedInformers: make(map[reflect.Type]bool),\n\t}\n}\n\n// Start initializes all requested informers.\nfunc (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\tfor informerType, informer := range f.informers {\n\t\tif !f.startedInformers[informerType] {\n\t\t\tgo informer.Run(stopCh)\n\t\t\tf.startedInformers[informerType] = true\n\t\t}\n\t}\n}\n\n// WaitForCacheSync waits until the caches of all started informers have synced.\nfunc (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {\n\tinformers := func() map[reflect.Type]cache.SharedIndexInformer {\n\t\tf.lock.Lock()\n\t\tdefer f.lock.Unlock()\n\n\t\tinformers := map[reflect.Type]cache.SharedIndexInformer{}\n\t\tfor informerType, informer := range f.informers {\n\t\t\tif f.startedInformers[informerType] {\n\t\t\t\tinformers[informerType] = informer\n\t\t\t}\n\t\t}\n\t\treturn informers\n\t}()\n\n\tres := map[reflect.Type]bool{}\n\tfor informType, informer := range informers {\n\t\tres[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)\n\t}\n\treturn res\n}\n\n// InformerFor returns the SharedIndexInformer for obj using an internal\n// client.\nfunc (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) 
cache.SharedIndexInformer {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\tinformerType := reflect.TypeOf(obj)\n\tinformer, exists := f.informers[informerType]\n\tif exists {\n\t\treturn informer\n\t}\n\tinformer = newFunc(f.client, f.defaultResync)\n\tf.informers[informerType] = informer\n\n\treturn informer\n}\n\n// SharedInformerFactory provides shared informers for resources in all known\n// API group versions.\ntype SharedInformerFactory interface {\n\tinternalinterfaces.SharedInformerFactory\n\tForResource(resource schema.GroupVersionResource) (GenericInformer, error)\n\tWaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool\n\n\tKubeless() kubeless.Interface\n}\n\nfunc (f *sharedInformerFactory) Kubeless() kubeless.Interface {\n\treturn kubeless.New(f, f.namespace, f.tweakListOptions)\n}\n"
  },
  {
    "path": "pkg/client/informers/externalversions/generic.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// This file was automatically generated by informer-gen\n\npackage externalversions\n\nimport (\n\t\"fmt\"\n\n\tv1beta1 \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\tschema \"k8s.io/apimachinery/pkg/runtime/schema\"\n\tcache \"k8s.io/client-go/tools/cache\"\n)\n\n// GenericInformer is type of SharedIndexInformer which will locate and delegate to other\n// sharedInformers based on type\ntype GenericInformer interface {\n\tInformer() cache.SharedIndexInformer\n\tLister() cache.GenericLister\n}\n\ntype genericInformer struct {\n\tinformer cache.SharedIndexInformer\n\tresource schema.GroupResource\n}\n\n// Informer returns the SharedIndexInformer.\nfunc (f *genericInformer) Informer() cache.SharedIndexInformer {\n\treturn f.informer\n}\n\n// Lister returns the GenericLister.\nfunc (f *genericInformer) Lister() cache.GenericLister {\n\treturn cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)\n}\n\n// ForResource gives generic access to a shared informer of the matching type\n// TODO extend this to unknown resources with a client pool\nfunc (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {\n\tswitch resource {\n\t// Group=kubeless.io, Version=v1beta1\n\tcase v1beta1.SchemeGroupVersion.WithResource(\"functions\"):\n\t\treturn &genericInformer{resource: resource.GroupResource(), informer: 
f.Kubeless().V1beta1().Functions().Informer()}, nil\n\n\t}\n\n\treturn nil, fmt.Errorf(\"no informer found for %v\", resource)\n}\n"
  },
  {
    "path": "pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// This file was automatically generated by informer-gen\n\npackage internalinterfaces\n\nimport (\n\ttime \"time\"\n\n\tversioned \"github.com/kubeless/kubeless/pkg/client/clientset/versioned\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\truntime \"k8s.io/apimachinery/pkg/runtime\"\n\tcache \"k8s.io/client-go/tools/cache\"\n)\n\ntype NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer\n\n// SharedInformerFactory a small interface to allow for adding an informer without an import cycle\ntype SharedInformerFactory interface {\n\tStart(stopCh <-chan struct{})\n\tInformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer\n}\n\ntype TweakListOptionsFunc func(*v1.ListOptions)\n"
  },
  {
    "path": "pkg/client/informers/externalversions/kubeless/interface.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// This file was automatically generated by informer-gen\n\npackage kubeless\n\nimport (\n\tinternalinterfaces \"github.com/kubeless/kubeless/pkg/client/informers/externalversions/internalinterfaces\"\n\tv1beta1 \"github.com/kubeless/kubeless/pkg/client/informers/externalversions/kubeless/v1beta1\"\n)\n\n// Interface provides access to each of this group's versions.\ntype Interface interface {\n\t// V1beta1 provides access to shared informers for resources in V1beta1.\n\tV1beta1() v1beta1.Interface\n}\n\ntype group struct {\n\tfactory          internalinterfaces.SharedInformerFactory\n\tnamespace        string\n\ttweakListOptions internalinterfaces.TweakListOptionsFunc\n}\n\n// New returns a new Interface.\nfunc New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {\n\treturn &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}\n}\n\n// V1beta1 returns a new v1beta1.Interface.\nfunc (g *group) V1beta1() v1beta1.Interface {\n\treturn v1beta1.New(g.factory, g.namespace, g.tweakListOptions)\n}\n"
  },
  {
    "path": "pkg/client/informers/externalversions/kubeless/v1beta1/function.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// This file was automatically generated by informer-gen\n\npackage v1beta1\n\nimport (\n\ttime \"time\"\n\n\tkubeless_v1beta1 \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\tversioned \"github.com/kubeless/kubeless/pkg/client/clientset/versioned\"\n\tinternalinterfaces \"github.com/kubeless/kubeless/pkg/client/informers/externalversions/internalinterfaces\"\n\tv1beta1 \"github.com/kubeless/kubeless/pkg/client/listers/kubeless/v1beta1\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\truntime \"k8s.io/apimachinery/pkg/runtime\"\n\twatch \"k8s.io/apimachinery/pkg/watch\"\n\tcache \"k8s.io/client-go/tools/cache\"\n)\n\n// FunctionInformer provides access to a shared informer and lister for\n// Functions.\ntype FunctionInformer interface {\n\tInformer() cache.SharedIndexInformer\n\tLister() v1beta1.FunctionLister\n}\n\ntype functionInformer struct {\n\tfactory          internalinterfaces.SharedInformerFactory\n\ttweakListOptions internalinterfaces.TweakListOptionsFunc\n\tnamespace        string\n}\n\n// NewFunctionInformer constructs a new informer for Function type.\n// Always prefer using an informer factory to get a shared informer instead of getting an independent\n// one. 
This reduces memory footprint and number of connections to the server.\nfunc NewFunctionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {\n\treturn NewFilteredFunctionInformer(client, namespace, resyncPeriod, indexers, nil)\n}\n\n// NewFilteredFunctionInformer constructs a new informer for Function type.\n// Always prefer using an informer factory to get a shared informer instead of getting an independent\n// one. This reduces memory footprint and number of connections to the server.\nfunc NewFilteredFunctionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {\n\treturn cache.NewSharedIndexInformer(\n\t\t&cache.ListWatch{\n\t\t\tListFunc: func(options v1.ListOptions) (runtime.Object, error) {\n\t\t\t\tif tweakListOptions != nil {\n\t\t\t\t\ttweakListOptions(&options)\n\t\t\t\t}\n\t\t\t\treturn client.KubelessV1beta1().Functions(namespace).List(options)\n\t\t\t},\n\t\t\tWatchFunc: func(options v1.ListOptions) (watch.Interface, error) {\n\t\t\t\tif tweakListOptions != nil {\n\t\t\t\t\ttweakListOptions(&options)\n\t\t\t\t}\n\t\t\t\treturn client.KubelessV1beta1().Functions(namespace).Watch(options)\n\t\t\t},\n\t\t},\n\t\t&kubeless_v1beta1.Function{},\n\t\tresyncPeriod,\n\t\tindexers,\n\t)\n}\n\nfunc (f *functionInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {\n\treturn NewFilteredFunctionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)\n}\n\nfunc (f *functionInformer) Informer() cache.SharedIndexInformer {\n\treturn f.factory.InformerFor(&kubeless_v1beta1.Function{}, f.defaultInformer)\n}\n\nfunc (f *functionInformer) Lister() v1beta1.FunctionLister {\n\treturn 
v1beta1.NewFunctionLister(f.Informer().GetIndexer())\n}\n"
  },
  {
    "path": "pkg/client/informers/externalversions/kubeless/v1beta1/interface.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// This file was automatically generated by informer-gen\n\npackage v1beta1\n\nimport (\n\tinternalinterfaces \"github.com/kubeless/kubeless/pkg/client/informers/externalversions/internalinterfaces\"\n)\n\n// Interface provides access to all the informers in this group version.\ntype Interface interface {\n\t// Functions returns a FunctionInformer.\n\tFunctions() FunctionInformer\n}\n\ntype version struct {\n\tfactory          internalinterfaces.SharedInformerFactory\n\tnamespace        string\n\ttweakListOptions internalinterfaces.TweakListOptionsFunc\n}\n\n// New returns a new Interface.\nfunc New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {\n\treturn &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}\n}\n\n// Functions returns a FunctionInformer.\nfunc (v *version) Functions() FunctionInformer {\n\treturn &functionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}\n}\n"
  },
  {
    "path": "pkg/client/listers/kubeless/v1beta1/expansion_generated.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// This file was automatically generated by lister-gen\n\npackage v1beta1\n\n// FunctionListerExpansion allows custom methods to be added to\n// FunctionLister.\ntype FunctionListerExpansion interface{}\n\n// FunctionNamespaceListerExpansion allows custom methods to be added to\n// FunctionNamespaceLister.\ntype FunctionNamespaceListerExpansion interface{}\n"
  },
  {
    "path": "pkg/client/listers/kubeless/v1beta1/function.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// This file was automatically generated by lister-gen\n\npackage v1beta1\n\nimport (\n\tv1beta1 \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/labels\"\n\t\"k8s.io/client-go/tools/cache\"\n)\n\n// FunctionLister helps list Functions.\ntype FunctionLister interface {\n\t// List lists all Functions in the indexer.\n\tList(selector labels.Selector) (ret []*v1beta1.Function, err error)\n\t// Functions returns an object that can list and get Functions.\n\tFunctions(namespace string) FunctionNamespaceLister\n\tFunctionListerExpansion\n}\n\n// functionLister implements the FunctionLister interface.\ntype functionLister struct {\n\tindexer cache.Indexer\n}\n\n// NewFunctionLister returns a new FunctionLister.\nfunc NewFunctionLister(indexer cache.Indexer) FunctionLister {\n\treturn &functionLister{indexer: indexer}\n}\n\n// List lists all Functions in the indexer.\nfunc (s *functionLister) List(selector labels.Selector) (ret []*v1beta1.Function, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1beta1.Function))\n\t})\n\treturn ret, err\n}\n\n// Functions returns an object that can list and get Functions.\nfunc (s *functionLister) Functions(namespace string) FunctionNamespaceLister {\n\treturn functionNamespaceLister{indexer: s.indexer, 
namespace: namespace}\n}\n\n// FunctionNamespaceLister helps list and get Functions.\ntype FunctionNamespaceLister interface {\n\t// List lists all Functions in the indexer for a given namespace.\n\tList(selector labels.Selector) (ret []*v1beta1.Function, err error)\n\t// Get retrieves the Function from the indexer for a given namespace and name.\n\tGet(name string) (*v1beta1.Function, error)\n\tFunctionNamespaceListerExpansion\n}\n\n// functionNamespaceLister implements the FunctionNamespaceLister\n// interface.\ntype functionNamespaceLister struct {\n\tindexer   cache.Indexer\n\tnamespace string\n}\n\n// List lists all Functions in the indexer for a given namespace.\nfunc (s functionNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Function, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1beta1.Function))\n\t})\n\treturn ret, err\n}\n\n// Get retrieves the Function from the indexer for a given namespace and name.\nfunc (s functionNamespaceLister) Get(name string) (*v1beta1.Function, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1beta1.Resource(\"function\"), name)\n\t}\n\treturn obj.(*v1beta1.Function), nil\n}\n"
  },
  {
    "path": "pkg/controller/function_controller.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage controller\n\nimport (\n\t\"crypto/sha256\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"time\"\n\n\tmonitoringv1alpha1 \"github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1\"\n\t\"github.com/sirupsen/logrus\"\n\tappsv1 \"k8s.io/api/apps/v1\"\n\t\"k8s.io/api/autoscaling/v2beta1\"\n\tcorev1 \"k8s.io/api/core/v1\"\n\tapiequality \"k8s.io/apimachinery/pkg/api/equality\"\n\tk8sErrors \"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\tutilruntime \"k8s.io/apimachinery/pkg/util/runtime\"\n\t\"k8s.io/apimachinery/pkg/util/wait\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/tools/cache\"\n\t\"k8s.io/client-go/util/workqueue\"\n\n\t\"github.com/ghodss/yaml\"\n\tkubelessApi \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\t\"github.com/kubeless/kubeless/pkg/client/clientset/versioned\"\n\tkv1beta1 \"github.com/kubeless/kubeless/pkg/client/informers/externalversions/kubeless/v1beta1\"\n\t\"github.com/kubeless/kubeless/pkg/langruntime\"\n\t\"github.com/kubeless/kubeless/pkg/registry\"\n\t\"github.com/kubeless/kubeless/pkg/utils\"\n)\n\nconst (\n\tmaxRetries        = 5\n\tfuncKind          = \"Function\"\n\tfuncAPIVersion    = \"kubeless.io/v1beta1\"\n\tfunctionFinalizer = \"kubeless.io/function\"\n)\n\n// FunctionController object\ntype FunctionController struct {\n\tlogger           *logrus.Entry\n\tclientset    
    kubernetes.Interface\n\tkubelessclient   versioned.Interface\n\tsmclient         *monitoringv1alpha1.MonitoringV1alpha1Client\n\tFunctions        map[string]*kubelessApi.Function\n\tqueue            workqueue.RateLimitingInterface\n\tinformer         cache.SharedIndexInformer\n\tconfig           *corev1.ConfigMap\n\tlangRuntime      *langruntime.Langruntimes\n\timagePullSecrets []corev1.LocalObjectReference\n}\n\n// Config contains k8s client of a controller\ntype Config struct {\n\tKubeCli        kubernetes.Interface\n\tFunctionClient versioned.Interface\n}\n\n// NewFunctionController returns a new *FunctionController\nfunc NewFunctionController(cfg Config, smclient *monitoringv1alpha1.MonitoringV1alpha1Client) *FunctionController {\n\tqueue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())\n\n\tapiExtensionsClientset := utils.GetAPIExtensionsClientInCluster()\n\tconfig, err := utils.GetKubelessConfig(cfg.KubeCli, apiExtensionsClientset)\n\tif err != nil {\n\t\tlogrus.Fatalf(\"Unable to read the configmap: %s\", err)\n\t}\n\n\tinformer := kv1beta1.NewFunctionInformer(cfg.FunctionClient, config.Data[\"functions-namespace\"], 0, cache.Indexers{})\n\n\tinformer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tkey, err := cache.MetaNamespaceKeyFunc(obj)\n\t\t\tif err == nil {\n\t\t\t\tqueue.Add(key)\n\t\t\t}\n\t\t},\n\t\tUpdateFunc: func(old, new interface{}) {\n\t\t\tkey, err := cache.MetaNamespaceKeyFunc(new)\n\t\t\tif err == nil {\n\t\t\t\tnewFunctionObj := new.(*kubelessApi.Function)\n\t\t\t\toldFunctionObj := old.(*kubelessApi.Function)\n\t\t\t\tif functionObjChanged(oldFunctionObj, newFunctionObj) {\n\t\t\t\t\tqueue.Add(key)\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\tkey, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)\n\t\t\tif err == nil {\n\t\t\t\tqueue.Add(key)\n\t\t\t}\n\t\t},\n\t})\n\n\tvar lr = 
langruntime.New(config)\n\tlr.ReadConfigMap()\n\n\timagePullSecrets := utils.GetSecretsAsLocalObjectReference(config.Data[\"provision-image-secret\"], config.Data[\"builder-image-secret\"])\n\tif config.Data[\"enable-build-step\"] == \"true\" {\n\t\timagePullSecrets = append(imagePullSecrets, utils.GetSecretsAsLocalObjectReference(\"kubeless-registry-credentials\")...)\n\t}\n\treturn &FunctionController{\n\t\tlogger:           logrus.WithField(\"pkg\", \"function-controller\"),\n\t\tclientset:        cfg.KubeCli,\n\t\tsmclient:         smclient,\n\t\tkubelessclient:   cfg.FunctionClient,\n\t\tinformer:         informer,\n\t\tqueue:            queue,\n\t\tconfig:           config,\n\t\tlangRuntime:      lr,\n\t\timagePullSecrets: imagePullSecrets,\n\t}\n}\n\n// Run starts the kubeless controller\nfunc (c *FunctionController) Run(stopCh <-chan struct{}) {\n\tdefer utilruntime.HandleCrash()\n\tdefer c.queue.ShutDown()\n\n\tc.logger.Info(\"Starting Function controller\")\n\n\tgo c.informer.Run(stopCh)\n\n\tif !cache.WaitForCacheSync(stopCh, c.HasSynced) {\n\t\tutilruntime.HandleError(fmt.Errorf(\"Timed out waiting for caches to sync\"))\n\t\treturn\n\t}\n\n\tc.logger.Info(\"Function controller synced and ready\")\n\n\twait.Until(c.runWorker, time.Second, stopCh)\n}\n\n// HasSynced is required for the cache.Controller interface.\nfunc (c *FunctionController) HasSynced() bool {\n\treturn c.informer.HasSynced()\n}\n\n// LastSyncResourceVersion is required for the cache.Controller interface.\nfunc (c *FunctionController) LastSyncResourceVersion() string {\n\treturn c.informer.LastSyncResourceVersion()\n}\n\nfunc (c *FunctionController) runWorker() {\n\tfor c.processNextItem() {\n\t\t// continue looping\n\t}\n}\n\nfunc (c *FunctionController) processNextItem() bool {\n\tkey, quit := c.queue.Get()\n\tif quit {\n\t\treturn false\n\t}\n\tdefer c.queue.Done(key)\n\n\terr := c.processItem(key.(string))\n\tif err == nil {\n\t\t// No error, reset the ratelimit 
counters\n\t\tc.queue.Forget(key)\n\t} else if c.queue.NumRequeues(key) < maxRetries {\n\t\tc.logger.Errorf(\"Error processing %s (will retry): %v\", key, err)\n\t\tc.queue.AddRateLimited(key)\n\t} else {\n\t\t// err != nil and too many retries\n\t\tc.logger.Errorf(\"Error processing %s (giving up): %v\", key, err)\n\t\tc.queue.Forget(key)\n\t\tutilruntime.HandleError(err)\n\t}\n\n\treturn true\n}\n\nfunc (c *FunctionController) processItem(key string) error {\n\tc.logger.Infof(\"Processing change to Function %s\", key)\n\n\tns, name, err := cache.SplitMetaNamespaceKey(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tobj, exists, err := c.informer.GetIndexer().GetByKey(key)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error fetching object with key %s from store: %v\", key, err)\n\t}\n\n\t// this is an update when Function API object is actually deleted, we dont need to process anything here\n\tif !exists {\n\t\tc.logger.Infof(\"Function object %s not found in the cache, ignoring the deletion update\", key)\n\t\treturn nil\n\t}\n\n\tfuncObj := obj.(*kubelessApi.Function)\n\n\t// Function API object is marked for deletion (DeletionTimestamp != nil), so lets process the delete update\n\tif funcObj.ObjectMeta.DeletionTimestamp != nil {\n\n\t\t// If finalizer is removed, then we already processed the delete update, so just return\n\t\tif !utils.FunctionObjHasFinalizer(funcObj, functionFinalizer) {\n\t\t\treturn nil\n\t\t}\n\n\t\t// Function object should be deleted, so cleanup the associated resources and remove the finalizer\n\t\terr := c.deleteK8sResources(ns, name)\n\t\tif err != nil {\n\t\t\tc.logger.Errorf(\"Can't delete function: %v\", err)\n\t\t\treturn err\n\t\t}\n\n\t\t// remove finalizer from the function object, so that we dont have to process any further and object can be deleted\n\t\terr = utils.FunctionObjRemoveFinalizer(c.kubelessclient, funcObj, functionFinalizer)\n\t\tif err != nil {\n\t\t\tc.logger.Errorf(\"Failed to remove function controller as 
finalizer to Function Obj: %s object due to: %v: \", key, err)\n\t\t\treturn err\n\t\t}\n\t\tc.logger.Infof(\"Function object %s has been successfully processed and marked for deletion\", key)\n\t\treturn nil\n\t}\n\n\t// If function object in not marked with self as finalizer, then add the finalizer\n\tif !utils.FunctionObjHasFinalizer(funcObj, functionFinalizer) {\n\t\terr = utils.FunctionObjAddFinalizer(c.kubelessclient, funcObj, functionFinalizer)\n\t\tif err != nil {\n\t\t\tc.logger.Errorf(\"Error adding Function controller as finalizer to Function Obj: %s CRD due to: %v: \", key, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = c.ensureK8sResources(funcObj)\n\tif err != nil {\n\t\tc.logger.Errorf(\"Function can not be created/updated: %v\", err)\n\t\treturn err\n\t}\n\n\tc.logger.Infof(\"Processed change to function: %s\", key)\n\treturn nil\n}\n\n// startImageBuildJob creates (if necessary) a job that will build an image for the given function\n// returns the name of the image, a boolean indicating if the build job has been created and an error\nfunc (c *FunctionController) startImageBuildJob(funcObj *kubelessApi.Function, or []metav1.OwnerReference) (string, bool, error) {\n\timagePullSecret, err := c.clientset.CoreV1().Secrets(funcObj.ObjectMeta.Namespace).Get(\"kubeless-registry-credentials\", metav1.GetOptions{})\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"Unable to locate registry credentials to build function image: %v\", err)\n\t}\n\treg, err := registry.New(*imagePullSecret)\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"Unable to retrieve registry information: %v\", err)\n\t}\n\t// Use function content and deps as tag (digested)\n\ttag := fmt.Sprintf(\"%x\", sha256.Sum256([]byte(fmt.Sprintf(\"%v%v\", funcObj.Spec.Function, funcObj.Spec.Deps))))\n\timageName := fmt.Sprintf(\"%s/%s\", reg.Creds.Username, funcObj.ObjectMeta.Name)\n\t// Check if image already exists\n\texists, err := reg.ImageExists(imageName, tag)\n\tif err != nil 
{\n\t\treturn \"\", false, fmt.Errorf(\"Unable to check is target image exists: %v\", err)\n\t}\n\tregURL, err := url.Parse(reg.Endpoint)\n\tif err != nil {\n\t\treturn \"\", false, fmt.Errorf(\"Unable to parse registry URL: %v\", err)\n\t}\n\timage := fmt.Sprintf(\"%s/%s:%s\", regURL.Host, imageName, tag)\n\tif !exists {\n\t\ttlsVerify := true\n\t\tif c.config.Data[\"function-registry-tls-verify\"] == \"false\" {\n\t\t\ttlsVerify = false\n\t\t}\n\t\terr = utils.EnsureFuncImage(c.clientset, funcObj, c.langRuntime, or, imageName, tag, c.config.Data[\"builder-image\"], regURL.Host, imagePullSecret.Name, c.config.Data[\"provision-image\"], tlsVerify, c.imagePullSecrets)\n\t\tif err != nil {\n\t\t\treturn \"\", false, fmt.Errorf(\"Unable to create image build job: %v\", err)\n\t\t}\n\t} else {\n\t\t// Image already exists\n\t\treturn image, false, nil\n\t}\n\treturn image, true, nil\n}\n\n// ensureK8sResources creates/updates k8s objects (deploy, svc, configmap) for the function\nfunc (c *FunctionController) ensureK8sResources(funcObj *kubelessApi.Function) error {\n\tif len(funcObj.ObjectMeta.Labels) == 0 {\n\t\tfuncObj.ObjectMeta.Labels = make(map[string]string)\n\t}\n\tfuncObj.ObjectMeta.Labels[\"function\"] = funcObj.ObjectMeta.Name\n\n\tdeployment := appsv1.Deployment{}\n\tif deploymentConfigData, ok := c.config.Data[\"deployment\"]; ok {\n\t\terr := yaml.UnmarshalStrict([]byte(deploymentConfigData), &deployment, yaml.DisallowUnknownFields)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"Error parsing Deployment data in ConfigMap kubeless-function-deployment-config: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\terr = utils.MergeDeployments(&funcObj.Spec.Deployment, &deployment)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\" Error while merging function.Spec.Deployment and Deployment from ConfigMap: %v\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\tor, err := utils.GetOwnerReference(funcKind, funcAPIVersion, funcObj.Name, funcObj.UID)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\terr = utils.EnsureFuncConfigMap(c.clientset, funcObj, or, c.langRuntime)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = utils.EnsureFuncService(c.clientset, funcObj, or)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprebuiltImage := \"\"\n\tif len(funcObj.Spec.Deployment.Spec.Template.Spec.Containers) > 0 && funcObj.Spec.Deployment.Spec.Template.Spec.Containers[0].Image != \"\" {\n\t\tprebuiltImage = funcObj.Spec.Deployment.Spec.Template.Spec.Containers[0].Image\n\t}\n\t// Skip image build step if using a custom runtime\n\tif prebuiltImage == \"\" {\n\t\tif c.config.Data[\"enable-build-step\"] == \"true\" {\n\t\t\tvar isBuilding bool\n\t\t\tprebuiltImage, isBuilding, err = c.startImageBuildJob(funcObj, or)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"Unable to build function: %v\", err)\n\t\t\t} else {\n\t\t\t\tif isBuilding {\n\t\t\t\t\tlogrus.Infof(\"Started build process for function %s\", funcObj.ObjectMeta.Name)\n\t\t\t\t} else {\n\t\t\t\t\tlogrus.Infof(\"Found existing image %s\", prebuiltImage)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tlogrus.Infof(\"Skipping image-build step for %s\", funcObj.ObjectMeta.Name)\n\t}\n\n\terr = utils.EnsureFuncDeployment(c.clientset, funcObj, or, c.langRuntime, prebuiltImage, c.config.Data[\"provision-image\"], c.imagePullSecrets)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif funcObj.Spec.HorizontalPodAutoscaler.Name != \"\" && funcObj.Spec.HorizontalPodAutoscaler.Spec.ScaleTargetRef.Name != \"\" {\n\t\tfuncObj.Spec.HorizontalPodAutoscaler.OwnerReferences = or\n\t\tif funcObj.Spec.HorizontalPodAutoscaler.Spec.Metrics[0].Type == v2beta1.ObjectMetricSourceType {\n\t\t\t// A service monitor is needed when the metric is an object\n\t\t\terr = utils.CreateServiceMonitor(*c.smclient, funcObj, funcObj.ObjectMeta.Namespace, or)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\terr = utils.CreateAutoscale(c.clientset, funcObj.Spec.HorizontalPodAutoscaler)\n\t\tif err != nil && 
k8sErrors.IsAlreadyExists(err) {\n\t\t\terr = utils.UpdateAutoscale(c.clientset, funcObj.Spec.HorizontalPodAutoscaler)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t// HorizontalPodAutoscaler doesn't exists, try to delete if it already existed\n\t\terr = c.deleteAutoscale(funcObj.ObjectMeta.Namespace, funcObj.ObjectMeta.Name)\n\t\tif err != nil && !k8sErrors.IsNotFound(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *FunctionController) deleteAutoscale(ns, name string) error {\n\tif c.smclient != nil {\n\t\t// Delete Service monitor if the client is available\n\t\terr := utils.DeleteServiceMonitor(*c.smclient, name, ns)\n\t\tif err != nil && !k8sErrors.IsNotFound(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\t// delete autoscale\n\terr := utils.DeleteAutoscale(c.clientset, name, ns)\n\tif err != nil && !k8sErrors.IsNotFound(err) {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// deleteK8sResources removes k8s objects of the function\nfunc (c *FunctionController) deleteK8sResources(ns, name string) error {\n\n\t// delete deployment\n\tdeletePolicy := metav1.DeletePropagationBackground\n\terr := c.clientset.Extensions().Deployments(ns).Delete(name, &metav1.DeleteOptions{PropagationPolicy: &deletePolicy})\n\tif err != nil && !k8sErrors.IsNotFound(err) {\n\t\treturn err\n\t}\n\t// delete svc\n\terr = c.clientset.Core().Services(ns).Delete(name, &metav1.DeleteOptions{})\n\tif err != nil && !k8sErrors.IsNotFound(err) {\n\t\treturn err\n\t}\n\n\t// delete cm\n\terr = c.clientset.Core().ConfigMaps(ns).Delete(name, &metav1.DeleteOptions{})\n\tif err != nil && !k8sErrors.IsNotFound(err) {\n\t\treturn err\n\t}\n\n\t// delete service monitor\n\terr = c.deleteAutoscale(ns, name)\n\tif err != nil && !k8sErrors.IsNotFound(err) {\n\t\treturn err\n\t}\n\n\t// delete build job\n\terr = c.clientset.BatchV1().Jobs(ns).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{\n\t\tLabelSelector: fmt.Sprintf(\"created-by=kubeless,function=%s\", 
name),\n\t})\n\tif err != nil && !k8sErrors.IsNotFound(err) {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc functionObjChanged(oldFunctionObj, newFunctionObj *kubelessApi.Function) bool {\n\t// If the function object's deletion timestamp is set, then process\n\tif oldFunctionObj.DeletionTimestamp != newFunctionObj.DeletionTimestamp {\n\t\treturn true\n\t}\n\t// If the new and old function object's resource version is same\n\tif oldFunctionObj.ResourceVersion == newFunctionObj.ResourceVersion {\n\t\treturn false\n\t}\n\tnewSpec := &oldFunctionObj.Spec\n\toldSpec := &newFunctionObj.Spec\n\n\tif newSpec.Function != oldSpec.Function ||\n\t\t// compare checksum since the url content type uses Function field to pass the URL for the function\n\t\t// comparing the checksum ensures that if the function code has changed but the URL remains the same, the function will get redeployed\n\t\tnewSpec.Checksum != oldSpec.Checksum ||\n\t\tnewSpec.Handler != oldSpec.Handler ||\n\t\tnewSpec.FunctionContentType != oldSpec.FunctionContentType ||\n\t\tnewSpec.Deps != oldSpec.Deps ||\n\t\tnewSpec.Timeout != oldSpec.Timeout {\n\t\treturn true\n\t}\n\n\tif !apiequality.Semantic.DeepEqual(newSpec.Deployment, oldSpec.Deployment) ||\n\t\t!apiequality.Semantic.DeepEqual(newSpec.HorizontalPodAutoscaler, oldSpec.HorizontalPodAutoscaler) ||\n\t\t!apiequality.Semantic.DeepEqual(newSpec.ServiceSpec, oldSpec.ServiceSpec) {\n\t\treturn true\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "pkg/controller/function_controller_test.go",
    "content": "package controller\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/ghodss/yaml\"\n\tkubelessApi \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\t\"github.com/kubeless/kubeless/pkg/langruntime\"\n\t\"github.com/sirupsen/logrus\"\n\tappsv1 \"k8s.io/api/apps/v1\"\n\t\"k8s.io/api/autoscaling/v2beta1\"\n\tv1 \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n\tktesting \"k8s.io/client-go/testing\"\n)\n\nfunc findAction(fake *fake.Clientset, verb, resource string) ktesting.Action {\n\tfor _, a := range fake.Actions() {\n\t\tif a.Matches(verb, resource) {\n\t\t\treturn a\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc hasAction(fake *fake.Clientset, verb, resource string) bool {\n\treturn findAction(fake, verb, resource) != nil\n}\n\nfunc TestDeleteK8sResources(t *testing.T) {\n\tmyNsFoo := metav1.ObjectMeta{\n\t\tNamespace: \"myns\",\n\t\tName:      \"foo\",\n\t}\n\n\tdeploy := appsv1.Deployment{\n\t\tObjectMeta: myNsFoo,\n\t}\n\n\tsvc := v1.Service{\n\t\tObjectMeta: myNsFoo,\n\t}\n\n\tcm := v1.ConfigMap{\n\t\tObjectMeta: myNsFoo,\n\t}\n\n\thpa := v2beta1.HorizontalPodAutoscaler{\n\t\tObjectMeta: myNsFoo,\n\t}\n\n\tclientset := fake.NewSimpleClientset(&deploy, &svc, &cm, &hpa)\n\n\tcontroller := FunctionController{\n\t\tclientset: clientset,\n\t}\n\tif err := controller.deleteK8sResources(\"myns\", \"foo\"); err != nil {\n\t\tt.Fatalf(\"Deleting resources returned err: %v\", err)\n\t}\n\n\tt.Log(\"Actions:\", clientset.Actions())\n\n\tfor _, kind := range []string{\"services\", \"configmaps\", \"deployments\", \"horizontalpodautoscalers\"} {\n\t\ta := findAction(clientset, \"delete\", kind)\n\t\tif a == nil {\n\t\t\tt.Errorf(\"failed to delete %s\", kind)\n\t\t} else if ns := a.GetNamespace(); ns != \"myns\" {\n\t\t\tt.Errorf(\"deleted %s from wrong namespace (%s)\", kind, ns)\n\t\t} else if n := a.(ktesting.DeleteAction).GetName(); n != 
\"foo\" {\n\t\t\tt.Errorf(\"deleted %s with wrong name (%s)\", kind, n)\n\t\t}\n\t}\n\n\t// Similar with only svc remaining\n\tclientset = fake.NewSimpleClientset(&svc)\n\tcontroller = FunctionController{\n\t\tclientset: clientset,\n\t}\n\n\tif err := controller.deleteK8sResources(\"myns\", \"foo\"); err != nil {\n\t\tt.Fatalf(\"Deleting partial resources returned err: %v\", err)\n\t}\n\n\tt.Log(\"Actions:\", clientset.Actions())\n\n\tif !hasAction(clientset, \"delete\", \"services\") {\n\t\tt.Errorf(\"failed to delete service\")\n\t}\n\n\tclientset = fake.NewSimpleClientset(&deploy, &svc, &cm)\n\tcontroller = FunctionController{\n\t\tclientset: clientset,\n\t}\n\n\tif err := controller.deleteK8sResources(\"myns\", \"foo\"); err != nil {\n\t\tt.Fatalf(\"Deleting resources returned err: %v\", err)\n\t}\n\n\tt.Log(\"Actions:\", clientset.Actions())\n\n\tfor _, kind := range []string{\"services\", \"configmaps\", \"deployments\"} {\n\t\ta := findAction(clientset, \"delete\", kind)\n\t\tif a == nil {\n\t\t\tt.Errorf(\"failed to delete %s\", kind)\n\t\t} else if ns := a.GetNamespace(); ns != \"myns\" {\n\t\t\tt.Errorf(\"deleted %s from wrong namespace (%s)\", kind, ns)\n\t\t}\n\t}\n}\n\nfunc TestEnsureK8sResourcesWithDeploymentDefinitionFromConfigMap(t *testing.T) {\n\tfuncObj := testFunc()\n\tdeploymentConfigData := `{\n\t\t\"metadata\": {\n\t\t\t\"annotations\": {\n\t\t\t\t\"foo-from-deploy-cm\": \"bar-from-deploy-cm\",\n\t\t\t\t\"xyz\": \"valuefromcm\"\n\t\t\t}\n\t\t},\n\t\t\"spec\": {\n\t\t\t\"replicas\": 2,\n\t\t\t\"template\": {\n\t\t\t\t\"metadata\": {\n\t\t\t\t\t\"annotations\": {\n\t\t\t\t\t\"podannotation-from-func-crd\": \"value-from-container\"\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}`\n\n\tclientset := fake.NewSimpleClientset()\n\tcontroller := testController(clientset, funcObj.Namespace, map[string]string{\n\t\t\"deployment\":     deploymentConfigData,\n\t\t\"runtime-images\": testRuntimeImages(),\n\t})\n\n\tif err := 
controller.ensureK8sResources(funcObj); err != nil {\n\t\tt.Fatalf(\"Creating/Updating resources returned err: %v\", err)\n\t}\n\tdpm, _ := clientset.AppsV1().Deployments(funcObj.Namespace).Get(funcObj.Name, metav1.GetOptions{})\n\texpectedAnnotations := map[string]string{\n\t\t\"bar\":                \"foo\",\n\t\t\"foo-from-deploy-cm\": \"bar-from-deploy-cm\",\n\t\t\"xyz\":                \"valuefromfunc\",\n\t}\n\tfor i := range expectedAnnotations {\n\t\tif dpm.ObjectMeta.Annotations[i] != expectedAnnotations[i] {\n\t\t\tt.Errorf(\"Expecting annotation %s but received %s\", expectedAnnotations[i], dpm.ObjectMeta.Annotations[i])\n\t\t}\n\t}\n\tif *dpm.Spec.Replicas != 10 {\n\t\tt.Fatalf(\"Expecting replicas as 10 but received : %d\", *dpm.Spec.Replicas)\n\t}\n\texpectedPodAnnotations := map[string]string{\n\t\t\"bar\":                         \"foo\",\n\t\t\"foo-from-deploy-cm\":          \"bar-from-deploy-cm\",\n\t\t\"xyz\":                         \"valuefromfunc\",\n\t\t\"podannotation-from-func-crd\": \"value-from-container\",\n\t}\n\tfor i := range expectedPodAnnotations {\n\t\tif dpm.Spec.Template.Annotations[i] != expectedPodAnnotations[i] {\n\t\t\tt.Fatalf(\"Expecting annotation %s but received %s\", expectedPodAnnotations[i], dpm.ObjectMeta.Annotations[i])\n\t\t}\n\t}\n}\n\nfunc TestEnsureK8sResourcesWithDeploymentDefinitionFromConfigMapUnknownKey(t *testing.T) {\n\tfuncObj := testFunc()\n\tdeploymentConfigData := `{\n\t\t\"spec\": {\n\t\t\t\"template\": {\n\t\t\t\t\"spec\": {\n\t\t\t\t\t\"unknown\": \"property\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}`\n\tcontroller := testController(fake.NewSimpleClientset(), funcObj.Namespace, map[string]string{\n\t\t\"deployment\":     deploymentConfigData,\n\t\t\"runtime-images\": testRuntimeImages(),\n\t})\n\n\tif err := controller.ensureK8sResources(funcObj); err == nil {\n\t\tt.Fatalf(\"Unknown key in ConfigMap Deployment definition does not fail\")\n\t}\n}\n\nfunc TestEnsureK8sResourcesWithLivenessProbeFromConfigMap(t 
*testing.T) {\n\tfuncObj := testFunc()\n\truntimeImages := `[\n\t\t{\n\t\t\t\"ID\": \"ruby\",\n\t\t\t\"depName\": \"Gemfile\",\n\t\t\t\"fileNameSuffix\": \".rb\",\n\t\t\t\"versions\": [\n\t\t\t\t{\n\t\t\t\t\t\"name\": \"ruby24\",\n\t\t\t\t\t\"version\": \"2.4\",\n\t\t\t\t\t\"initImage\": \"bitnami/ruby:2.4\",\n\t\t\t\t\t\"imagePullSecrets\":[]\n\t\t\t\t}\n\t\t\t],\n\t\t\t\"livenessProbeInfo\":{\n\t\t\t\t\"exec\": {\n\t\t\t\t\t\"command\": [\n\t\t\t\t\t\t\"curl\",\n\t\t\t\t\t\t\"-f\",\n\t\t\t\t\t\t\"http://localhost:8080/healthz\"\n\t\t\t\t\t],\n\t\t\t\t},\n\t\t\t\t\"initialDelaySeconds\": 5,\n\t\t\t\t\"periodSeconds\": 10\n\t\t\t}\n\t\t}\n\t]`\n\n\tclientset := fake.NewSimpleClientset()\n\tcontroller := testController(clientset, funcObj.Namespace, map[string]string{\n\t\t\"runtime-images\": runtimeImages,\n\t})\n\n\tif err := controller.ensureK8sResources(funcObj); err != nil {\n\t\tt.Fatalf(\"Creating/Updating resources returned err: %v\", err)\n\t}\n\tdpm, _ := clientset.AppsV1().Deployments(funcObj.Namespace).Get(funcObj.Name, metav1.GetOptions{})\n\texpectedLivenessProbe := &v1.Probe{\n\t\tInitialDelaySeconds: int32(5),\n\t\tPeriodSeconds:       int32(10),\n\t\tHandler: v1.Handler{\n\t\t\tExec: &v1.ExecAction{\n\t\t\t\tCommand: []string{\"curl\", \"-f\", \"http://localhost:8080/healthz\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tif !reflect.DeepEqual(dpm.Spec.Template.Spec.Containers[0].LivenessProbe, expectedLivenessProbe) {\n\t\tt.Fatalf(\"LivenessProbe found is '%v', although expected was '%v'\", dpm.Spec.Template.Spec.Containers[0].LivenessProbe, expectedLivenessProbe)\n\t}\n\n}\n\nfunc testFunc() *kubelessApi.Function {\n\tvar replicas int32\n\treplicas = 10\n\tfuncAnno := map[string]string{\n\t\t\"bar\": \"foo\",\n\t\t\"xyz\": \"valuefromfunc\",\n\t}\n\treturn &kubelessApi.Function{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"foo\",\n\t\t\tNamespace: \"default\",\n\t\t\tLabels:    map[string]string{\"foo\": \"bar\"},\n\t\t\tUID:       
\"foo-uid\",\n\t\t},\n\t\tSpec: kubelessApi.FunctionSpec{\n\t\t\tFunction: \"function\",\n\t\t\tDeps:     \"deps\",\n\t\t\tHandler:  \"foo.bar\",\n\t\t\tRuntime:  \"ruby2.4\",\n\t\t\tDeployment: appsv1.Deployment{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tAnnotations: funcAnno,\n\t\t\t\t},\n\t\t\t\tSpec: appsv1.DeploymentSpec{\n\t\t\t\t\tReplicas: &replicas,\n\t\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tAnnotations: funcAnno,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tEnv: []v1.EnvVar{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tName:  \"foo\",\n\t\t\t\t\t\t\t\t\t\t\tValue: \"bar\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc testRuntimeImages() string {\n\truntimeImages := []langruntime.RuntimeInfo{{\n\t\tID:             \"ruby\",\n\t\tDepName:        \"Gemfile\",\n\t\tFileNameSuffix: \".rb\",\n\t\tVersions: []langruntime.RuntimeVersion{\n\t\t\t{\n\t\t\t\tName:    \"ruby24\",\n\t\t\t\tVersion: \"2.4\",\n\t\t\t\tImages: []langruntime.Image{\n\t\t\t\t\t{Phase: \"runtime\", Image: \"bitnami/ruby:2.4\"},\n\t\t\t\t},\n\t\t\t\tImagePullSecrets: []langruntime.ImageSecret{},\n\t\t\t},\n\t\t},\n\t}}\n\n\tout, err := yaml.Marshal(runtimeImages)\n\tif err != nil {\n\t\tlogrus.Fatal(\"Canot Marshall runtimeimage\")\n\t}\n\treturn string(out)\n}\n\nfunc testController(clientset kubernetes.Interface, namespace string, configData map[string]string) *FunctionController {\n\tkubelessConfigMap := &v1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"kubeless-config\",\n\t\t},\n\t\tData: configData,\n\t}\n\t_, err := clientset.CoreV1().ConfigMaps(namespace).Create(kubelessConfigMap)\n\tif err != nil {\n\t\tlogrus.Fatal(\"Unable to create configmap\")\n\t}\n\n\tconfig, err := 
clientset.CoreV1().ConfigMaps(namespace).Get(\"kubeless-config\", metav1.GetOptions{})\n\tif err != nil {\n\t\tlogrus.Fatal(\"Unable to read the configmap\")\n\t}\n\tvar lr = langruntime.New(config)\n\tlr.ReadConfigMap()\n\n\treturn &FunctionController{\n\t\tlogger:      logrus.WithField(\"pkg\", \"controller\"),\n\t\tclientset:   clientset,\n\t\tlangRuntime: lr,\n\t\tconfig:      config,\n\t}\n}\n"
  },
  {
    "path": "pkg/function-image-builder/image_builder.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os/exec\"\n\n\tlbuilder \"github.com/kubeless/kubeless/pkg/function-image-builder/layer-builder\"\n\t\"github.com/spf13/cobra\"\n)\n\nvar globalUsage = `` //TODO: add explanation\n\nfunc init() {\n\tlayerCmd.Flags().Bool(\"insecure\", false, \"Disable TLS verification.\")\n\tlayerCmd.Flags().StringP(\"src\", \"\", \"\", \"Source image reference. F.e. dir://path/to/image\")\n\tlayerCmd.Flags().StringP(\"src-creds\", \"\", \"\", \"Source image credentials in case it is a private registry. F.e. user:my_pass\")\n\tlayerCmd.Flags().StringP(\"dst\", \"\", \"\", \"Destination image reference. F.e. docker://user/image\")\n\tlayerCmd.Flags().StringP(\"dst-creds\", \"\", \"\", \"Destination credentials in case it is a docker registry. F.e. 
user:my_pass\")\n\tlayerCmd.Flags().StringP(\"cwd\", \"\", \"\", \"Working directory\")\n}\n\nfunc runCommand(command string, args []string) error {\n\tcmd := exec.Command(command, args...)\n\n\tstdout, _ := cmd.StdoutPipe()\n\tstderr, _ := cmd.StderrPipe()\n\tcmd.Start()\n\n\tscannerStdout := bufio.NewScanner(stdout)\n\tscannerStdout.Split(bufio.ScanLines)\n\tfor scannerStdout.Scan() {\n\t\tm := scannerStdout.Text()\n\t\tfmt.Fprintln(os.Stdout, m)\n\t}\n\tscannerStderr := bufio.NewScanner(stderr)\n\tscannerStderr.Split(bufio.ScanLines)\n\tfor scannerStderr.Scan() {\n\t\tm := scannerStderr.Text()\n\t\tfmt.Fprintln(os.Stderr, m)\n\t}\n\n\treturn cmd.Wait()\n}\n\nfunc skopeoCopy(src, dst, srcCreds, dstCreds string, insecure bool) error {\n\tcommand := \"skopeo\"\n\targs := []string{\"copy\"}\n\tif srcCreds != \"\" {\n\t\targs = append(args, \"--src-creds\", srcCreds)\n\t}\n\tif dstCreds != \"\" {\n\t\targs = append(args, \"--dest-creds\", dstCreds)\n\t}\n\tif insecure {\n\t\targs = append(args, \"--src-tls-verify=false\", \"--dest-tls-verify=false\")\n\t}\n\targs = append(args, src, dst)\n\treturn runCommand(command, args)\n}\n\nvar layerCmd = &cobra.Command{\n\tUse:   \"add-layer <tar> FLAG\",\n\tShort: \"Add tar as a image layer\",\n\tLong:  ``,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tif len(args) != 1 {\n\t\t\tlog.Fatal(\"Need exactly one argument - layer tar\")\n\t\t}\n\n\t\tlayerTar := args[0]\n\n\t\tsrcImage, err := cmd.Flags().GetString(\"src\")\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif srcImage == \"\" {\n\t\t\tlog.Fatal(\"Need specify the source image using the flag --src\")\n\t\t}\n\n\t\tdstImage, err := cmd.Flags().GetString(\"dst\")\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif dstImage == \"\" {\n\t\t\tlog.Fatal(\"Need specify the destination image using the flag --dst\")\n\t\t}\n\n\t\tsrcCreds, err := cmd.Flags().GetString(\"src-creds\")\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tdstCreds, err := 
cmd.Flags().GetString(\"dst-creds\")\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tworkDir, err := cmd.Flags().GetString(\"cwd\")\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif workDir == \"\" {\n\t\t\tworkDir, err = ioutil.TempDir(\"\", \"build\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\n\t\tinsecure, err := cmd.Flags().GetBool(\"insecure\")\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\t// Store src image\n\t\terr = skopeoCopy(srcImage, fmt.Sprintf(\"dir://%s\", workDir), srcCreds, dstCreds, insecure)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tlog.Println(\"Succesfully stored base image \", srcImage, \" at \", workDir)\n\n\t\t// Add layer\n\t\terr = lbuilder.AddTarToLayer(workDir, layerTar)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tlog.Println(\"Added layer \", layerTar, \" in \", workDir)\n\n\t\t// Publish new image\n\t\terr = skopeoCopy(fmt.Sprintf(\"dir://%s\", workDir), dstImage, srcCreds, dstCreds, insecure)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tlog.Println(\"Succesfully stored final image at \", dstImage)\n\t},\n}\n\nfunc newRootCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:   \"imbuilder\",\n\t\tShort: \"Pulls an image and push a new one including a tar file as a new layer\",\n\t\tLong:  globalUsage,\n\t}\n\n\tcmd.AddCommand(layerCmd)\n\treturn cmd\n}\n\nfunc main() {\n\tcmd := newRootCmd()\n\tif err := cmd.Execute(); err != nil {\n\t\tos.Exit(1)\n\t}\n}\n"
  },
  {
    "path": "pkg/function-image-builder/layer-builder/description.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage layerbuilder\n\nimport (\n\t\"crypto/sha256\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"time\"\n)\n\n// Config represents a container configuration\ntype Config struct {\n\tHostname     string\n\tDomainname   string\n\tUser         string\n\tAttachStdin  bool\n\tAttachStdout bool\n\tAttachStderr bool\n\tTty          bool\n\tOpenStdin    bool\n\tStdinOnce    bool\n\tEnv          []string\n\tCmd          []string\n\tArgsEscaped  bool\n\tImage        string\n\tVolumes      interface{}\n\tWorkingDir   string\n\tEntrypoint   interface{}\n\tOnBuild      interface{}\n\tLabels       interface{}\n}\n\n// HistoryEntry represents a layer creation info\ntype HistoryEntry struct {\n\tCreated    string `json:\"created\"`\n\tCreatedBy  string `json:\"created_by,omitifempty\"`\n\tComment    string `json:\"comment,omitifempty\"`\n\tEmptyLayer bool   `json:\"empty_layer,omitifempty\"`\n}\n\n// Rootfs represents the root filesystem of an image\ntype Rootfs struct {\n\tType    string   `json:\"type\"`\n\tDiffIds []string `json:\"diff_ids\"`\n}\n\n// Description represents the specification of a Docker image\ntype Description struct {\n\tArch            string         `json:\"architecture\"`\n\tConfig          Config         `json:\"config\"`\n\tContainer       string         `json:\"container\"`\n\tContainerConfig Config         `json:\"container_config\"`\n\tCreated 
        string         `json:\"created\"`\n\tDockerVersion   string         `json:\"docker_version\"`\n\tHistory         []HistoryEntry `json:\"history\"`\n\tOS              string         `json:\"os\"`\n\tRootfs          Rootfs         `json:\"rootfs\"`\n}\n\n// New generates a Description object based on the description file\nfunc (d *Description) New(descriptionFile io.Reader) error {\n\tdescriptionContent, err := ioutil.ReadAll(descriptionFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn json.Unmarshal(descriptionContent, d)\n}\n\n// AddLayer adds a new Layer to the image Description\nfunc (d *Description) AddLayer(newLayer *Layer) {\n\t//   Delete some properties that doesn't apply anymore\n\td.Config.Hostname = \"\"\n\td.Config.Image = \"\"\n\td.Container = \"\"\n\td.ContainerConfig.Hostname = \"\"\n\td.ContainerConfig.Image = \"\"\n\t//   Update new properties\n\td.Created = time.Now().UTC().Format(time.RFC3339)\n\td.History = append(d.History, HistoryEntry{\n\t\tCreated: time.Now().UTC().Format(time.RFC3339),\n\t\tComment: \"Created by Kubeless\",\n\t})\n\td.Rootfs.DiffIds = append(d.Rootfs.DiffIds, fmt.Sprintf(\"sha256:%s\", newLayer.Sha256))\n}\n\n// Content returns the description content\nfunc (d *Description) Content() ([]byte, error) {\n\treturn json.Marshal(*d)\n}\n\n// ToLayer returns the Description as a Layer\nfunc (d *Description) ToLayer() (*Layer, error) {\n\tcontent, err := d.Content()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdescriptionNewSize := int64(len(content))\n\tdescriptionNewSha := fmt.Sprintf(\"%x\", sha256.Sum256(content))\n\n\treturn &Layer{\n\t\tSize:   descriptionNewSize,\n\t\tSha256: descriptionNewSha,\n\t}, nil\n}\n"
  },
  {
    "path": "pkg/function-image-builder/layer-builder/description_test.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage layerbuilder\n\nimport (\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestNewDescription(t *testing.T) {\n\tdescFile := strings.NewReader(`{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"sh\"],\"ArgsEscaped\":true,\"Image\":\"sha256:8cae5980d887cc55ba2f978ae99c662007ee06d79881678d57f33f0473fe0736\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"container\":\"8d2c840a1a9b2544fe713c2e24b6757d52328f09bdfc9c2ef6219afbf7ae6b59\",\"container_config\":{\"Hostname\":\"8d2c840a1a9b\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) 
\"],\"ArgsEscaped\":true,\"Image\":\"sha256:8cae5980d887cc55ba2f978ae99c662007ee06d79881678d57f33f0473fe0736\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"created\":\"2018-02-28T22:14:49.023807051Z\",\"docker_version\":\"17.06.2-ce\",\"history\":[{\"created\":\"2018-02-28T22:14:48.759033366Z\",\"created_by\":\"/bin/sh -c #(nop) ADD file:327f69fc1ac9a7b6e56e9032f7b8fbd7741dd0b22920761909c6c8e5fa9c5815 in / \"},{\"created\":\"2018-02-28T22:14:49.023807051Z\",\"created_by\":\"/bin/sh -c #(nop)  \",\"empty_layer\":true}],\"os\":\"linux\",\"rootfs\":{\"type\":\"layers\",\"diff_ids\":[\"sha256:c5183829c43c4698634093dc38f9bee26d1b931dedeba71dbee984f42fe1270d\"]}}`)\n\td := Description{}\n\terr := d.New(descFile)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error %v\", err)\n\t}\n}\n\nfunc TestAddLayerDescription(t *testing.T) {\n\tdescFile := strings.NewReader(`{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"sh\"],\"ArgsEscaped\":true,\"Image\":\"sha256:8cae5980d887cc55ba2f978ae99c662007ee06d79881678d57f33f0473fe0736\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"container\":\"8d2c840a1a9b2544fe713c2e24b6757d52328f09bdfc9c2ef6219afbf7ae6b59\",\"container_config\":{\"Hostname\":\"8d2c840a1a9b\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) 
\"],\"ArgsEscaped\":true,\"Image\":\"sha256:8cae5980d887cc55ba2f978ae99c662007ee06d79881678d57f33f0473fe0736\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":{}},\"created\":\"2018-02-28T22:14:49.023807051Z\",\"docker_version\":\"17.06.2-ce\",\"history\":[{\"created\":\"2018-02-28T22:14:48.759033366Z\",\"created_by\":\"/bin/sh -c #(nop) ADD file:327f69fc1ac9a7b6e56e9032f7b8fbd7741dd0b22920761909c6c8e5fa9c5815 in / \"},{\"created\":\"2018-02-28T22:14:49.023807051Z\",\"created_by\":\"/bin/sh -c #(nop)  \",\"empty_layer\":true}],\"os\":\"linux\",\"rootfs\":{\"type\":\"layers\",\"diff_ids\":[\"sha256:c5183829c43c4698634093dc38f9bee26d1b931dedeba71dbee984f42fe1270d\"]}}`)\n\td := Description{}\n\terr := d.New(descFile)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error %v\", err)\n\t}\n\tnewLayer := Layer{\n\t\tSize:   10,\n\t\tSha256: \"abc123\",\n\t}\n\td.AddLayer(&newLayer)\n\t// Last history entry should be the new layer\n\tif d.History[len(d.History)-1].Comment != \"Created by Kubeless\" {\n\t\tt.Errorf(\"Failed to include new layer: %v\", d.History)\n\t}\n\t// Last rootfs.diff_id should be the new layer\n\tif d.Rootfs.DiffIds[len(d.Rootfs.DiffIds)-1] != \"sha256:abc123\" {\n\t\tt.Error(\"Failed to include new layer\")\n\t}\n}\n\nfunc TestDescriptionToLayer(t *testing.T) {\n\temptyDesc := Description{}\n\tres, err := emptyDesc.ToLayer()\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error %v\", err)\n\t}\n\texpectedSize := int64(721)\n\texpectedSha := \"17263670d4f12e26a270c7ec0a443c3ba8354da1d42f43f8e421634c5965bb6b\"\n\tif res.Sha256 != expectedSha {\n\t\tt.Errorf(\"Unexpected sha256 %s\", res.Sha256)\n\t}\n\tif res.Size != expectedSize {\n\t\tt.Errorf(\"Unexpected size %d\", res.Size)\n\t}\n}\n"
  },
  {
    "path": "pkg/function-image-builder/layer-builder/layer.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage layerbuilder\n\nimport (\n\t\"crypto/sha256\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n)\n\n// Layer represent the size and checksum of a image layer\ntype Layer struct {\n\tSize   int64\n\tSha256 string\n}\n\n// New returns a Layer based on its file\nfunc (f *Layer) New(layerFile *os.File) error {\n\t// Calculate sha256\n\tfContent, err := ioutil.ReadAll(layerFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.Sha256 = fmt.Sprintf(\"%x\", sha256.Sum256(fContent))\n\n\t// Calculate size\n\tfstat, err := layerFile.Stat()\n\tif err != nil {\n\t\treturn err\n\t}\n\tf.Size = fstat.Size()\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/function-image-builder/layer-builder/layer_builder.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage layerbuilder\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n)\n\nfunc copyReader(src io.Reader, dst string) error {\n\tdstFile, err := os.Create(dst)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer dstFile.Close()\n\n\t_, err = io.Copy(dstFile, src)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = dstFile.Sync()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc copyFile(src, dst string) error {\n\tsrcFile, err := os.Open(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer srcFile.Close()\n\n\treturn copyReader(srcFile, dst)\n}\n\nfunc getLayer(file string) (*Layer, error) {\n\tlayerFile, err := os.Open(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer layerFile.Close()\n\tlayer := Layer{}\n\terr = layer.New(layerFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &layer, nil\n}\n\nfunc saveNewDescription(content []byte, dir, contentChecksum string) error {\n\tdLayerFile := path.Join(dir, contentChecksum)\n\treturn copyReader(bytes.NewReader(content), dLayerFile)\n}\n\nfunc updateDescription(descriptionDir string, descriptionFile *os.File, newLayer *Layer) (*Description, error) {\n\td := Description{}\n\terr := d.New(descriptionFile)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to parse image description: %v\", 
err)\n\t}\n\td.AddLayer(newLayer)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to update image description: %v\", err)\n\t}\n\treturn &d, nil\n}\n\n// AddTarToLayer copies a tar file into a image directory and update its metadata\nfunc AddTarToLayer(imageDir, tarFile string) error {\n\ttarLayer, err := getLayer(tarFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdestFile := path.Join(imageDir, tarLayer.Sha256)\n\terr = copyFile(tarFile, destFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to copy tar file: %v\", err)\n\t}\n\tlog.Printf(\"Copied source %s to %s\", tarFile, destFile)\n\n\t// Parse manifest\n\tmanifestPath := path.Join(imageDir, \"manifest.json\")\n\tmanifestFile, err := os.Open(manifestPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm := Manifest{}\n\terr = m.New(manifestFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to parse image manifest: %v\", err)\n\t}\n\tlog.Printf(\"Parsed manifest\")\n\n\t// Update description\n\tdescriptionPath := path.Join(imageDir, strings.Replace(m.Config.Digest, \"sha256:\", \"\", -1))\n\tdescriptionFile, err := os.Open(descriptionPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdescription, err := updateDescription(imageDir, descriptionFile, tarLayer)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdescriptionLayer, err := description.ToLayer()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to generate layer from description: %v\", err)\n\t}\n\tdescriptionContent, err := description.Content()\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = saveNewDescription(descriptionContent, imageDir, descriptionLayer.Sha256)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"Added layer to description at %s\", descriptionLayer.Sha256)\n\n\t// Update manifest\n\tm.UpdateConfig(descriptionLayer)\n\tm.AddLayer(tarLayer)\n\tmBytes, err := json.Marshal(m)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(manifestPath, mBytes, 0644)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\tlog.Printf(\"Updated manifest\")\n\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/function-image-builder/layer-builder/layer_test.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage layerbuilder\n\nimport (\n\t\"io/ioutil\"\n\t\"os\"\n\t\"testing\"\n)\n\nfunc TestNewLayer(t *testing.T) {\n\tf, err := ioutil.TempFile(\"\", \"\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.Remove(f.Name())\n\tf.WriteString(\"test content\")\n\tlayer := Layer{}\n\terr = layer.New(f)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error %v\", err)\n\t}\n\tif layer.Sha256 != \"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\" {\n\t\tt.Errorf(\"Wrong sha, expecting patata, received %s\", layer.Sha256)\n\t}\n\tif layer.Size != 12 {\n\t\tt.Errorf(\"Wrong size, expecting patata, received %d\", layer.Size)\n\t}\n}\n"
  },
  {
    "path": "pkg/function-image-builder/layer-builder/manifest.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage layerbuilder\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n)\n\ntype layer struct {\n\tMediaType string `json:\"mediaType\"`\n\tSize      int64  `json:\"size\"`\n\tDigest    string `json:\"digest\"`\n}\n\n// Manifest represent the manifest.json of an image\ntype Manifest struct {\n\tSchemaVersion int     `json:\"schemaVersion\"`\n\tMediaType     string  `json:\"mediaType\"`\n\tConfig        layer   `json:\"config\"`\n\tLayers        []layer `json:\"layers\"`\n}\n\n// New parses an io.Reader into a Manifest\nfunc (m *Manifest) New(manifestFile io.Reader) error {\n\tmanifestContent, err := ioutil.ReadAll(manifestFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = json.Unmarshal(manifestContent, m)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// UpdateConfig overrides the Config information of the manifest with a new Layer\nfunc (m *Manifest) UpdateConfig(newConfig *Layer) {\n\tm.Config.Size = int64(newConfig.Size)\n\tm.Config.Digest = fmt.Sprintf(\"sha256:%s\", newConfig.Sha256)\n}\n\n// AddLayer adds a new layer to the list in the Manifest\nfunc (m *Manifest) AddLayer(newLayer *Layer) {\n\tm.Layers = append(m.Layers, layer{\n\t\tMediaType: \"application/vnd.docker.image.rootfs.diff.tar.gzip\",\n\t\tSize:      newLayer.Size,\n\t\tDigest:    fmt.Sprintf(\"sha256:%s\", newLayer.Sha256),\n\t})\n}\n"
  },
  {
    "path": "pkg/function-image-builder/layer-builder/manifest_test.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage layerbuilder\n\nimport (\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestNewManifest(t *testing.T) {\n\tmanifestFile := strings.NewReader(`{\"schemaVersion\":2,\"mediaType\":\"application/vnd.docker.distribution.manifest.v2+json\",\"config\":{\"mediaType\":\"application/vnd.docker.container.image.v1+json\",\"size\":1489,\"digest\":\"sha256:c7fc094ddbf9f9335543421b34d8c6f3becd3bb05c9f9a5ca0f0e6065871072d\"},\"layers\":[{\"mediaType\":\"application/vnd.docker.image.rootfs.diff.tar.gzip\",\"size\":723113,\"digest\":\"sha256:d070b8ef96fc4f2d92ff520a4fe55594e362b4e1076a32bbfeb261dc03322910\"}]}`)\n\tm := Manifest{}\n\terr := m.New(manifestFile)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error %v\", err)\n\t}\n\tif m.Config.Size != 1489 {\n\t\tt.Errorf(\"Unexpected size %d\", m.Config.Size)\n\t}\n\tif m.Config.Digest != \"sha256:c7fc094ddbf9f9335543421b34d8c6f3becd3bb05c9f9a5ca0f0e6065871072d\" {\n\t\tt.Errorf(\"Unexpected digest %s\", m.Config.Digest)\n\t}\n\tif len(m.Layers) != 1 {\n\t\tt.Errorf(\"Unexpected layers length %d\", len(m.Layers))\n\t}\n}\n\nfunc TestAddNewLayer(t *testing.T) {\n\tmanifestFile := 
strings.NewReader(`{\"schemaVersion\":2,\"mediaType\":\"application/vnd.docker.distribution.manifest.v2+json\",\"config\":{\"mediaType\":\"application/vnd.docker.container.image.v1+json\",\"size\":1489,\"digest\":\"sha256:c7fc094ddbf9f9335543421b34d8c6f3becd3bb05c9f9a5ca0f0e6065871072d\"},\"layers\":[{\"mediaType\":\"application/vnd.docker.image.rootfs.diff.tar.gzip\",\"size\":723113,\"digest\":\"sha256:d070b8ef96fc4f2d92ff520a4fe55594e362b4e1076a32bbfeb261dc03322910\"}]}`)\n\tm := Manifest{}\n\terr := m.New(manifestFile)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error %v\", err)\n\t}\n\tm.AddLayer(&Layer{\n\t\tSize:   10,\n\t\tSha256: \"Test\",\n\t})\n\tif len(m.Layers) != 2 {\n\t\tt.Errorf(\"Unexpected layers length %d\", len(m.Layers))\n\t}\n\tif m.Layers[1].Size != 10 || m.Layers[1].Digest != \"sha256:Test\" {\n\t\tt.Errorf(\"Unexpected layer %v\", m.Layers[1])\n\t}\n}\n\nfunc TestUpdateConfig(t *testing.T) {\n\tmanifestFile := strings.NewReader(`{\"schemaVersion\":2,\"mediaType\":\"application/vnd.docker.distribution.manifest.v2+json\",\"config\":{\"mediaType\":\"application/vnd.docker.container.image.v1+json\",\"size\":1489,\"digest\":\"sha256:c7fc094ddbf9f9335543421b34d8c6f3becd3bb05c9f9a5ca0f0e6065871072d\"},\"layers\":[{\"mediaType\":\"application/vnd.docker.image.rootfs.diff.tar.gzip\",\"size\":723113,\"digest\":\"sha256:d070b8ef96fc4f2d92ff520a4fe55594e362b4e1076a32bbfeb261dc03322910\"}]}`)\n\tm := Manifest{}\n\terr := m.New(manifestFile)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error %v\", err)\n\t}\n\tm.UpdateConfig(&Layer{\n\t\tSize:   10,\n\t\tSha256: \"Test\",\n\t})\n\tif m.Config.Size != 10 || m.Config.Digest != \"sha256:Test\" {\n\t\tt.Errorf(\"Unexpected layer %v\", m.Config)\n\t}\n}\n"
  },
  {
    "path": "pkg/function-proxy/Gopkg.toml",
    "content": "[[constraint]]\n  name = \"github.com/prometheus/client_golang\"\n  revision = \"f504d69affe11ec1ccb2e5948127f86878c9fd57\"\n"
  },
  {
    "path": "pkg/function-proxy/proxy.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage main\n\nimport (\n\t\"golang.org/x/net/context\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n\t\"os/exec\"\n\n\t\"github.com/kubeless/kubeless/pkg/function-proxy/utils\"\n\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n)\n\nfunc copyHeaders(dst, src http.Header) {\n\tfor k, vv := range src {\n\t\tfor _, v := range vv {\n\t\t\tdst.Add(k, v)\n\t\t}\n\t}\n}\n\nfunc handle(ctx context.Context, w http.ResponseWriter, r *http.Request) ([]byte, error) {\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(r.Method, \"http://localhost:8090\", r.Body)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\tcopyHeaders(req.Header, r.Header)\n\treq.ContentLength = r.ContentLength\n\tresponse, err := client.Do(req)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\treturn ioutil.ReadAll(response.Body)\n}\n\nfunc handler(w http.ResponseWriter, r *http.Request) {\n\tutils.Handler(w, r, handle)\n}\n\nfunc health(w http.ResponseWriter, r *http.Request) {\n\trr, err := http.Get(\"http://localhost:8090/healthz\")\n\tres, _ := ioutil.ReadAll(rr.Body)\n\tlog.Println(string(res))\n\tif err != nil {\n\t\tlog.Fatalln(\"localhost:8090 not responding\")\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(\"Internal Server error\"))\n\t} else {\n\t\tw.Write([]byte(\"OK\"))\n\t}\n}\n\nfunc startNativeDaemon() {\n\targs := 
os.Getenv(\"FUNC_PROCESS\")\n\tcmd := exec.Command(\"/bin/sh\", \"-c\", args)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatalf(\"Unable to run %s. Received %v\", args, err)\n\t}\n}\n\nfunc main() {\n\tgo startNativeDaemon()\n\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"/\", handler)\n\tmux.HandleFunc(\"/healthz\", health)\n\tmux.Handle(\"/metrics\", promhttp.Handler())\n\n\tserver := utils.NewServer(mux)\n\n\tgo func() {\n\t\tif err := server.ListenAndServe(); err != http.ErrServerClosed {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\tutils.GracefulShutdown(server)\n}\n"
  },
  {
    "path": "pkg/function-proxy/utils/proxy-utils.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage utils\n\nimport (\n\t\"fmt\"\n\t\"golang.org/x/net/context\"\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n\t\"os/signal\"\n\t\"strconv\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n)\n\nvar (\n\ttimeout            = os.Getenv(\"FUNC_TIMEOUT\")\n\tfuncPort           = os.Getenv(\"FUNC_PORT\")\n\tshutdownTimeout    = os.Getenv(\"SHUTDOWN_TIMEOUT\")\n\tintTimeout         int\n\tintShutdownTimeout int\n\tfuncHistogram      = prometheus.NewHistogramVec(prometheus.HistogramOpts{\n\t\tName: \"function_duration_seconds\",\n\t\tHelp: \"Duration of user function in seconds\",\n\t}, []string{\"method\"})\n\tfuncCalls = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\tName: \"function_calls_total\",\n\t\tHelp: \"Number of calls to user function\",\n\t}, []string{\"method\"})\n\tfuncErrors = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\tName: \"function_failures_total\",\n\t\tHelp: \"Number of exceptions in user function\",\n\t}, []string{\"method\"})\n)\n\n// PromHTTPHandler to expose the metrics, invoked in the golang runtime\nfunc PromHTTPHandler() http.Handler {\n\treturn promhttp.Handler()\n}\n\nfunc init() {\n\tif timeout == \"\" {\n\t\ttimeout = \"180\"\n\t}\n\tif funcPort == \"\" {\n\t\tfuncPort = \"8080\"\n\t}\n\tif shutdownTimeout == \"\" {\n\t\tshutdownTimeout = 
\"10\"\n\t}\n\tvar err error\n\tintTimeout, err = strconv.Atoi(timeout)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tintShutdownTimeout, err = strconv.Atoi(shutdownTimeout)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tprometheus.MustRegister(funcHistogram, funcCalls, funcErrors)\n}\n\n// Logging Functions, required to expose statusCode property\ntype loggingResponseWriter struct {\n\thttp.ResponseWriter\n\tstatusCode int\n}\n\nfunc newLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {\n\treturn &loggingResponseWriter{w, http.StatusOK}\n}\n\nfunc (lrw *loggingResponseWriter) WriteHeader(code int) {\n\tlrw.statusCode = code\n\tlrw.ResponseWriter.WriteHeader(code)\n}\n\nfunc logReq(handler http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlrw := newLoggingResponseWriter(w)\n\t\thandler.ServeHTTP(lrw, r)\n\t\tlog.Printf(\"%s \\\"%s %s %s\\\" %d %s\", r.RemoteAddr, r.Method, r.RequestURI, r.Proto, lrw.statusCode, r.UserAgent())\n\t\tif lrw.statusCode == 408 {\n\t\t\tgo func() {\n\t\t\t\t// Give time to return timeout response\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t\tlog.Fatal(\"Request timeout. 
Forcing exit\")\n\t\t\t}()\n\t\t}\n\t})\n}\n\nfunc copyHeaders(dst, src http.Header) {\n\tfor k, vv := range src {\n\t\tfor _, v := range vv {\n\t\t\tdst.Add(k, v)\n\t\t}\n\t}\n}\n\n// Handle type receive the context elements of a HTTP request to process it\ntype Handle func(ctx context.Context, w http.ResponseWriter, r *http.Request) ([]byte, error)\n\n// Handler receives an HTTP request and response and a handler function\n// It manages timeouts and prometheus metrics\nfunc Handler(w http.ResponseWriter, r *http.Request, h Handle) {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Duration(intTimeout)*time.Second)\n\tdefer cancel()\n\tfuncChannel := make(chan struct {\n\t\tres string\n\t\terr error\n\t}, 1)\n\tgo func() {\n\t\tfuncCalls.With(prometheus.Labels{\"method\": r.Method}).Inc()\n\t\tstart := time.Now()\n\t\tres, err := h(ctx, w, r)\n\t\tfuncHistogram.With(prometheus.Labels{\"method\": r.Method}).Observe(time.Since(start).Seconds())\n\t\tpack := struct {\n\t\t\tres string\n\t\t\terr error\n\t\t}{string(res), err}\n\t\tfuncChannel <- pack\n\t}()\n\tselect {\n\tcase respPack := <-funcChannel:\n\t\tif respPack.err != nil {\n\t\t\tfuncErrors.With(prometheus.Labels{\"method\": r.Method}).Inc()\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tw.Write([]byte(fmt.Sprintf(\"Error: %v\", respPack.err)))\n\t\t} else {\n\t\t\tw.Write([]byte(respPack.res))\n\t\t}\n\t// Send Timeout response\n\tcase <-ctx.Done():\n\t\tfuncErrors.With(prometheus.Labels{\"method\": r.Method}).Inc()\n\t\tw.WriteHeader(http.StatusRequestTimeout)\n\t\tw.Write([]byte(\"Timeout exceeded\"))\n\t}\n}\n\n// NewServer returns an HTTP server ready to listen on the configured port\n// and with logReq mixed in for logging.\nfunc NewServer(mux *http.ServeMux) *http.Server {\n\treturn &http.Server{Addr: fmt.Sprintf(\":%s\", funcPort), Handler: logReq(mux)}\n}\n\n// GracefulShutdown accepts a server reference and triggers a graceful shutdown\n// for it when either SIGINT or 
SIGTERM is received.\nfunc GracefulShutdown(server *http.Server) {\n\tstop := make(chan os.Signal, 1)\n\tsignal.Notify(stop, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)\n\t<-stop\n\ttimeoutDuration := time.Duration(intShutdownTimeout) * time.Second\n\tctx, cancel := context.WithTimeout(context.Background(), timeoutDuration)\n\tdefer cancel()\n\n\tlog.Printf(\"Shuting down with timeout: %s\\n\", timeoutDuration)\n\tif err := server.Shutdown(ctx); err != nil {\n\t\tlog.Printf(\"Error: %v\\n\", err)\n\t} else {\n\t\tlog.Println(\"Server stopped\")\n\t}\n}\n"
  },
  {
    "path": "pkg/functions/params.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage functions\n\nimport (\n\t\"golang.org/x/net/context\"\n\t\"net/http\"\n)\n\n// Extension includes a reference to the Event request and its Context (to handle timeouts)\ntype Extension struct {\n\tRequest  *http.Request\n\tResponse http.ResponseWriter\n\tContext  context.Context\n}\n\n// Event includes information about the event source\ntype Event struct {\n\tData           string\n\tEventID        string\n\tEventType      string\n\tEventTime      string\n\tEventNamespace string\n\tExtensions     Extension\n}\n\n// Context includes information about the function environment\ntype Context struct {\n\tFunctionName string\n\tTimeout      string\n\tRuntime      string\n\tMemoryLimit  string\n}\n"
  },
  {
    "path": "pkg/langruntime/langruntime.go",
    "content": "package langruntime\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com/ghodss/yaml\"\n\t\"github.com/sirupsen/logrus\"\n\tappsv1 \"k8s.io/api/apps/v1\"\n\t\"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/util/intstr\"\n)\n\nconst (\n\t// PhaseInstallation - Installation phase name\n\tPhaseInstallation = \"installation\"\n\t// PhaseCompilation - Compilation phase name\n\tPhaseCompilation = \"compilation\"\n\t// PhaseRuntime - Runtime phase name\n\tPhaseRuntime = \"runtime\"\n)\n\n// Langruntimes struct for getting configmap\ntype Langruntimes struct {\n\tkubelessConfig    *v1.ConfigMap\n\tAvailableRuntimes []RuntimeInfo\n}\n\n// Image represents the information about a runtime phase\ntype Image struct {\n\tPhase   string            `yaml:\"phase\"`\n\tImage   string            `yaml:\"image\"`\n\tCommand string            `yaml:\"command,omitempty\"`\n\tEnv     map[string]string `yaml:\"env,omitempty\"`\n\tSecrets []Secret          `yaml:\"secrets,omitempty\"`\n}\n\n// Secret is a reference to a secret.\ntype Secret struct {\n\tName string `yaml:\"name,omitempty\"`\n}\n\n// RuntimeVersion is a struct with all the info about the images and secrets\ntype RuntimeVersion struct {\n\tName             string        `yaml:\"name\"`\n\tVersion          string        `yaml:\"version\"`\n\tImages           []Image       `yaml:\"runtimeImage\"`\n\tImagePullSecrets []ImageSecret `yaml:\"imagePullSecrets,omitempty\"`\n}\n\n// ImageSecret for pulling the image\ntype ImageSecret struct {\n\tImageSecret string `yaml:\"imageSecret,omitempty\"`\n}\n\n// RuntimeInfo describe the runtime specifics (typical file suffix and dependency file name)\n// and the supported versions\ntype RuntimeInfo struct {\n\tID                string           `yaml:\"ID\"`\n\tVersions          []RuntimeVersion `yaml:\"versions\"`\n\tLivenessProbeInfo *v1.Probe        `yaml:\"livenessProbeInfo,omitempty\"`\n\tDepName           string           
`yaml:\"depName\"`\n\tFileNameSuffix    string           `yaml:\"fileNameSuffix\"`\n}\n\n// New initializes a langruntime object\nfunc New(config *v1.ConfigMap) *Langruntimes {\n\tvar ri []RuntimeInfo\n\n\treturn &Langruntimes{\n\t\tkubelessConfig:    config,\n\t\tAvailableRuntimes: ri,\n\t}\n}\n\n// ReadConfigMap reads the configmap\nfunc (l *Langruntimes) ReadConfigMap() {\n\tif runtimeImages, ok := l.kubelessConfig.Data[\"runtime-images\"]; ok {\n\t\terr := yaml.Unmarshal([]byte(runtimeImages), &l.AvailableRuntimes)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Unable to get the runtime images: %v\", err)\n\t\t}\n\t}\n}\n\n// GetRuntimes returns the list of available runtimes as strings\nfunc (l *Langruntimes) GetRuntimes() []string {\n\tresult := []string{}\n\tfor _, runtimeInf := range l.AvailableRuntimes {\n\t\tfor _, runtime := range runtimeInf.Versions {\n\t\t\tresult = append(result, runtimeInf.ID+runtime.Version)\n\t\t}\n\t}\n\treturn result\n}\n\n// IsValidRuntime returns true if passed runtime name is valid runtime\nfunc (l *Langruntimes) IsValidRuntime(runtime string) bool {\n\tfor _, validRuntime := range l.GetRuntimes() {\n\t\tif runtime == validRuntime {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (l *Langruntimes) getAvailableRuntimesPerTrigger(imageType string) []string {\n\tvar runtimeList []string\n\tfor i := range l.AvailableRuntimes {\n\t\tfor j := range l.AvailableRuntimes[i].Versions {\n\t\t\tif l.findImage(PhaseRuntime, l.AvailableRuntimes[i].Versions[j]) != nil {\n\t\t\t\truntimeList = append(runtimeList, l.AvailableRuntimes[i].ID+l.AvailableRuntimes[i].Versions[j].Version)\n\t\t\t}\n\t\t}\n\t}\n\treturn runtimeList\n}\n\n// extract the branch number from the runtime string\nfunc (l *Langruntimes) getVersionFromRuntime(runtime string) string {\n\tre := regexp.MustCompile(\"[0-9.]+$\")\n\treturn re.FindString(runtime)\n}\n\n// GetRuntimeInfo returns all the info regarding a runtime\nfunc (l *Langruntimes) GetRuntimeInfo(runtime 
string) (RuntimeInfo, error) {\n\truntimeID := regexp.MustCompile(\"^[a-zA-Z_-]+\").FindString(runtime)\n\tfor _, runtimeInf := range l.AvailableRuntimes {\n\t\tif runtimeInf.ID == runtimeID {\n\t\t\treturn runtimeInf, nil\n\t\t}\n\t}\n\n\treturn RuntimeInfo{}, fmt.Errorf(\"Unable to find %s as runtime\", runtime)\n}\n\n// GetLivenessProbeInfo returs the liveness probe info regarding a runtime\nfunc (l *Langruntimes) GetLivenessProbeInfo(runtime string, port int) *v1.Probe {\n\tlivenessProbe := &v1.Probe{\n\t\tInitialDelaySeconds: int32(3),\n\t\tPeriodSeconds:       int32(30),\n\t\tHandler: v1.Handler{\n\t\t\tHTTPGet: &v1.HTTPGetAction{\n\t\t\t\tPath: \"/healthz\",\n\t\t\t\tPort: intstr.FromInt(port),\n\t\t\t},\n\t\t},\n\t}\n\n\truntimeID := regexp.MustCompile(\"^[a-zA-Z]+\").FindString(runtime)\n\tfor _, runtimeInf := range l.AvailableRuntimes {\n\t\tif runtimeInf.ID == runtimeID {\n\t\t\tif runtimeInf.LivenessProbeInfo != nil {\n\t\t\t\treturn runtimeInf.LivenessProbeInfo\n\t\t\t}\n\t\t\treturn livenessProbe\n\t\t}\n\t}\n\treturn livenessProbe\n}\n\nfunc (l *Langruntimes) findRuntimeVersion(runtimeWithVersion string) (RuntimeVersion, error) {\n\tversion := l.getVersionFromRuntime(runtimeWithVersion)\n\truntimeInf, err := l.GetRuntimeInfo(runtimeWithVersion)\n\tif err != nil {\n\t\treturn RuntimeVersion{}, err\n\t}\n\tfor _, versionInf := range runtimeInf.Versions {\n\t\tif versionInf.Version == version {\n\t\t\treturn versionInf, nil\n\t\t}\n\t}\n\treturn RuntimeVersion{}, fmt.Errorf(\"The given runtime and version %s is not valid\", runtimeWithVersion)\n}\n\n// Returns the image information of a phase or null if the phase is not found\nfunc (l *Langruntimes) findImage(phase string, runtime RuntimeVersion) *Image {\n\tfor _, imageInf := range runtime.Images {\n\t\tif imageInf.Phase == phase {\n\t\t\treturn &imageInf\n\t\t}\n\t}\n\treturn nil\n}\n\n// GetFunctionImage returns the image ID depending on the runtime, its version and function type\nfunc (l 
*Langruntimes) GetFunctionImage(runtime string) (string, error) {\n\truntimeInf, err := l.GetRuntimeInfo(runtime)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\timageNameEnvVar := strings.ToUpper(runtimeInf.ID) + l.getVersionFromRuntime(runtime) + \"_RUNTIME\"\n\timageName := os.Getenv(imageNameEnvVar)\n\tif imageName == \"\" {\n\t\tversionInf, err := l.findRuntimeVersion(runtime)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\truntimeImage := l.findImage(PhaseRuntime, versionInf)\n\t\tif runtimeImage == nil {\n\t\t\terr = fmt.Errorf(\"The given runtime and version '%s' does not have a valid image for HTTP based functions. Available runtimes are: %s\", runtime, strings.Join(l.getAvailableRuntimesPerTrigger(\"HTTP\")[:], \", \"))\n\t\t} else {\n\t\t\timageName = runtimeImage.Image\n\t\t}\n\t}\n\treturn imageName, nil\n}\n\n// GetImageSecrets gets the secrets to pull the runtime image\nfunc (l *Langruntimes) GetImageSecrets(runtime string) ([]v1.LocalObjectReference, error) {\n\tvar secrets []string\n\n\truntimeInf, err := l.findRuntimeVersion(runtime)\n\tif err != nil {\n\t\treturn []v1.LocalObjectReference{}, err\n\t}\n\n\tif len(runtimeInf.ImagePullSecrets) == 0 {\n\t\treturn []v1.LocalObjectReference{}, nil\n\t}\n\n\tfor _, s := range runtimeInf.ImagePullSecrets {\n\t\tsecrets = append(secrets, s.ImageSecret)\n\t}\n\tvar lors []v1.LocalObjectReference\n\tif len(secrets) > 0 {\n\t\tfor _, s := range secrets {\n\t\t\tlor := v1.LocalObjectReference{Name: s}\n\t\t\tlors = append(lors, lor)\n\t\t}\n\t}\n\n\treturn lors, nil\n}\n\n// GetInitContainerSecrets gets the secrets of the init container with name\nfunc (l *Langruntimes) GetInitContainerSecrets(runtime, name string) ([]v1.LocalObjectReference, error) {\n\truntimeInf, err := l.findRuntimeVersion(runtime)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(runtimeInf.Images) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tvar secrets []Secret\n\tphase := name2phase(name)\n\tfor _, i := range 
runtimeInf.Images {\n\t\tif i.Phase == phase {\n\t\t\tsecrets = append(secrets, i.Secrets...)\n\t\t\tbreak\n\t\t}\n\t}\n\tvar refs []v1.LocalObjectReference\n\tfor _, s := range secrets {\n\t\trefs = append(refs, v1.LocalObjectReference{Name: s.Name})\n\t}\n\n\treturn refs, nil\n}\n\nfunc appendToCommand(orig string, command ...string) string {\n\tif len(orig) > 0 {\n\t\treturn fmt.Sprintf(\"%s && %s\", orig, strings.Join(command, \" && \"))\n\t}\n\treturn strings.Join(command, \" && \")\n}\n\nfunc parseEnv(env map[string]string) []v1.EnvVar {\n\tres := []v1.EnvVar{}\n\tfor key, value := range env {\n\t\tres = append(res, v1.EnvVar{Name: key, Value: value})\n\t}\n\treturn res\n}\n\n// GetBuildContainer returns a Container definition based on a runtime\nfunc (l *Langruntimes) GetBuildContainer(runtime, depsChecksum string, env []v1.EnvVar, installVolume v1.VolumeMount, resources v1.ResourceRequirements) (v1.Container, error) {\n\truntimeInf, err := l.GetRuntimeInfo(runtime)\n\tif err != nil {\n\t\treturn v1.Container{}, err\n\t}\n\tdepsFile := path.Join(installVolume.MountPath, runtimeInf.DepName)\n\tversionInf, err := l.findRuntimeVersion(runtime)\n\tif err != nil {\n\t\treturn v1.Container{}, err\n\t}\n\n\timageInf := l.findImage(PhaseInstallation, versionInf)\n\tif imageInf == nil {\n\t\t// The runtime doesn't have an installation hook\n\t\treturn v1.Container{}, nil\n\t}\n\n\tvar command string\n\t// Validate deps checksum\n\tshaFile := \"/tmp/deps.sha256\"\n\n\t// if checksum exist, check sum\n\tif depsChecksum != \"\" {\n\t\tcommand = appendToCommand(command,\n\t\t\tfmt.Sprintf(\"echo '%s  %s' > %s\", depsChecksum, depsFile, shaFile),\n\t\t\tfmt.Sprintf(\"sha256sum -c %s\", shaFile),\n\t\t\timageInf.Command,\n\t\t)\n\t} else {\n\t\tcommand = appendToCommand(command, imageInf.Command)\n\t}\n\n\tenv = append(\n\t\tenv,\n\t\tv1.EnvVar{Name: \"KUBELESS_INSTALL_VOLUME\", Value: installVolume.MountPath},\n\t\tv1.EnvVar{Name: \"KUBELESS_DEPS_FILE\", Value: 
depsFile},\n\t)\n\tenv = append(env, parseEnv(imageInf.Env)...)\n\n\treturn v1.Container{\n\t\tName:            \"install\",\n\t\tImage:           imageInf.Image,\n\t\tCommand:         []string{\"sh\", \"-c\"},\n\t\tArgs:            []string{command},\n\t\tVolumeMounts:    []v1.VolumeMount{installVolume},\n\t\tImagePullPolicy: v1.PullIfNotPresent,\n\t\tWorkingDir:      installVolume.MountPath,\n\t\tEnv:             env,\n\t\tResources:       resources,\n\t}, nil\n}\n\n// UpdateDeployment object in case of custom runtime\nfunc (l *Langruntimes) UpdateDeployment(dpm *appsv1.Deployment, volPath, runtime string) {\n\tversionInf, err := l.findRuntimeVersion(runtime)\n\tif err != nil {\n\t\t// Not found an image for the given runtime\n\t\treturn\n\t}\n\tdpm.Spec.Template.Spec.Containers[0].Env = append(\n\t\tdpm.Spec.Template.Spec.Containers[0].Env,\n\t\tv1.EnvVar{Name: \"KUBELESS_INSTALL_VOLUME\", Value: volPath},\n\t)\n\n\timageInf := l.findImage(PhaseRuntime, versionInf)\n\tif imageInf == nil {\n\t\t// Not found an image for the given runtime\n\t\treturn\n\t}\n\tdpm.Spec.Template.Spec.Containers[0].Env = append(\n\t\tdpm.Spec.Template.Spec.Containers[0].Env,\n\t\tparseEnv(imageInf.Env)...,\n\t)\n}\n\n// GetCompilationContainer returns a Container definition based on a runtime\nfunc (l *Langruntimes) GetCompilationContainer(runtime, funcName string, env []v1.EnvVar, installVolume v1.VolumeMount, resources v1.ResourceRequirements) (*v1.Container, error) {\n\tversionInf, err := l.findRuntimeVersion(runtime)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\timageInf := l.findImage(PhaseCompilation, versionInf)\n\tif imageInf == nil {\n\t\t// The runtime doesn't have a compilation hook\n\t\treturn nil, nil\n\t}\n\n\tenv = append(\n\t\tenv,\n\t\tv1.EnvVar{Name: \"KUBELESS_INSTALL_VOLUME\", Value: installVolume.MountPath},\n\t\tv1.EnvVar{Name: \"KUBELESS_FUNC_NAME\", Value: funcName},\n\t)\n\tenv = append(env, parseEnv(imageInf.Env)...)\n\treturn &v1.Container{\n\t\tName:    
        \"compile\",\n\t\tImage:           imageInf.Image,\n\t\tCommand:         []string{\"sh\", \"-c\"},\n\t\tArgs:            []string{imageInf.Command},\n\t\tEnv:             env,\n\t\tVolumeMounts:    []v1.VolumeMount{installVolume},\n\t\tImagePullPolicy: v1.PullIfNotPresent,\n\t\tWorkingDir:      installVolume.MountPath,\n\t\tResources:       resources,\n\t}, nil\n}\n\n// name2phase returns the phase of an init container\nfunc name2phase(name string) string {\n\tswitch name {\n\tcase \"compile\":\n\t\treturn PhaseCompilation\n\tcase \"install\":\n\t\treturn PhaseInstallation\n\t}\n\treturn name\n}\n"
  },
  {
    "path": "pkg/langruntime/langruntime_test.go",
    "content": "package langruntime\n\nimport (\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n)\n\nvar clientset = fake.NewSimpleClientset()\n\nfunc TestMain(m *testing.M) {\n\tAddFakeConfig(clientset)\n\tos.Exit(m.Run())\n}\n\nfunc check(clientset *fake.Clientset, lr *Langruntimes, runtime, fname string, values []string, t *testing.T) {\n\n\tinfo, err := lr.GetRuntimeInfo(runtime)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif info.DepName != values[0] {\n\t\tt.Fatalf(\"Retrieving the image returned a wrong dependencies file. Received \" + info.DepName + \" while expecting \" + values[0])\n\t}\n\tif fname+info.FileNameSuffix != values[1] {\n\t\tt.Fatalf(\"Retrieving the image returned a wrong file name. Received \" + fname + info.FileNameSuffix + \" while expecting \" + values[1])\n\t}\n}\n\nfunc TestGetFunctionFileNames(t *testing.T) {\n\tlr := SetupLangRuntime(clientset)\n\tlr.ReadConfigMap()\n\n\texpectedValues := []string{\"requirements.txt\", \"test.py\"}\n\tcheck(clientset, lr, \"python2.7\", \"test\", expectedValues, t)\n}\n\nfunc TestGetFunctionImage(t *testing.T) {\n\tlr := SetupLangRuntime(clientset)\n\tlr.ReadConfigMap()\n\n\t// Throws an error if the runtime doesn't exist\n\t_, err := lr.GetFunctionImage(\"unexistent\")\n\tif err == nil {\n\t\tt.Fatalf(\"Retrieving data for 'unexistent' should return an error\")\n\t}\n\n\t// Throws an error if the runtime version doesn't exist\n\t_, err = lr.GetFunctionImage(\"python10\")\n\texpectedErrMsg := regexp.MustCompile(\"The given runtime and version python10 is not valid\")\n\tif expectedErrMsg.FindString(err.Error()) == \"\" {\n\t\tt.Fatalf(\"Retrieving data for 'python10' should return an error. 
Received: %s\", err)\n\t}\n\n\texpectedImageName := \"ruby-test-image\"\n\tos.Setenv(\"PYTHON2.7_RUNTIME\", expectedImageName)\n\timageR, errR := lr.GetFunctionImage(\"python2.7\")\n\tif errR != nil {\n\t\tt.Errorf(\"Retrieving the image returned err: %v\", errR)\n\t}\n\tif imageR != expectedImageName {\n\t\tt.Errorf(\"Expecting \" + imageR + \" to be set to \" + expectedImageName)\n\t}\n\tos.Unsetenv(\"PYTHON2.7_RUNTIME\")\n}\n\nfunc TestGetLivenessProbe(t *testing.T) {\n\tlr := SetupLangRuntime(clientset)\n\tlr.ReadConfigMap()\n\tlivenessProbe := lr.GetLivenessProbeInfo(\"python\", 8080)\n\n\texpectedLivenessProbe := &v1.Probe{\n\t\tInitialDelaySeconds: int32(5),\n\t\tPeriodSeconds:       int32(10),\n\t\tHandler: v1.Handler{\n\t\t\tExec: &v1.ExecAction{\n\t\t\t\tCommand: []string{\"curl\", \"-f\", \"http://localhost:8080/healthz\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tif !reflect.DeepEqual(livenessProbe, expectedLivenessProbe) {\n\t\tt.Fatalf(\"Expected livenessProbeInfo to be %v, but found %v\", expectedLivenessProbe, livenessProbe)\n\t}\n}\n\nfunc TestGetRuntimes(t *testing.T) {\n\tlr := SetupLangRuntime(clientset)\n\tlr.ReadConfigMap()\n\n\truntimes := strings.Join(lr.GetRuntimes(), \", \")\n\texpectedRuntimes := \"python2.7\"\n\tif runtimes != expectedRuntimes {\n\t\tt.Errorf(\"Expected %s but got %s\", expectedRuntimes, runtimes)\n\t}\n}\n\nfunc TestGetBuildContainer(t *testing.T) {\n\tlr := SetupLangRuntime(clientset)\n\tlr.ReadConfigMap()\n\n\t// It should throw an error if there is not an image available\n\t_, err := lr.GetBuildContainer(\"notExists\", \"\", []v1.EnvVar{}, v1.VolumeMount{}, v1.ResourceRequirements{})\n\tif err == nil {\n\t\tt.Error(\"Expected to throw an error\")\n\t}\n\n\t// It should return the proper build image for python\n\tvol1 := v1.VolumeMount{Name: \"v1\", MountPath: \"/v1\"}\n\tresources := v1.ResourceRequirements{Limits: v1.ResourceList{v1.ResourceLimitsCPU: resource.MustParse(\"100m\")}}\n\tc, err := 
lr.GetBuildContainer(\"python2.7\", \"abc123\", []v1.EnvVar{}, vol1, resources)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\texpectedContainer := v1.Container{\n\t\tName:            \"install\",\n\t\tImage:           \"python:2.7\",\n\t\tCommand:         []string{\"sh\", \"-c\"},\n\t\tArgs:            []string{\"echo 'abc123  /v1/requirements.txt' > /tmp/deps.sha256 && sha256sum -c /tmp/deps.sha256 && foo\"},\n\t\tVolumeMounts:    []v1.VolumeMount{vol1},\n\t\tWorkingDir:      \"/v1\",\n\t\tImagePullPolicy: v1.PullIfNotPresent,\n\t\tEnv: []v1.EnvVar{\n\t\t\t{Name: \"KUBELESS_INSTALL_VOLUME\", Value: \"/v1\"},\n\t\t\t{Name: \"KUBELESS_DEPS_FILE\", Value: \"/v1/requirements.txt\"},\n\t\t},\n\t\tResources: v1.ResourceRequirements{Limits: v1.ResourceList{v1.ResourceLimitsCPU: resource.MustParse(\"100m\")}},\n\t}\n\tif !reflect.DeepEqual(expectedContainer, c) {\n\t\tt.Errorf(\"Unexpected result. Expecting:\\n %+v\\nReceived:\\n %+v\", expectedContainer, c)\n\t}\n}\n\nfunc TestGetBuildContainerWithBundledDeps(t *testing.T) {\n\tlr := SetupLangRuntime(clientset)\n\tlr.ReadConfigMap()\n\n\t// It should return the proper build image for python\n\tvol1 := v1.VolumeMount{Name: \"v1\", MountPath: \"/v1\"}\n\tresources := v1.ResourceRequirements{Limits: v1.ResourceList{v1.ResourceLimitsCPU: resource.MustParse(\"100m\")}}\n\tc, err := lr.GetBuildContainer(\"python2.7\", \"\", []v1.EnvVar{}, vol1, resources)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\texpectedContainer := v1.Container{\n\t\tName:            \"install\",\n\t\tImage:           \"python:2.7\",\n\t\tCommand:         []string{\"sh\", \"-c\"},\n\t\tArgs:            []string{\"foo\"},\n\t\tVolumeMounts:    []v1.VolumeMount{vol1},\n\t\tWorkingDir:      \"/v1\",\n\t\tImagePullPolicy: v1.PullIfNotPresent,\n\t\tEnv: []v1.EnvVar{\n\t\t\t{Name: \"KUBELESS_INSTALL_VOLUME\", Value: \"/v1\"},\n\t\t\t{Name: \"KUBELESS_DEPS_FILE\", Value: 
\"/v1/requirements.txt\"},\n\t\t},\n\t\tResources: v1.ResourceRequirements{Limits: v1.ResourceList{v1.ResourceLimitsCPU: resource.MustParse(\"100m\")}},\n\t}\n\tif !reflect.DeepEqual(expectedContainer, c) {\n\t\tt.Errorf(\"Unexpected result. Expecting:\\n %+v\\nReceived:\\n %+v\", expectedContainer, c)\n\t}\n}\n"
  },
  {
    "path": "pkg/langruntime/langruntimetestutils.go",
    "content": "package langruntime\n\nimport (\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"k8s.io/api/core/v1\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n)\n\n// AddFakeConfig initializes configmap for unit tests with fake configuration.\nfunc AddFakeConfig(clientset *fake.Clientset) {\n\n\truntimeImages := `[\n  {\n    \"ID\": \"python\",\n    \"compiled\": false,\n    \"depName\": \"requirements.txt\",\n    \"fileNameSuffix\": \".py\",\n    \"livenessProbeInfo\": {\n      \"exec\": {\n        \"command\": [\"curl\", \"-f\", \"http://localhost:8080/healthz\"]\n      },\n      \"initialDelaySeconds\": 5,\n      \"periodseconds\": 10\n    },\n    \"versions\": [\n      {\n        \"images\": [\n          {\n            \"command\": \"foo\",\n            \"image\": \"python:2.7\",\n            \"phase\": \"installation\",\n            \"secrets\": [{\"name\": \"my-secret\"}]            \n          },\n          {\n            \"image\": \"bar\",\n            \"phase\": \"runtime\",\n            \"env\": {\"PYTHONPATH\": \"/kubeless/lib/python2.7/site-packages:/kubeless\"}\n          }\n        ],\n        \"name\": \"python27\",\n        \"version\": \"2.7\",\n        \"imagePullSecrets\": [{\"ImageSecret\": \"p1\"}, {\"ImageSecret\": \"p2\"}]\n      }\n    ]\n  }\n]`\n\tcm := v1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"kubeless-config\",\n\t\t\tNamespace: \"kubeless\",\n\t\t},\n\t\tData: map[string]string{\n\t\t\t\"runtime-images\": runtimeImages,\n\t\t},\n\t}\n\n\t_, err := clientset.CoreV1().ConfigMaps(\"kubeless\").Create(&cm)\n\tif err != nil {\n\t\tlogrus.Fatal(\"Unable to create configmap\")\n\t}\n}\n\n// SetupLangRuntime Sets up Langruntime struct\nfunc SetupLangRuntime(clientset *fake.Clientset) *Langruntimes {\n\tconfig, err := clientset.CoreV1().ConfigMaps(\"kubeless\").Get(\"kubeless-config\", metav1.GetOptions{})\n\tif err != nil {\n\t\tlogrus.Fatal(\"Unable to read the 
configmap\")\n\t}\n\tvar lr = New(config)\n\treturn lr\n}\n"
  },
  {
    "path": "pkg/registry/registry.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage registry\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"time\"\n\n\t\"k8s.io/api/core/v1\"\n)\n\n// Credentials represent the required credentials to authenticate against a Docker registry\ntype Credentials struct {\n\tUsername string `json:\"username\"`\n\tPassword string `json:\"password\"`\n\tEmail    string `json:\"email,omitifempty\"`\n\tAuth     string `json:\"auth,omitifempty\"`\n}\n\n// Registry struct represents a Docker Registry\ntype Registry struct {\n\tEndpoint string\n\tVersion  string\n\tCreds    Credentials\n}\n\ntype tagv1 struct {\n\tLayer string `json:\"layer\"`\n\tName  string `json:\"name\"`\n}\n\ntype tagListV2 struct {\n\tName string   `json:\"name\"`\n\tTags []string `json:\"tags\"`\n}\n\ntype dockerCfg struct {\n\tAuths map[string]Credentials `json:\"auths\"`\n}\n\n// New returns a Registry struct parsing its URL and storing the required credentials\nfunc New(config v1.Secret) (*Registry, error) {\n\t// Parse secret\n\tcfg := dockerCfg{}\n\terr := json.Unmarshal(config.Data[\".dockerconfigjson\"], &cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tregs := reflect.ValueOf(cfg.Auths).MapKeys()\n\tif len(regs) > 1 {\n\t\treturn nil, fmt.Errorf(\"Found several registries: %q, unable to decide which one to use\", regs)\n\t}\n\tregistryURL := regs[0].String()\n\tre := 
regexp.MustCompile(\"(https?://.*)/(v[0-9]+)/?\")\n\tparsedURL := re.FindStringSubmatch(registryURL)\n\tif len(parsedURL) == 0 {\n\t\treturn nil, fmt.Errorf(\"Unable to parse registry URL %s\", registryURL)\n\t}\n\treg := Registry{\n\t\tEndpoint: parsedURL[1],\n\t\tVersion:  parsedURL[2],\n\t\tCreds:    cfg.Auths[registryURL],\n\t}\n\treturn &reg, err\n}\n\n// getTags return the list of tags from an HTTP response to the tag/list API endpoint\nfunc (r *Registry) getTags(body []byte) ([]string, error) {\n\tswitch r.Version {\n\tcase \"v1\":\n\t\tresponse := []tagv1{}\n\t\terr := json.Unmarshal(body, &response)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttags := []string{}\n\t\tfor _, tag := range response {\n\t\t\ttags = append(tags, tag.Name)\n\t\t}\n\t\treturn tags, nil\n\tcase \"v2\":\n\t\tresponse := tagListV2{}\n\t\terr := json.Unmarshal(body, &response)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn response.Tags, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"API version %s not supported\", r.Version)\n\t}\n}\n\n// tagURL return the URL of the endpoint for listing existing tags\nfunc (r *Registry) tagURL(img string) (string, error) {\n\tswitch r.Version {\n\tcase \"v1\":\n\t\treturn fmt.Sprintf(\"%s/%s/repositories/%s/tags\", r.Endpoint, r.Version, img), nil\n\tcase \"v2\":\n\t\treturn fmt.Sprintf(\"%s/%s/%s/tags/list\", r.Endpoint, r.Version, img), nil\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"API version %s not supported\", r.Version)\n\t}\n}\n\n// findProperty returns the value of a property from a list witht the format 'foo=\"bar\",bar=\"foo\"'\nfunc findProperty(src, property string) (string, error) {\n\tre := regexp.MustCompile(fmt.Sprintf(\"%s=\\\"([^\\\"]*)\\\"\", property))\n\tres := re.FindStringSubmatch(src)\n\tif len(res) != 2 {\n\t\treturn \"\", fmt.Errorf(\"Unable to find the property %s in %s\", property, src)\n\t}\n\treturn res[1], nil\n}\n\ntype authResponse struct {\n\tToken string `json:\"token\"`\n}\n\n// 
doRequestWithAuth does an HTTP GET against the given url parsing the authInfo given
headers\n\t\tauthInfo := resp.Header.Get(\"Www-Authenticate\")\n\t\tif authInfo == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"Failed to authenticate: unknown authentication format: %v\", body)\n\t\t}\n\t\tbody, err = doRequestWithAuth(authInfo, url, client)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn body, nil\n}\n\n// ImageExists checks if a certain image:tag exists in the registry\nfunc (r *Registry) ImageExists(id, tag string) (bool, error) {\n\turl, err := r.tagURL(id)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tbody, err := r.doRequest(url)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif match, _ := regexp.MatchString(\"Resource not found\", string(body)); match {\n\t\t// There is no image with that ID yet\n\t\treturn false, nil\n\t}\n\ttags, err := r.getTags(body)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tfor _, t := range tags {\n\t\tif t == tag {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}\n"
  },
  {
    "path": "pkg/registry/registry_test.go",
    "content": "package registry\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"k8s.io/api/core/v1\"\n)\n\nfunc TestNew(t *testing.T) {\n\ts := v1.Secret{\n\t\tData: map[string][]byte{\n\t\t\t\".dockerconfigjson\": []byte(\"{\\\"auths\\\":{\\\"https://index.docker.io/v1/\\\":{\\\"username\\\":\\\"test\\\",\\\"password\\\":\\\"pass\\\"}}}\"),\n\t\t},\n\t}\n\tr, err := New(s)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif r.Endpoint != \"https://index.docker.io\" {\n\t\tt.Errorf(\"Unexpected endpoint %s, expecting https://index.docker.io\", r.Endpoint)\n\t}\n\tif r.Version != \"v1\" {\n\t\tt.Errorf(\"Unexpected version %s, expecting v1\", r.Version)\n\t}\n\tif r.Creds.Username != \"test\" {\n\t\tt.Errorf(\"Unexpected username %s, expecting test\", r.Creds.Username)\n\t}\n\tif r.Creds.Password != \"pass\" {\n\t\tt.Errorf(\"Unexpected password %s, expecting pass\", r.Creds.Password)\n\t}\n}\n\nfunc TestTagURLV1(t *testing.T) {\n\tr := Registry{\n\t\tEndpoint: \"https://registry-1.docker.io\",\n\t\tVersion:  \"v1\",\n\t}\n\turl, err := r.tagURL(\"test/image\")\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %v\", err)\n\t}\n\tif url != \"https://registry-1.docker.io/v1/repositories/test/image/tags\" {\n\t\tt.Errorf(\"Unexpected URL %s\", url)\n\t}\n}\n\nfunc TestTagURLV2(t *testing.T) {\n\tr := Registry{\n\t\tEndpoint: \"https://registry-1.docker.io\",\n\t\tVersion:  \"v2\",\n\t}\n\turl, err := r.tagURL(\"test/image\")\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %v\", err)\n\t}\n\tif url != \"https://registry-1.docker.io/v2/test/image/tags/list\" {\n\t\tt.Errorf(\"Unexpected URL %s\", url)\n\t}\n}\n\nfunc TestGetTagsV1(t *testing.T) {\n\tr := Registry{\n\t\tEndpoint: \"https://registry-1.docker.io\",\n\t\tVersion:  \"v1\",\n\t}\n\tbody := []byte(\"[{\\\"later\\\": \\\"\\\", \\\"name\\\": \\\"latest\\\"}]\")\n\ttags, err := r.getTags(body)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %v\", err)\n\t}\n\texpectedTags := []string{\"latest\"}\n\tif 
!reflect.DeepEqual(tags, expectedTags) {\n\t\tt.Errorf(\"Unexpected tags: %v\", tags)\n\t}\n}\n\nfunc TestGetTagsV2(t *testing.T) {\n\tr := Registry{\n\t\tEndpoint: \"https://registry-1.docker.io\",\n\t\tVersion:  \"v2\",\n\t}\n\tbody := []byte(\"{\\\"name\\\": \\\"test\\\", \\\"tags\\\":[\\\"latest\\\"]}\")\n\ttags, err := r.getTags(body)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %v\", err)\n\t}\n\texpectedTags := []string{\"latest\"}\n\tif !reflect.DeepEqual(tags, expectedTags) {\n\t\tt.Errorf(\"Unexpected tags: %v\", tags)\n\t}\n}\n"
  },
  {
    "path": "pkg/utils/configlocation.go",
    "content": "package utils\n\n// ConfigLocation is a struct to store the location of kubeless configuration specific ConfigMap\ntype ConfigLocation struct {\n\tName      string\n\tNamespace string\n}\n"
  },
  {
    "path": "pkg/utils/exec.go",
    "content": "package utils\n\nimport (\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"sync\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/net/websocket\"\n\t\"k8s.io/api/core/v1\"\n\t\"k8s.io/client-go/kubernetes/scheme\"\n\tcorev1 \"k8s.io/client-go/kubernetes/typed/core/v1\"\n\t\"k8s.io/client-go/rest\"\n)\n\nconst (\n\tstdinChannel  = 0\n\tstdoutChannel = 1\n\tstderrChannel = 2\n\terrChannel    = 3\n)\n\n// Cmd stores information relevant to an individual remote command being run\ntype Cmd struct {\n\tStdin  io.Reader\n\tStdout io.Writer\n\tStderr io.Writer\n}\n\n// RoundTripCallback is suitable to use with `ExecRoundTripper` and will\n// copy data to/from stdio channels.  The returned `Response` is\n// currently always `nil`.\nfunc (c *Cmd) RoundTripCallback(conn *websocket.Conn) (*http.Response, error) {\n\terrChan := make(chan error, 3)\n\twg := sync.WaitGroup{}\n\twg.Add(2)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tif c.Stdin == nil {\n\t\t\treturn\n\t\t}\n\t\tbuf := make([]byte, 1025) // NB: first byte is fixed\n\t\tbuf[0] = stdinChannel\n\t\tfor {\n\t\t\tn, err := c.Stdin.Read(buf[1:])\n\t\t\terr2 := websocket.Message.Send(conn, buf[:n+1])\n\t\t\tif err == nil && err2 != nil {\n\t\t\t\terr = err2\n\t\t\t}\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t} else if err != nil {\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tconst closeStatusNormal = 1000\n\t\tconn.WriteClose(closeStatusNormal)\n\t}()\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor {\n\t\t\tvar buf []byte\n\t\t\terr := websocket.Message.Receive(conn, &buf)\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t} else if err != nil {\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif len(buf) == 0 {\n\t\t\t\tlogrus.Debug(\"Received empty message, skipping\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlogrus.Debugf(\"Received %dB message for channel %d\", len(buf)-1, buf[0])\n\t\t\tvar w io.Writer\n\t\t\tswitch buf[0] {\n\t\t\tcase stdoutChannel:\n\t\t\t\tw = 
c.Stdout\n\t\t\tcase stderrChannel:\n\t\t\t\tw = c.Stderr\n\t\t\tcase errChannel:\n\t\t\t\terrChan <- fmt.Errorf(\"Error from remote command: %s\", buf[1:])\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tlogrus.Infof(\"Ignoring message for unknown channel %d\", buf[0])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif w == nil {\n\t\t\t\tlogrus.Infof(\"Ignoring message for nil channel %d\", buf[0])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t_, err = w.Write(buf[1:])\n\t\t\tif err != nil {\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\twg.Wait()\n\tclose(errChan)\n\terr := <-errChan\n\treturn &http.Response{\n\t\tStatus:     \"OK\",\n\t\tStatusCode: 200,\n\t}, err\n}\n\n// A RoundTripCallback is used to process the websocket from an\n// individual command execution.\ntype RoundTripCallback func(conn *websocket.Conn) (*http.Response, error)\n\n// WebsocketRoundTripper is an http.RoundTripper that invokes a\n// callback on a websocket connection.\ntype WebsocketRoundTripper struct {\n\tTLSConfig *tls.Config\n\tDo        RoundTripCallback\n}\n\n// RoundTrip implements the http.RoundTripper interface.\nfunc (d *WebsocketRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {\n\treferrer := r.Referer()\n\tif referrer == \"\" {\n\t\treferrer = \"http://localhost/\"\n\t}\n\n\twsconf, err := websocket.NewConfig(r.URL.String(), referrer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\twsconf.TlsConfig = d.TLSConfig\n\twsconf.Header = r.Header\n\twsconf.Protocol = []string{\"channel.k8s.io\"}\n\n\tconn, err := websocket.DialConfig(wsconf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconn.PayloadType = websocket.BinaryFrame\n\tdefer conn.Close()\n\n\treturn d.Do(conn)\n}\n\n// ExecRoundTripper creates a wrapped WebsocketRoundTripper\nfunc ExecRoundTripper(conf *rest.Config, f RoundTripCallback) (http.RoundTripper, error) {\n\ttlsConfig, err := rest.TLSConfigFor(conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trt := &WebsocketRoundTripper{\n\t\tDo:        
f,\n\t\tTLSConfig: tlsConfig,\n\t}\n\n\treturn rest.HTTPWrappersForConfig(conf, rt)\n}\n\n// Exec returns an \"exec\" Request suitable for ExecRoundTripper.\nfunc Exec(client corev1.CoreV1Interface, pod, namespace string, opts v1.PodExecOptions) (*http.Request, error) {\n\tcl := client.RESTClient()\n\treq := cl.Verb(\"ignored\").\n\t\tNamespace(namespace).\n\t\tResource(\"pods\").\n\t\tName(pod).\n\t\tSubResource(\"exec\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec)\n\n\turl := req.URL()\n\n\tswitch url.Scheme {\n\tcase \"http\":\n\t\turl.Scheme = \"ws\"\n\tcase \"https\":\n\t\turl.Scheme = \"wss\"\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Unrecognised URL scheme in %v\", url)\n\t}\n\n\t// NB: Only some fields are honoured by our RoundTrip implementation\n\treturn &http.Request{\n\t\tURL: url,\n\t}, nil\n}\n"
  },
  {
    "path": "pkg/utils/exec_test.go",
    "content": "package utils\n\nimport (\n\t\"testing\"\n\n\t\"k8s.io/api/core/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/rest\"\n)\n\nfunc TestExecURL(t *testing.T) {\n\tconf := rest.Config{\n\t\tHost: \"https://example.com/\",\n\t}\n\tclientset := kubernetes.NewForConfigOrDie(&conf)\n\n\topts := v1.PodExecOptions{\n\t\tContainer: \"ctr\",\n\t\tStderr:    true,\n\t\tCommand:   []string{\"a\", \"b\"},\n\t}\n\treq, err := Exec(clientset.Core(), \"mypod\", \"myns\", opts)\n\tif err != nil {\n\t\tt.Fatal(\"Exec error:\", err)\n\t}\n\tt.Logf(\"Got URL %v\", req.URL)\n\tif req.URL.String() != \"wss://example.com/api/v1/namespaces/myns/pods/mypod/exec?command=a&command=b&container=ctr&stderr=true\" {\n\t\tt.Error(\"Unexpected url:\", req.URL)\n\t}\n}\n"
  },
  {
    "path": "pkg/utils/k8sutil.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage utils\n\nimport (\n\t\"crypto/rand\"\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"os\"\n\t\"path/filepath\"\n\n\tkubelessApi \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\t\"github.com/sirupsen/logrus\"\n\n\tappsv1 \"k8s.io/api/apps/v1\"\n\t\"k8s.io/api/autoscaling/v2beta1\"\n\tv1 \"k8s.io/api/core/v1\"\n\tclientsetAPIExtensions \"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/rest\"\n\t\"k8s.io/client-go/tools/clientcmd\"\n\n\tk8sErrors \"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/types\"\n\n\tmonitoringv1alpha1 \"github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1\"\n\n\t// Auth plugins\n\t_ \"k8s.io/client-go/plugin/pkg/client/auth\"\n\n\t\"github.com/imdario/mergo\"\n\t\"github.com/kubeless/kubeless/pkg/client/clientset/versioned\"\n)\n\nconst (\n\tdefaultTimeout = \"180\"\n)\n\n// GetClient returns a k8s clientset to the request from inside of cluster\nfunc GetClient() kubernetes.Interface {\n\tconfig, err := GetInClusterConfig()\n\tif err != nil {\n\t\tlogrus.Fatalf(\"Can not get kubernetes config: %v\", err)\n\t}\n\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\tlogrus.Fatalf(\"Can not create kubernetes client: %v\", 
err)\n\t}\n\n\treturn clientset\n}\n\n// BuildOutOfClusterConfig returns k8s config\nfunc BuildOutOfClusterConfig() (*rest.Config, error) {\n\tloadingRules := clientcmd.NewDefaultClientConfigLoadingRules()\n\tkubeconfigEnv := os.Getenv(\"KUBECONFIG\")\n\tif kubeconfigEnv == \"\" {\n\t\thome := os.Getenv(\"HOMEDRIVE\") + os.Getenv(\"HOMEPATH\")\n\t\tif home == \"\" {\n\t\t\tfor _, h := range []string{\"HOME\", \"USERPROFILE\"} {\n\t\t\t\tif home = os.Getenv(h); home != \"\" {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tkubeconfigPath := filepath.Join(home, \".kube\", \"config\")\n\t\tloadingRules.ExplicitPath = kubeconfigPath\n\t}\n\tconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(\n\t\tloadingRules, &clientcmd.ConfigOverrides{}).ClientConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn config, nil\n}\n\n// GetClientOutOfCluster returns a k8s clientset to the request from outside of cluster\nfunc GetClientOutOfCluster() kubernetes.Interface {\n\tconfig, err := BuildOutOfClusterConfig()\n\tif err != nil {\n\t\tlogrus.Fatalf(\"Can not get kubernetes config: %v\", err)\n\t}\n\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\tlogrus.Fatalf(\"Can not get kubernetes client: %v\", err)\n\t}\n\n\treturn clientset\n}\n\n// GetAPIExtensionsClientOutOfCluster returns a k8s clientset to access APIExtensions from outside of cluster\nfunc GetAPIExtensionsClientOutOfCluster() clientsetAPIExtensions.Interface {\n\tconfig, err := BuildOutOfClusterConfig()\n\tif err != nil {\n\t\tlogrus.Fatalf(\"Can not get kubernetes config: %v\", err)\n\t}\n\tclientset, err := clientsetAPIExtensions.NewForConfig(config)\n\tif err != nil {\n\t\tlogrus.Fatalf(\"Can not get kubernetes client: %v\", err)\n\t}\n\treturn clientset\n}\n\n// GetAPIExtensionsClientInCluster returns a k8s clientset to access APIExtensions from inside of cluster\nfunc GetAPIExtensionsClientInCluster() clientsetAPIExtensions.Interface {\n\tconfig, err := 
GetInClusterConfig()\n\n\tif err != nil {\n\t\tlogrus.Fatalf(\"Can not get kubernetes config: %v\", err)\n\t}\n\tclientset, err := clientsetAPIExtensions.NewForConfig(config)\n\tif err != nil {\n\t\tlogrus.Fatalf(\"Can not get kubernetes client: %v\", err)\n\t}\n\treturn clientset\n}\n\n// GetFunctionClientInCluster returns function clientset to the request from inside of cluster\nfunc GetFunctionClientInCluster() (versioned.Interface, error) {\n\tconfig, err := GetInClusterConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tkubelessClient, err := versioned.NewForConfig(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn kubelessClient, nil\n}\n\n// GetKubelessClientOutCluster returns kubeless clientset to make kubeless API request from outside of cluster\nfunc GetKubelessClientOutCluster() (versioned.Interface, error) {\n\tconfig, err := BuildOutOfClusterConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkubelessClient, err := versioned.NewForConfig(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn kubelessClient, nil\n}\n\n// GetDefaultNamespace returns the namespace set in current cluster context\nfunc GetDefaultNamespace() string {\n\trules := clientcmd.NewDefaultClientConfigLoadingRules()\n\trules.DefaultClientConfig = &clientcmd.DefaultClientConfig\n\toverrides := &clientcmd.ConfigOverrides{ClusterDefaults: clientcmd.ClusterDefaults}\n\n\tif ns, _, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides).Namespace(); err == nil {\n\t\treturn ns\n\t}\n\treturn v1.NamespaceDefault\n}\n\n// GetFunction returns specification of a function\nfunc GetFunction(funcName, ns string) (kubelessApi.Function, error) {\n\tkubelessClient, err := GetKubelessClientOutCluster()\n\tif err != nil {\n\t\treturn kubelessApi.Function{}, err\n\t}\n\n\tf, err := kubelessClient.KubelessV1beta1().Functions(ns).Get(funcName, metav1.GetOptions{})\n\n\tif err != nil {\n\t\tif k8sErrors.IsNotFound(err) 
{\n\t\t\tlogrus.Fatalf(\"Function %s is not found\", funcName)\n\t\t}\n\t\treturn kubelessApi.Function{}, err\n\t}\n\n\treturn *f, nil\n}\n\n// CreateFunctionCustomResource will create a custom function object\nfunc CreateFunctionCustomResource(kubelessClient versioned.Interface, f *kubelessApi.Function) error {\n\t_, err := kubelessClient.KubelessV1beta1().Functions(f.Namespace).Create(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// UpdateFunctionCustomResource applies changes to the function custom object\nfunc UpdateFunctionCustomResource(kubelessClient versioned.Interface, f *kubelessApi.Function) error {\n\t_, err := kubelessClient.KubelessV1beta1().Functions(f.Namespace).Update(f)\n\treturn err\n}\n\n// PatchFunctionCustomResource applies changes to the function custom object\nfunc PatchFunctionCustomResource(kubelessClient versioned.Interface, f *kubelessApi.Function) error {\n\tdata, err := json.Marshal(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = kubelessClient.KubelessV1beta1().Functions(f.Namespace).Patch(f.Name, types.MergePatchType, data)\n\treturn err\n}\n\n// DeleteFunctionCustomResource will delete custom function object\nfunc DeleteFunctionCustomResource(kubelessClient versioned.Interface, funcName, ns string) error {\n\terr := kubelessClient.KubelessV1beta1().Functions(ns).Delete(funcName, &metav1.DeleteOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// GetFunctionCustomResource will delete custom function object\nfunc GetFunctionCustomResource(kubelessClient versioned.Interface, funcName, ns string) (*kubelessApi.Function, error) {\n\tfunctionObj, err := kubelessClient.KubelessV1beta1().Functions(ns).Get(funcName, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn functionObj, nil\n}\n\n// GetPodsByLabel returns list of pods which match the label\n// We use this to returns pods to which the function is deployed or pods running controllers\nfunc GetPodsByLabel(c 
kubernetes.Interface, ns, k, v string) (*v1.PodList, error) {\n\tpods, err := c.Core().Pods(ns).List(metav1.ListOptions{\n\t\tLabelSelector: k + \"=\" + v,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn pods, nil\n}\n\n// GetReadyPod returns the first pod has passed the liveness probe check\nfunc GetReadyPod(pods *v1.PodList) (v1.Pod, error) {\n\tfor _, pod := range pods.Items {\n\t\tisPodRunning := true\n\t\tfor _, containerStatus := range pod.Status.ContainerStatuses {\n\t\t\tif !containerStatus.Ready {\n\t\t\t\tisPodRunning = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif isPodRunning {\n\t\t\treturn pod, nil\n\t\t}\n\t}\n\treturn v1.Pod{}, fmt.Errorf(\"there is no pod ready\")\n}\n\n// GetLocalHostname returns hostname\nfunc GetLocalHostname(config *rest.Config, funcName string) (string, error) {\n\turl, err := url.Parse(config.Host)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thost := url.Hostname()\n\n\treturn fmt.Sprintf(\"%s.%s.nip.io\", funcName, host), nil\n}\n\nfunc doRESTReq(restIface rest.Interface, groupVersion, verb, resource, elem, namespace string, body interface{}, result interface{}) error {\n\tvar req *rest.Request\n\tbodyJSON := []byte{}\n\tvar err error\n\tif body != nil {\n\t\tbodyJSON, err = json.Marshal(body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tswitch verb {\n\tcase \"get\":\n\t\treq = restIface.Get().Name(elem)\n\t\tbreak\n\tcase \"create\":\n\t\treq = restIface.Post().Body(bodyJSON)\n\t\tbreak\n\tcase \"update\":\n\t\treq = restIface.Put().Name(elem).Body(bodyJSON)\n\t\tbreak\n\tdefault:\n\t\treturn fmt.Errorf(\"Verb %s not supported\", verb)\n\t}\n\trawResponse, err := req.AbsPath(\"apis\", groupVersion, \"namespaces\", namespace, resource).DoRaw()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif result != nil {\n\t\terr = json.Unmarshal(rawResponse, result)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// CreateAutoscale creates HPA object for function\nfunc 
CreateAutoscale(client kubernetes.Interface, hpa v2beta1.HorizontalPodAutoscaler) error {\n\t_, err := client.AutoscalingV2beta1().HorizontalPodAutoscalers(hpa.ObjectMeta.Namespace).Create(&hpa)\n\treturn err\n}\n\n// UpdateAutoscale updates an existing HPA object for a function\nfunc UpdateAutoscale(client kubernetes.Interface, hpa v2beta1.HorizontalPodAutoscaler) error {\n\t_, err := client.AutoscalingV2beta1().HorizontalPodAutoscalers(hpa.ObjectMeta.Namespace).Update(&hpa)\n\treturn err\n}\n\n// DeleteAutoscale deletes an autoscale rule\nfunc DeleteAutoscale(client kubernetes.Interface, name, ns string) error {\n\terr := client.AutoscalingV2beta1().HorizontalPodAutoscalers(ns).Delete(name, &metav1.DeleteOptions{})\n\tif err != nil && !k8sErrors.IsNotFound(err) {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// DeleteServiceMonitor cleans the sm if it exists\nfunc DeleteServiceMonitor(smclient monitoringv1alpha1.MonitoringV1alpha1Client, name, ns string) error {\n\terr := smclient.ServiceMonitors(ns).Delete(name, &metav1.DeleteOptions{})\n\tif err != nil && !k8sErrors.IsNotFound(err) {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// InitializeEmptyMapsInDeployment initializes all nil maps in a Deployment object\n// This is done to counteract with side-effects of github.com/imdario/mergo which panics when provided with a nil map in a struct\nfunc initializeEmptyMapsInDeployment(deployment *appsv1.Deployment) {\n\tif deployment.ObjectMeta.Annotations == nil {\n\t\tdeployment.Annotations = make(map[string]string)\n\t}\n\tif deployment.ObjectMeta.Labels == nil {\n\t\tdeployment.ObjectMeta.Labels = make(map[string]string)\n\t}\n\tif deployment.Spec.Selector != nil && deployment.Spec.Selector.MatchLabels == nil {\n\t\tdeployment.ObjectMeta.Labels = make(map[string]string)\n\t}\n\tif deployment.Spec.Template.ObjectMeta.Annotations == nil {\n\t\tdeployment.Spec.Template.ObjectMeta.Annotations = make(map[string]string)\n\t}\n\tif deployment.Spec.Template.ObjectMeta.Labels == nil 
{\n\t\tdeployment.Spec.Template.ObjectMeta.Labels = make(map[string]string)\n\t}\n\tif deployment.Spec.Template.Spec.NodeSelector == nil {\n\t\tdeployment.Spec.Template.Spec.NodeSelector = make(map[string]string)\n\t}\n}\n\n// MergeDeployments merges two deployment objects\nfunc MergeDeployments(destinationDeployment *appsv1.Deployment, sourceDeployment *appsv1.Deployment) error {\n\t// Initializing nil maps in deployment objects else github.com/imdario/mergo panics\n\tinitializeEmptyMapsInDeployment(destinationDeployment)\n\tinitializeEmptyMapsInDeployment(sourceDeployment)\n\terr := mergo.Merge(destinationDeployment, sourceDeployment)\n\n\t// Merge containers\n\tif err == nil && len(sourceDeployment.Spec.Template.Spec.Containers) > 0 {\n\t\tsrcContainers := sourceDeployment.Spec.Template.Spec.Containers\n\t\tdstContainers := destinationDeployment.Spec.Template.Spec.Containers\n\n\t\t// Merge each container individually\n\t\tfor i, srcContainer := range srcContainers {\n\t\t\tif i >= len(dstContainers) {\n\t\t\t\tdestinationDeployment.Spec.Template.Spec.Containers[i] = srcContainer\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdstContainer := dstContainers[i]\n\n\t\t\t// Use mergo.WithAppendSlice to append extra volumeMount/env/port definitions\n\t\t\terr = mergo.Merge(&dstContainer, srcContainer, mergo.WithAppendSlice)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tdestinationDeployment.Spec.Template.Spec.Containers[i] = dstContainer\n\t\t}\n\n\t}\n\treturn err\n}\n\n// FunctionObjAddFinalizer add specified finalizer string to function object\nfunc FunctionObjAddFinalizer(kubelessClient versioned.Interface, funcObj *kubelessApi.Function, finalizerString string) error {\n\tfuncObjClone := funcObj.DeepCopy()\n\tfuncObjClone.ObjectMeta.Finalizers = append(funcObjClone.ObjectMeta.Finalizers, finalizerString)\n\treturn UpdateFunctionCustomResource(kubelessClient, funcObjClone)\n}\n\n// FunctionObjHasFinalizer checks if function object already has the Function controller 
finalizer\nfunc FunctionObjHasFinalizer(funcObj *kubelessApi.Function, finalizerString string) bool {\n\tcurrentFinalizers := funcObj.ObjectMeta.Finalizers\n\tfor _, f := range currentFinalizers {\n\t\tif f == finalizerString {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// FunctionObjRemoveFinalizer removes the finalizer from the function object\nfunc FunctionObjRemoveFinalizer(kubelessClient versioned.Interface, funcObj *kubelessApi.Function, finalizerString string) error {\n\tfuncObjClone := funcObj.DeepCopy()\n\tnewSlice := make([]string, 0)\n\tfor _, item := range funcObj.ObjectMeta.Finalizers {\n\t\tif item == finalizerString {\n\t\t\tcontinue\n\t\t}\n\t\tnewSlice = append(newSlice, item)\n\t}\n\tif len(newSlice) == 0 {\n\t\tnewSlice = nil\n\t}\n\tfuncObjClone.ObjectMeta.Finalizers = newSlice\n\terr := UpdateFunctionCustomResource(kubelessClient, funcObjClone)\n\treturn err\n}\n\n// GetAnnotationsFromCRD gets annotations from a CustomResourceDefinition\nfunc GetAnnotationsFromCRD(clientset clientsetAPIExtensions.Interface, name string) (map[string]string, error) {\n\tcrd, err := clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn crd.GetAnnotations(), nil\n}\n\n// GetRandString returns a random string of lenght N\nfunc GetRandString(n int) (string, error) {\n\tb := make([]byte, n)\n\tif _, err := rand.Read(b); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn base64.RawURLEncoding.EncodeToString(b), nil\n}\n\n// GetSecretsAsLocalObjectReference returns a list of LocalObjectReference based on secret names\nfunc GetSecretsAsLocalObjectReference(secrets ...string) []v1.LocalObjectReference {\n\tres := []v1.LocalObjectReference{}\n\tfor _, secret := range secrets {\n\t\tif secret != \"\" {\n\t\t\tres = append(res, v1.LocalObjectReference{Name: secret})\n\t\t}\n\t}\n\treturn res\n}\n"
  },
  {
    "path": "pkg/utils/k8sutil_test.go",
    "content": "package utils\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"testing\"\n\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tv2beta1 \"k8s.io/api/autoscaling/v2beta1\"\n\tcorev1 \"k8s.io/api/core/v1\"\n\textensionsv1beta1 \"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1\"\n\tfakeextensionsapi \"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake\"\n\tresource \"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n\t\"k8s.io/client-go/kubernetes/scheme\"\n\t\"k8s.io/client-go/rest\"\n\tktesting \"k8s.io/client-go/testing\"\n)\n\nfunc objBody(object interface{}) io.ReadCloser {\n\toutput, err := json.Marshal(object)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ioutil.NopCloser(bytes.NewReader([]byte(output)))\n}\n\nfunc fakeConfig() *rest.Config {\n\treturn &rest.Config{\n\t\tHost: \"https://example.com:443\",\n\t\tContentConfig: rest.ContentConfig{\n\t\t\tGroupVersion: &schema.GroupVersion{\n\t\t\t\tGroup:   \"\",\n\t\t\t\tVersion: \"v1\",\n\t\t\t},\n\t\t\tNegotiatedSerializer: scheme.Codecs,\n\t\t},\n\t}\n}\n\nfunc TestGetLocalHostname(t *testing.T) {\n\tconfig := fakeConfig()\n\texpectedHostName := \"foobar.example.com.nip.io\"\n\tactualHostName, err := GetLocalHostname(config, \"foobar\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif expectedHostName != actualHostName {\n\t\tt.Errorf(\"Expected %s but got %s\", expectedHostName, actualHostName)\n\t}\n}\n\nfunc TestCreateAutoscaleResource(t *testing.T) {\n\tclientset := fake.NewSimpleClientset()\n\tname := \"foo\"\n\tns := \"myns\"\n\thpaDef := v2beta1.HorizontalPodAutoscaler{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      name,\n\t\t\tNamespace: ns,\n\t\t},\n\t}\n\tif err := CreateAutoscale(clientset, hpaDef); err != nil {\n\t\tt.Fatalf(\"Creating autoscale returned err: %v\", err)\n\t}\n\n\thpa, err := 
clientset.AutoscalingV2beta1().HorizontalPodAutoscalers(ns).Get(name, metav1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"Creating autoscale returned err: %v\", err)\n\t}\n\tif hpa.ObjectMeta.Name != \"foo\" {\n\t\tt.Fatalf(\"Creating wrong scale target name\")\n\t}\n}\n\nfunc TestUpdateAutoscaleResource(t *testing.T) {\n\tclientset := fake.NewSimpleClientset()\n\tname := \"foo\"\n\tns := \"myns\"\n\n\t// Create a pre-existing HPA\n\thpaDef := v2beta1.HorizontalPodAutoscaler{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      name,\n\t\t\tNamespace: ns,\n\t\t},\n\t}\n\tif err := CreateAutoscale(clientset, hpaDef); err != nil {\n\t\tt.Fatalf(\"Creating autoscale returned err: %v\", err)\n\t}\n\n\t// Perform an update\n\thpaDef = v2beta1.HorizontalPodAutoscaler{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      name,\n\t\t\tNamespace: ns,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"baz\": \"qux\",\n\t\t\t},\n\t\t},\n\t}\n\tif err := UpdateAutoscale(clientset, hpaDef); err != nil {\n\t\tt.Fatalf(\"Updating autoscale returned err: %v\", err)\n\t}\n\n\thpa, err := clientset.AutoscalingV2beta1().HorizontalPodAutoscalers(ns).Get(name, metav1.GetOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"Updating autoscale returned err: %v\", err)\n\t}\n\tif hpa.ObjectMeta.Name != \"foo\" {\n\t\tt.Fatalf(\"Updating wrong scale target name\")\n\t}\n}\n\nfunc TestDeleteAutoscaleResource(t *testing.T) {\n\tmyNsFoo := metav1.ObjectMeta{\n\t\tNamespace: \"myns\",\n\t\tName:      \"foo\",\n\t}\n\n\tas := v2beta1.HorizontalPodAutoscaler{\n\t\tObjectMeta: myNsFoo,\n\t}\n\n\tclientset := fake.NewSimpleClientset(&as)\n\tif err := DeleteAutoscale(clientset, \"foo\", \"myns\"); err != nil {\n\t\tt.Fatalf(\"Deleting autoscale returned err: %v\", err)\n\t}\n\ta := clientset.Actions()\n\tif ns := a[0].GetNamespace(); ns != \"myns\" {\n\t\tt.Errorf(\"deleted autoscale from wrong namespace (%s)\", ns)\n\t}\n\tif name := a[0].(ktesting.DeleteAction).GetName(); name != \"foo\" 
{\n\t\tt.Errorf(\"deleted autoscale with wrong name (%s)\", name)\n\t}\n}\n\nfunc TestInitializeEmptyMapsInDeployment(t *testing.T) {\n\tdeployment := appsv1.Deployment{}\n\tdeployment.Spec.Selector = &metav1.LabelSelector{}\n\tinitializeEmptyMapsInDeployment(&deployment)\n\tif deployment.ObjectMeta.Annotations == nil {\n\t\tt.Fatal(\"ObjectMeta.Annotations map is nil\")\n\t}\n\tif deployment.ObjectMeta.Labels == nil {\n\t\tt.Fatal(\"ObjectMeta.Labels map is nil\")\n\t}\n\tif deployment.Spec.Selector == nil && deployment.Spec.Selector.MatchLabels == nil {\n\t\tt.Fatal(\"deployment.Spec.Selector.MatchLabels is nil\")\n\t}\n\tif deployment.Spec.Template.ObjectMeta.Labels == nil {\n\t\tt.Fatal(\"deployment.Spec.Template.ObjectMeta.Labels map is nil\")\n\t}\n\tif deployment.Spec.Template.ObjectMeta.Annotations == nil {\n\t\tt.Fatal(\"deployment.Spec.Template.ObjectMeta.Annotations map is nil\")\n\t}\n\tif deployment.Spec.Template.Spec.NodeSelector == nil {\n\t\tt.Fatal(\"deployment.Spec.Template.Spec.NodeSelector map is nil\")\n\t}\n}\n\nfunc TestMergeDeployments(t *testing.T) {\n\tvar dstReplicas int32\n\tdstReplicas = 10\n\tdestinationDeployment := appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"foo1-deploy\": \"bar\",\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &dstReplicas,\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"foo\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/bar\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tResources: corev1.ResourceRequirements{},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tvar srcReplicas int32\n\tsrcReplicas = 8\n\tsourceDeployment := appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tAnnotations: 
map[string]string{\n\t\t\t\t\"foo2-deploy\": \"bar\",\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &srcReplicas,\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"baz\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/qux\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tResources: corev1.ResourceRequirements{\n\t\t\t\t\t\t\t\tRequests: corev1.ResourceList{\n\t\t\t\t\t\t\t\t\tcorev1.ResourceName(corev1.ResourceCPU):    resource.MustParse(\"100m\"),\n\t\t\t\t\t\t\t\t\tcorev1.ResourceName(corev1.ResourceMemory): resource.MustParse(\"100Mi\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tvar expectedReplicas int32\n\texpectedReplicas = 10\n\texpectedDeployment := appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"foo1-deploy\": \"bar\",\n\t\t\t\t\"foo2-deploy\": \"bar\",\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &expectedReplicas,\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"foo\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/bar\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"baz\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/qux\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tResources: corev1.ResourceRequirements{\n\t\t\t\t\t\t\t\tRequests: corev1.ResourceList{\n\t\t\t\t\t\t\t\t\tcorev1.ResourceName(corev1.ResourceCPU):    resource.MustParse(\"100m\"),\n\t\t\t\t\t\t\t\t\tcorev1.ResourceName(corev1.ResourceMemory): 
resource.MustParse(\"100Mi\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tMergeDeployments(&destinationDeployment, &sourceDeployment)\n\n\tmergedContainerCount := len(destinationDeployment.Spec.Template.Spec.Containers)\n\tif mergedContainerCount != 1 {\n\t\tt.Fatalf(\"Expecting 1 container but received %v\", mergedContainerCount)\n\t}\n\n\texpectedAnnotations := expectedDeployment.ObjectMeta.Annotations\n\tmergedAnnotations := destinationDeployment.ObjectMeta.Annotations\n\tfor i := range expectedAnnotations {\n\t\tif mergedAnnotations[i] != expectedAnnotations[i] {\n\t\t\tt.Fatalf(\"Expecting annotation %s but received %s\", expectedAnnotations[i], mergedAnnotations[i])\n\t\t}\n\t}\n\n\tmergedReplicas := *destinationDeployment.Spec.Replicas\n\tif mergedReplicas != expectedReplicas {\n\t\tt.Fatalf(\"Expecting 8 replicas but received %v\", *destinationDeployment.Spec.Replicas)\n\t}\n\n\texpectedVolumeMountCount := 2\n\tmergedVolumeMountCount := len(destinationDeployment.Spec.Template.Spec.Containers[0].VolumeMounts)\n\tif mergedVolumeMountCount != expectedVolumeMountCount {\n\t\tt.Fatalf(\"Expecting %v volumeMounts but received %v\", expectedVolumeMountCount, mergedVolumeMountCount)\n\t}\n\n\texpectedCPURequest := expectedDeployment.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceName(corev1.ResourceCPU)]\n\tmergedCPURequest := destinationDeployment.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceName(corev1.ResourceCPU)]\n\tif mergedCPURequest != expectedCPURequest {\n\t\tt.Fatalf(\n\t\t\t\"Expecting %s cpu resource request but received %s\",\n\t\t\texpectedCPURequest.String(),\n\t\t\tmergedCPURequest.String(),\n\t\t)\n\t}\n\n\texpectedMemoryRequest := expectedDeployment.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceName(corev1.ResourceMemory)]\n\tmergedMemoryRequest := 
destinationDeployment.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceName(corev1.ResourceMemory)]\n\tif mergedMemoryRequest != expectedMemoryRequest {\n\t\tt.Fatalf(\n\t\t\t\"Expecting %s memory resource request but received %s\",\n\t\t\texpectedMemoryRequest.String(),\n\t\t\tmergedMemoryRequest.String(),\n\t\t)\n\t}\n}\n\nfunc TestGetAnnotationsFromCRD(t *testing.T) {\n\tcrdWithoutAnnotationName := \"crdWithoutAnnotation\"\n\tcrdWithAnnotationName := \"crdWithAnnotation\"\n\texpectedAnnotations := map[string]string{\n\t\t\"foo\": \"bar\",\n\t}\n\tcrdWithAnnotation := &extensionsv1beta1.CustomResourceDefinition{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"foo\": \"bar\",\n\t\t\t},\n\t\t\tName: crdWithAnnotationName,\n\t\t},\n\t\tSpec: extensionsv1beta1.CustomResourceDefinitionSpec{\n\t\t\tGroup: \"foo.group.io\",\n\t\t\tNames: extensionsv1beta1.CustomResourceDefinitionNames{\n\t\t\t\tPlural:   \"foos\",\n\t\t\t\tSingular: \"foo\",\n\t\t\t\tKind:     \"fooKind\",\n\t\t\t\tListKind: \"fooList\",\n\t\t\t},\n\t\t},\n\t}\n\tclientset := fakeextensionsapi.NewSimpleClientset()\n\t_, err := clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crdWithAnnotation)\n\tif err != nil {\n\t\tt.Fatalf(\"Error while creating CRD: %v\", err)\n\t}\n\tannotations, err := GetAnnotationsFromCRD(clientset, crdWithAnnotationName)\n\tif err != nil {\n\t\tt.Fatalf(\"Error while fetching CRD: %v\", err)\n\t}\n\tfor i := range expectedAnnotations {\n\t\tif annotations[i] != expectedAnnotations[i] {\n\t\t\tt.Errorf(\"Expecting annotation %s but received %s\", expectedAnnotations[i], annotations[i])\n\t\t}\n\t}\n\n\tcrdWithoutAnnotation := &extensionsv1beta1.CustomResourceDefinition{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tAnnotations: map[string]string{},\n\t\t\tName:        crdWithoutAnnotationName,\n\t\t},\n\t\tSpec: extensionsv1beta1.CustomResourceDefinitionSpec{\n\t\t\tGroup: \"foo.group.io\",\n\t\t\tNames: 
extensionsv1beta1.CustomResourceDefinitionNames{\n\t\t\t\tPlural:   \"foos\",\n\t\t\t\tSingular: \"foo\",\n\t\t\t\tKind:     \"fooKind\",\n\t\t\t\tListKind: \"fooList\",\n\t\t\t},\n\t\t},\n\t}\n\t_, err = clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crdWithoutAnnotation)\n\tif err != nil {\n\t\tt.Fatalf(\"Error while creating CRD: %v\", err)\n\t}\n\tannotations, err = GetAnnotationsFromCRD(clientset, crdWithoutAnnotationName)\n\tif err != nil {\n\t\tt.Fatalf(\"Error while fetching annotations from CRD: %v\", err)\n\t}\n\tif len(annotations) != 0 {\n\t\tt.Errorf(\"Expecting annotations of length 0 but received length %d\", len(annotations))\n\t}\n\n}\n"
  },
  {
    "path": "pkg/utils/kubelessutil.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage utils\n\nimport (\n\t\"crypto/sha256\"\n\t\"encoding/base64\"\n\t\"encoding/hex\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode/utf8\"\n\n\tmonitoringv1alpha1 \"github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1\"\n\t\"github.com/ghodss/yaml\"\n\tkubelessApi \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\t\"github.com/kubeless/kubeless/pkg/langruntime\"\n\t\"github.com/sirupsen/logrus\"\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tbatchv1 \"k8s.io/api/batch/v1\"\n\tv1 \"k8s.io/api/core/v1\"\n\tclientsetAPIExtensions \"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset\"\n\tk8sErrors \"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/types\"\n\t\"k8s.io/apimachinery/pkg/util/intstr\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/rest\"\n)\n\n// secretsMountPath is the file system path where volumes populated with secrets are mounted.\nconst secretsMountPath = \"/var/run/secrets/kubeless.io\"\n\n// GetFunctionPort returns the port for a function service\nfunc GetFunctionPort(clientset kubernetes.Interface, namespace, functionName string) (string, error) {\n\tsvc, err := 
clientset.CoreV1().Services(namespace).Get(functionName, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Unable to find the service for function %s\", functionName)\n\t}\n\treturn strconv.Itoa(int(svc.Spec.Ports[0].Port)), nil\n}\n\n// IsJSON returns true if the string is json\nfunc IsJSON(s string) bool {\n\tvar js map[string]interface{}\n\treturn json.Unmarshal([]byte(s), &js) == nil\n\n}\n\nfunc appendToCommand(orig string, command ...string) string {\n\tif len(orig) > 0 {\n\t\treturn fmt.Sprintf(\"%s && %s\", orig, strings.Join(command, \" && \"))\n\t}\n\treturn strings.Join(command, \" && \")\n}\n\nfunc getProvisionContainer(function, checksum, fileName, handler, contentType, runtime, prepareImage string, runtimeVolume, depsVolume v1.VolumeMount, resources v1.ResourceRequirements, lr *langruntime.Langruntimes) (v1.Container, error) {\n\tprepareCommand := \"\"\n\toriginFile := path.Join(depsVolume.MountPath, fileName)\n\n\t// Prepare Function file and dependencies\n\tif strings.Contains(contentType, \"base64\") {\n\t\t// File is encoded in base64\n\t\tdecodedFile := \"/tmp/func.decoded\"\n\t\tprepareCommand = appendToCommand(prepareCommand, fmt.Sprintf(\"base64 -d < %s > %s\", originFile, decodedFile))\n\t\toriginFile = decodedFile\n\t} else if strings.Contains(contentType, \"url\") {\n\t\tfromURLFile := \"/tmp/func.fromurl\"\n\t\tprepareCommand = appendToCommand(prepareCommand, fmt.Sprintf(\"curl '%s' -L --silent --output %s\", function, fromURLFile))\n\t\toriginFile = fromURLFile\n\t} else if strings.Contains(contentType, \"text\") || contentType == \"\" {\n\t\t// Assumming that function is plain text\n\t\t// So we don't need to preprocess it\n\t} else {\n\t\treturn v1.Container{}, fmt.Errorf(\"Unable to prepare function of type %s: Unknown format\", contentType)\n\t}\n\n\t// Validate checksum\n\tif checksum == \"\" {\n\t\t// DEPRECATED: Checksum may be empty\n\t} else {\n\t\tchecksumInfo := strings.Split(checksum, \":\")\n\t\tswitch 
checksumInfo[0] {\n\t\tcase \"sha256\":\n\t\t\tshaFile := \"/tmp/func.sha256\"\n\t\t\tprepareCommand = appendToCommand(prepareCommand,\n\t\t\t\tfmt.Sprintf(\"echo '%s  %s' > %s\", checksumInfo[1], originFile, shaFile),\n\t\t\t\tfmt.Sprintf(\"sha256sum -c %s\", shaFile),\n\t\t\t)\n\t\t\tbreak\n\t\tdefault:\n\t\t\treturn v1.Container{}, fmt.Errorf(\"Unable to verify checksum %s: Unknown format\", checksum)\n\t\t}\n\t}\n\n\tif strings.Contains(contentType, \"zip\") {\n\t\t// Extract content in case it is a Zip file\n\t\tprepareCommand = appendToCommand(prepareCommand,\n\t\t\tfmt.Sprintf(\"unzip -o %s -d %s\", originFile, runtimeVolume.MountPath),\n\t\t)\n\t} else if strings.Contains(contentType, \"compressedtar\") {\n\t\t// Extract content in case it is a compressed tar file.\n\t\t// The `tar` command auto-detects the compression type.\n\t\tprepareCommand = appendToCommand(prepareCommand,\n\t\t\tfmt.Sprintf(\"tar xf %s -C %s\", originFile, runtimeVolume.MountPath),\n\t\t)\n\t} else {\n\t\t// Copy the target as a single file\n\t\tdestFileName, err := getFileName(handler, contentType, runtime, lr)\n\t\tif err != nil {\n\t\t\treturn v1.Container{}, err\n\t\t}\n\t\tdest := path.Join(runtimeVolume.MountPath, destFileName)\n\t\tprepareCommand = appendToCommand(prepareCommand,\n\t\t\tfmt.Sprintf(\"cp %s %s\", originFile, dest),\n\t\t)\n\t}\n\n\t// Copy deps file to the installation path\n\truntimeInf, err := lr.GetRuntimeInfo(runtime)\n\tif err == nil && runtimeInf.DepName != \"\" && !strings.Contains(contentType, \"deps\") {\n\t\tdepsFile := path.Join(depsVolume.MountPath, runtimeInf.DepName)\n\t\tprepareCommand = appendToCommand(prepareCommand,\n\t\t\tfmt.Sprintf(\"cp %s %s\", depsFile, runtimeVolume.MountPath),\n\t\t)\n\t}\n\n\treturn v1.Container{\n\t\tName:            \"prepare\",\n\t\tImage:           prepareImage,\n\t\tCommand:         []string{\"sh\", \"-c\"},\n\t\tArgs:            []string{prepareCommand},\n\t\tVolumeMounts:    []v1.VolumeMount{runtimeVolume, 
depsVolume},\n\t\tImagePullPolicy: v1.PullIfNotPresent,\n\t\tResources:       resources,\n\t}, nil\n}\n\nfunc addDefaultLabel(labels map[string]string) map[string]string {\n\tif labels == nil {\n\t\tlabels = make(map[string]string)\n\t}\n\tlabels[\"created-by\"] = \"kubeless\"\n\treturn labels\n}\n\nfunc hasDefaultLabel(labels map[string]string) bool {\n\tif labels == nil || labels[\"created-by\"] != \"kubeless\" {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc splitHandler(handler string) (string, string, error) {\n\tstr := strings.Split(handler, \".\")\n\tif len(str) != 2 {\n\t\treturn \"\", \"\", fmt.Errorf(\"failed: incorrect handler format. It should be module_name.handler_name\")\n\t}\n\n\treturn str[0], str[1], nil\n}\n\n// getFileName returns a file name based on a handler identifier\nfunc getFileName(handler, funcContentType, runtime string, lr *langruntime.Langruntimes) (string, error) {\n\tmodName, _, err := splitHandler(handler)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfilename := modName\n\tif funcContentType == \"text\" || funcContentType == \"\" || funcContentType == \"url\" || funcContentType == \"base64\" {\n\t\t// We can only guess the extension if the function is specified as plain text\n\t\truntimeInf, err := lr.GetRuntimeInfo(runtime)\n\t\tif err == nil {\n\t\t\tfilename = modName + runtimeInf.FileNameSuffix\n\t\t}\n\t}\n\treturn filename, nil\n}\n\n// EnsureFuncConfigMap creates/updates a config map with a function specification\nfunc EnsureFuncConfigMap(client kubernetes.Interface, funcObj *kubelessApi.Function, or []metav1.OwnerReference, lr *langruntime.Langruntimes) error {\n\tconfigMapData := map[string]string{}\n\tvar err error\n\tif funcObj.Spec.Handler != \"\" {\n\t\tfileName, err := getFileName(funcObj.Spec.Handler, funcObj.Spec.FunctionContentType, funcObj.Spec.Runtime, lr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconfigMapData = map[string]string{\n\t\t\t\"handler\": funcObj.Spec.Handler,\n\t\t\tfileName:  
funcObj.Spec.Function,\n\t\t}\n\t\truntimeInfo, err := lr.GetRuntimeInfo(funcObj.Spec.Runtime)\n\t\tif err == nil && runtimeInfo.DepName != \"\" {\n\t\t\tconfigMapData[runtimeInfo.DepName] = funcObj.Spec.Deps\n\t\t}\n\t}\n\n\tconfigMap := &v1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:            funcObj.ObjectMeta.Name,\n\t\t\tLabels:          addDefaultLabel(funcObj.ObjectMeta.Labels),\n\t\t\tOwnerReferences: or,\n\t\t},\n\t\tData: configMapData,\n\t}\n\n\t_, err = client.Core().ConfigMaps(funcObj.ObjectMeta.Namespace).Create(configMap)\n\tif err != nil && k8sErrors.IsAlreadyExists(err) {\n\t\t// In case the ConfigMap already exists we should update\n\t\t// just certain fields (to avoid race conditions)\n\t\tvar newConfigMap *v1.ConfigMap\n\t\tnewConfigMap, err = client.Core().ConfigMaps(funcObj.ObjectMeta.Namespace).Get(funcObj.ObjectMeta.Name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !hasDefaultLabel(newConfigMap.ObjectMeta.Labels) {\n\t\t\treturn fmt.Errorf(\"Found a conflicting configmap object %s/%s. 
Aborting\", funcObj.ObjectMeta.Namespace, funcObj.ObjectMeta.Name)\n\t\t}\n\t\tnewConfigMap.ObjectMeta.Labels = funcObj.ObjectMeta.Labels\n\t\tnewConfigMap.ObjectMeta.OwnerReferences = or\n\t\tnewConfigMap.Data = configMap.Data\n\t\t_, err = client.Core().ConfigMaps(funcObj.ObjectMeta.Namespace).Update(newConfigMap)\n\t\tif err != nil && k8sErrors.IsAlreadyExists(err) {\n\t\t\t// The configmap may already exist and there is nothing to update\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn err\n}\n\n// this function resolves backward incompatibility in case user uses old client which doesn't include serviceSpec into funcSpec.\n// if serviceSpec is empty, we will use the default serviceSpec whose port is 8080\nfunc serviceSpec(funcObj *kubelessApi.Function) v1.ServiceSpec {\n\tif len(funcObj.Spec.ServiceSpec.Ports) == 0 {\n\t\treturn v1.ServiceSpec{\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\t// Note: Prefix: \"http-\" is added to adapt to Istio so that it can discover the function services\n\t\t\t\t\tName:       \"http-function-port\",\n\t\t\t\t\tProtocol:   v1.ProtocolTCP,\n\t\t\t\t\tPort:       8080,\n\t\t\t\t\tTargetPort: intstr.FromInt(8080),\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: funcObj.ObjectMeta.Labels,\n\t\t\tType:     v1.ServiceTypeClusterIP,\n\t\t}\n\t}\n\treturn funcObj.Spec.ServiceSpec\n}\n\n// EnsureFuncService creates/updates a function service\nfunc EnsureFuncService(client kubernetes.Interface, funcObj *kubelessApi.Function, or []metav1.OwnerReference) error {\n\tsvc := &v1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:            funcObj.ObjectMeta.Name,\n\t\t\tLabels:          addDefaultLabel(funcObj.ObjectMeta.Labels),\n\t\t\tOwnerReferences: or,\n\t\t},\n\t\tSpec: serviceSpec(funcObj),\n\t}\n\n\t_, err := client.Core().Services(funcObj.ObjectMeta.Namespace).Create(svc)\n\tif err != nil && k8sErrors.IsAlreadyExists(err) {\n\t\t// In case the SVC already exists we should update\n\t\t// just certain fields (to avoid race 
conditions)\n\t\tvar newSvc *v1.Service\n\t\tnewSvc, err = client.Core().Services(funcObj.ObjectMeta.Namespace).Get(funcObj.ObjectMeta.Name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !hasDefaultLabel(newSvc.ObjectMeta.Labels) {\n\t\t\treturn fmt.Errorf(\"Found a conflicting service object %s/%s. Aborting\", funcObj.ObjectMeta.Namespace, funcObj.ObjectMeta.Name)\n\t\t}\n\t\tnewSvc.ObjectMeta.Labels = funcObj.ObjectMeta.Labels\n\t\tnewSvc.ObjectMeta.OwnerReferences = or\n\t\tnewSvc.Spec.Ports = svc.Spec.Ports\n\t\t_, err = client.Core().Services(funcObj.ObjectMeta.Namespace).Update(newSvc)\n\t\tif err != nil && k8sErrors.IsAlreadyExists(err) {\n\t\t\t// The service may already exist and there is nothing to update\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn err\n}\n\nfunc getRuntimeVolumeMount(name string) v1.VolumeMount {\n\treturn v1.VolumeMount{\n\t\tName:      name,\n\t\tMountPath: \"/kubeless\",\n\t}\n}\n\nfunc getChecksum(content string) (string, error) {\n\th := sha256.New()\n\t_, err := h.Write([]byte(content))\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\treturn hex.EncodeToString(h.Sum(nil)), nil\n}\n\n// populatePodSpec populates a basic Pod Spec that uses init containers to populate\n// the runtime container with the function content and its dependencies.\n// The caller should define the runtime container(s).\n// It accepts a prepopulated podSpec with default information and volume that the\n// runtime container should mount\nfunc populatePodSpec(funcObj *kubelessApi.Function, lr *langruntime.Langruntimes, podSpec *v1.PodSpec, runtimeVolumeMount v1.VolumeMount, provisionImage string, imagePullSecrets []v1.LocalObjectReference) error {\n\tdepsVolumeName := funcObj.ObjectMeta.Name + \"-deps\"\n\tresult := podSpec\n\tif len(imagePullSecrets) > 0 {\n\t\tresult.ImagePullSecrets = imagePullSecrets\n\t}\n\tresult.Volumes = append(podSpec.Volumes,\n\t\tv1.Volume{\n\t\t\tName: runtimeVolumeMount.Name,\n\t\t\tVolumeSource: 
v1.VolumeSource{\n\t\t\t\tEmptyDir: &v1.EmptyDirVolumeSource{},\n\t\t\t},\n\t\t},\n\t\tv1.Volume{\n\t\t\tName: depsVolumeName,\n\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\tConfigMap: &v1.ConfigMapVolumeSource{\n\t\t\t\t\tLocalObjectReference: v1.LocalObjectReference{\n\t\t\t\t\t\tName: funcObj.ObjectMeta.Name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\t// prepare init-containers if some function is specified\n\n\tresources := v1.ResourceRequirements{}\n\tif len(funcObj.Spec.Deployment.Spec.Template.Spec.InitContainers) > 0 {\n\t\tresources = funcObj.Spec.Deployment.Spec.Template.Spec.InitContainers[0].Resources\n\t}\n\n\tif funcObj.Spec.Function != \"\" {\n\t\tfileName, err := getFileName(funcObj.Spec.Handler, funcObj.Spec.FunctionContentType, funcObj.Spec.Runtime, lr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsrcVolumeMount := v1.VolumeMount{\n\t\t\tName:      depsVolumeName,\n\t\t\tMountPath: \"/src\",\n\t\t}\n\t\tprovisionContainer, err := getProvisionContainer(\n\t\t\tfuncObj.Spec.Function,\n\t\t\tfuncObj.Spec.Checksum,\n\t\t\tfileName,\n\t\t\tfuncObj.Spec.Handler,\n\t\t\tfuncObj.Spec.FunctionContentType,\n\t\t\tfuncObj.Spec.Runtime,\n\t\t\tprovisionImage,\n\t\t\truntimeVolumeMount,\n\t\t\tsrcVolumeMount,\n\t\t\tresources,\n\t\t\tlr,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresult.InitContainers = []v1.Container{provisionContainer}\n\t}\n\n\t// add the image secrets if present to pull images from private docker registry\n\tif funcObj.Spec.Runtime != \"\" {\n\t\timageSecrets, err := lr.GetImageSecrets(funcObj.Spec.Runtime)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to fetch ImagePullSecrets, %v\", err)\n\t\t}\n\t\tresult.ImagePullSecrets = append(result.ImagePullSecrets, imageSecrets...)\n\t}\n\n\t// ensure that the runtime is supported for installing dependencies\n\t_, err := lr.GetRuntimeInfo(funcObj.Spec.Runtime)\n\tenvVars := []v1.EnvVar{}\n\tif len(result.Containers) > 0 {\n\t\tenvVars = 
result.Containers[0].Env\n\t}\n\n\thasDeps := funcObj.Spec.Deps != \"\" || strings.Contains(funcObj.Spec.FunctionContentType, \"deps\")\n\tif hasDeps && err != nil {\n\t\treturn fmt.Errorf(\"Unable to install dependencies for the runtime %s\", funcObj.Spec.Runtime)\n\t} else if hasDeps {\n\t\tdepsChecksum := \"\"\n\t\tif funcObj.Spec.Deps != \"\" {\n\t\t\tdepsChecksum, err = getChecksum(funcObj.Spec.Deps)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Unable to obtain dependencies checksum: %v\", err)\n\t\t\t}\n\t\t}\n\n\t\tdepsInstallContainer, err := lr.GetBuildContainer(funcObj.Spec.Runtime, depsChecksum, envVars, runtimeVolumeMount, resources)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif depsInstallContainer.Name != \"\" {\n\t\t\tresult.InitContainers = append(\n\t\t\t\tresult.InitContainers,\n\t\t\t\tdepsInstallContainer,\n\t\t\t)\n\t\t}\n\t}\n\n\t// add compilation init container if needed\n\t_, funcName, _ := splitHandler(funcObj.Spec.Handler)\n\tcompContainer, err := lr.GetCompilationContainer(funcObj.Spec.Runtime, funcName, envVars, runtimeVolumeMount, resources)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif compContainer != nil {\n\t\tresult.InitContainers = append(\n\t\t\tresult.InitContainers,\n\t\t\t*compContainer,\n\t\t)\n\t}\n\n\t// mount volumes with init container secrets specified in runtime configuration\n\tlr.ReadConfigMap()\n\tfor i := 0; i < len(result.InitContainers); i++ {\n\t\tsecrets, err := lr.GetInitContainerSecrets(funcObj.Spec.Runtime, result.InitContainers[i].Name)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to fetch init container secrets for runtime %s at phase %s: %v\", funcObj.Spec.Runtime, result.InitContainers[i].Name, err)\n\t\t}\n\t\tfor _, secret := range secrets {\n\t\t\t// add volume if not available in the pod spec already\n\t\t\tvar found bool\n\t\t\tfor _, vol := range result.Volumes {\n\t\t\t\tif vol.Name == secret.Name && (vol.Secret == nil || vol.Secret.SecretName != secret.Name) 
{\n\t\t\t\t\treturn fmt.Errorf(\"Unable to add volume for secret %s, volume already defined %#v\", secret.Name, vol)\n\t\t\t\t}\n\t\t\t\tif vol.Name == secret.Name && vol.Secret != nil && vol.Secret.SecretName == secret.Name {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tresult.Volumes = append(result.Volumes, v1.Volume{\n\t\t\t\t\tName: secret.Name,\n\t\t\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\t\t\tSecret: &v1.SecretVolumeSource{SecretName: secret.Name},\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t}\n\n\t\t\t// add volume mount to the init container\n\t\t\tresult.InitContainers[i].VolumeMounts = append(result.InitContainers[i].VolumeMounts, v1.VolumeMount{\n\t\t\t\tName:      secret.Name,\n\t\t\t\tReadOnly:  true,\n\t\t\t\tMountPath: filepath.Join(secretsMountPath, secret.Name),\n\t\t\t})\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// EnsureFuncImage creates a Job to build a function image\nfunc EnsureFuncImage(client kubernetes.Interface, funcObj *kubelessApi.Function, lr *langruntime.Langruntimes, or []metav1.OwnerReference, imageName, tag, builderImage, registryHost, dockerSecretName, provisionImage string, registryTLSEnabled bool, imagePullSecrets []v1.LocalObjectReference) error {\n\tif len(tag) < 64 {\n\t\treturn errors.New(\"Expecting sha256 as image tag\")\n\t}\n\tjobName := fmt.Sprintf(\"build-%s-%s\", funcObj.ObjectMeta.Name, tag[0:10])\n\t_, err := client.BatchV1().Jobs(funcObj.ObjectMeta.Namespace).Get(jobName, metav1.GetOptions{})\n\tif err == nil {\n\t\t// The job already exists\n\t\tlogrus.Infof(\"Found a previous job for building %s:%s\", imageName, tag)\n\t\treturn nil\n\t}\n\tpodSpec := v1.PodSpec{\n\t\tRestartPolicy: v1.RestartPolicyOnFailure,\n\t}\n\truntimeVolumeMount := getRuntimeVolumeMount(funcObj.ObjectMeta.Name)\n\terr = populatePodSpec(funcObj, lr, &podSpec, runtimeVolumeMount, provisionImage, imagePullSecrets)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Add a final initContainer to create the function 
bundle.tar\n\tprepareContainer := v1.Container{}\n\tfor _, c := range podSpec.InitContainers {\n\t\tif c.Name == \"prepare\" {\n\t\t\tprepareContainer = c\n\t\t}\n\t}\n\tpodSpec.InitContainers = append(podSpec.InitContainers, v1.Container{\n\t\tName:         \"bundle\",\n\t\tCommand:      []string{\"sh\", \"-c\"},\n\t\tArgs:         []string{fmt.Sprintf(\"tar cvf %s/bundle.tar %s/*\", runtimeVolumeMount.MountPath, runtimeVolumeMount.MountPath)},\n\t\tVolumeMounts: prepareContainer.VolumeMounts,\n\t\tImage:        provisionImage,\n\t})\n\n\tbuildJob := batchv1.Job{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:            jobName,\n\t\t\tNamespace:       funcObj.ObjectMeta.Namespace,\n\t\t\tOwnerReferences: or,\n\t\t\tLabels: addDefaultLabel(map[string]string{\n\t\t\t\t\"function\": funcObj.ObjectMeta.Name,\n\t\t\t}),\n\t\t},\n\t\tSpec: batchv1.JobSpec{\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tSpec: podSpec,\n\t\t\t},\n\t\t},\n\t}\n\n\tbaseImage, err := lr.GetFunctionImage(funcObj.Spec.Runtime)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Registry volume\n\tdockerCredsVol := dockerSecretName\n\tdockerCredsVolMountPath := \"/docker\"\n\tregistryCredsVolume := v1.Volume{\n\t\tName: dockerCredsVol,\n\t\tVolumeSource: v1.VolumeSource{\n\t\t\tSecret: &v1.SecretVolumeSource{\n\t\t\t\tSecretName: dockerSecretName,\n\t\t\t},\n\t\t},\n\t}\n\tbuildJob.Spec.Template.Spec.Volumes = append(buildJob.Spec.Template.Spec.Volumes, registryCredsVolume)\n\n\targs := []string{\n\t\t\"/imbuilder\",\n\t\t\"add-layer\",\n\t}\n\tif !registryTLSEnabled {\n\t\targs = append(args, \"--insecure\")\n\t}\n\targs = append(args,\n\t\t\"--src\", fmt.Sprintf(\"docker://%s\", baseImage),\n\t\t\"--dst\", fmt.Sprintf(\"docker://%s/%s:%s\", registryHost, imageName, tag),\n\t\tfmt.Sprintf(\"%s/bundle.tar\", podSpec.InitContainers[0].VolumeMounts[0].MountPath),\n\t)\n\t// Add main container\n\tbuildJob.Spec.Template.Spec.Containers = []v1.Container{\n\t\t{\n\t\t\tName:  \"build\",\n\t\t\tImage: 
builderImage,\n\t\t\tVolumeMounts: append(prepareContainer.VolumeMounts,\n\t\t\t\tv1.VolumeMount{\n\t\t\t\t\tName:      dockerCredsVol,\n\t\t\t\t\tMountPath: dockerCredsVolMountPath,\n\t\t\t\t},\n\t\t\t),\n\t\t\tEnv: []v1.EnvVar{\n\t\t\t\t{\n\t\t\t\t\tName:  \"DOCKER_CONFIG_FOLDER\",\n\t\t\t\t\tValue: dockerCredsVolMountPath,\n\t\t\t\t},\n\t\t\t},\n\t\t\tArgs: args,\n\t\t},\n\t}\n\n\t// Create the job if it doesn't exist yet\n\t_, err = client.BatchV1().Jobs(funcObj.ObjectMeta.Namespace).Create(&buildJob)\n\tif err == nil {\n\t\tlogrus.Infof(\"Started function build job %s\", jobName)\n\t}\n\treturn err\n}\n\nfunc svcTargetPort(funcObj *kubelessApi.Function) int32 {\n\tif len(funcObj.Spec.ServiceSpec.Ports) == 0 {\n\t\treturn int32(8080)\n\t}\n\treturn int32(funcObj.Spec.ServiceSpec.Ports[0].TargetPort.IntValue())\n}\n\nfunc mergeMap(dst, src map[string]string) map[string]string {\n\tif len(dst) == 0 {\n\t\tdst = make(map[string]string)\n\t}\n\tfor k, v := range src {\n\t\tdst[k] = v\n\t}\n\treturn dst\n}\n\n// EnsureFuncDeployment creates/updates a function deployment\nfunc EnsureFuncDeployment(client kubernetes.Interface, funcObj *kubelessApi.Function, or []metav1.OwnerReference, lr *langruntime.Langruntimes, prebuiltRuntimeImage, provisionImage string, imagePullSecrets []v1.LocalObjectReference) error {\n\n\tvar err error\n\n\tpodAnnotations := map[string]string{\n\t\t// Attempt to attract the attention of prometheus.\n\t\t// For runtimes that don't support /metrics,\n\t\t// prometheus will get a 404 and mostly silently\n\t\t// ignore the pod (still displayed in the list of\n\t\t// \"targets\")\n\t\t\"prometheus.io/scrape\": \"true\",\n\t\t\"prometheus.io/path\":   \"/metrics\",\n\t\t\"prometheus.io/port\":   strconv.Itoa(int(svcTargetPort(funcObj))),\n\t}\n\tmaxUnavailable := intstr.FromInt(0)\n\n\t// add deployment and copy all func's Spec.Deployment to the deployment\n\tdpm := funcObj.Spec.Deployment.DeepCopy()\n\tdpm.OwnerReferences = or\n\tdpm.ObjectMeta.Name 
= funcObj.ObjectMeta.Name\n\tdpm.Spec.Selector = &metav1.LabelSelector{\n\t\tMatchLabels: map[string]string{\"created-by\": funcObj.ObjectMeta.Labels[\"created-by\"], \"function\": funcObj.ObjectMeta.Labels[\"function\"]},\n\t}\n\n\tdpm.Spec.Strategy = appsv1.DeploymentStrategy{\n\t\tRollingUpdate: &appsv1.RollingUpdateDeployment{\n\t\t\tMaxUnavailable: &maxUnavailable,\n\t\t},\n\t}\n\n\t// append data to dpm deployment\n\tdpm.Labels = addDefaultLabel(mergeMap(dpm.Labels, funcObj.Labels))\n\tdpm.Spec.Template.Labels = mergeMap(dpm.Spec.Template.Labels, funcObj.Labels)\n\tdpm.Annotations = mergeMap(dpm.Annotations, funcObj.Annotations)\n\tdpm.Spec.Template.Annotations = mergeMap(dpm.Spec.Template.Annotations, funcObj.Annotations)\n\tdpm.Spec.Template.Annotations = mergeMap(dpm.Spec.Template.Annotations, podAnnotations)\n\n\tif len(dpm.Spec.Template.Spec.Containers) == 0 {\n\t\tdpm.Spec.Template.Spec.Containers = append(dpm.Spec.Template.Spec.Containers, v1.Container{})\n\t}\n\n\truntimeVolumeMount := getRuntimeVolumeMount(funcObj.ObjectMeta.Name)\n\tif funcObj.Spec.Handler != \"\" && funcObj.Spec.Function != \"\" {\n\t\tmodName, handlerName, err := splitHandler(funcObj.Spec.Handler)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// only resolve the image name and build the function if it has not been built already\n\t\tif dpm.Spec.Template.Spec.Containers[0].Image == \"\" && prebuiltRuntimeImage == \"\" {\n\t\t\terr := populatePodSpec(funcObj, lr, &dpm.Spec.Template.Spec, runtimeVolumeMount, provisionImage, imagePullSecrets)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\timageName, err := lr.GetFunctionImage(funcObj.Spec.Runtime)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdpm.Spec.Template.Spec.Containers[0].Image = imageName\n\n\t\t\tdpm.Spec.Template.Spec.Containers[0].VolumeMounts = append(dpm.Spec.Template.Spec.Containers[0].VolumeMounts, runtimeVolumeMount)\n\n\t\t} else {\n\t\t\tif dpm.Spec.Template.Spec.Containers[0].Image == 
\"\" {\n\t\t\t\tdpm.Spec.Template.Spec.Containers[0].Image = prebuiltRuntimeImage\n\t\t\t}\n\t\t\tdpm.Spec.Template.Spec.ImagePullSecrets = imagePullSecrets\n\t\t}\n\t\ttimeout := funcObj.Spec.Timeout\n\t\tif timeout == \"\" {\n\t\t\t// Set default timeout to 180 seconds\n\t\t\ttimeout = defaultTimeout\n\t\t}\n\t\tdpm.Spec.Template.Spec.Containers[0].Env = append(dpm.Spec.Template.Spec.Containers[0].Env,\n\t\t\tv1.EnvVar{\n\t\t\t\tName:  \"FUNC_HANDLER\",\n\t\t\t\tValue: handlerName,\n\t\t\t},\n\t\t\tv1.EnvVar{\n\t\t\t\tName:  \"MOD_NAME\",\n\t\t\t\tValue: modName,\n\t\t\t},\n\t\t\tv1.EnvVar{\n\t\t\t\tName:  \"FUNC_TIMEOUT\",\n\t\t\t\tValue: timeout,\n\t\t\t},\n\t\t\tv1.EnvVar{\n\t\t\t\tName:  \"FUNC_RUNTIME\",\n\t\t\t\tValue: funcObj.Spec.Runtime,\n\t\t\t},\n\t\t\tv1.EnvVar{\n\t\t\t\tName:  \"FUNC_MEMORY_LIMIT\",\n\t\t\t\tValue: dpm.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().String(),\n\t\t\t},\n\t\t)\n\t} else {\n\t\tlogrus.Warn(\"Expected non-empty handler and non-empty function content\")\n\t}\n\n\tdpm.Spec.Template.Spec.Containers[0].Env = append(dpm.Spec.Template.Spec.Containers[0].Env,\n\t\tv1.EnvVar{\n\t\t\tName:  \"FUNC_PORT\",\n\t\t\tValue: strconv.Itoa(int(svcTargetPort(funcObj))),\n\t\t},\n\t)\n\n\tdpm.Spec.Template.Spec.Containers[0].Name = funcObj.ObjectMeta.Name\n\tdpm.Spec.Template.Spec.Containers[0].Ports = append(dpm.Spec.Template.Spec.Containers[0].Ports, v1.ContainerPort{\n\t\tContainerPort: svcTargetPort(funcObj),\n\t})\n\n\t// update deployment for loading dependencies\n\tlr.UpdateDeployment(dpm, runtimeVolumeMount.MountPath, funcObj.Spec.Runtime)\n\n\tlivenessProbeInfo := lr.GetLivenessProbeInfo(funcObj.Spec.Runtime, int(svcTargetPort(funcObj)))\n\n\tif dpm.Spec.Template.Spec.Containers[0].LivenessProbe == nil {\n\t\tdpm.Spec.Template.Spec.Containers[0].LivenessProbe = livenessProbeInfo\n\t}\n\n\t// Add security context\n\truntimeUser := int64(1000)\n\tif dpm.Spec.Template.Spec.SecurityContext == nil 
{\n\t\tdpm.Spec.Template.Spec.SecurityContext = &v1.PodSecurityContext{\n\t\t\tRunAsUser: &runtimeUser,\n\t\t\tFSGroup:   &runtimeUser,\n\t\t}\n\t}\n\n\t// Add soft pod anti affinity\n\tif dpm.Spec.Template.Spec.Affinity == nil {\n\t\tdpm.Spec.Template.Spec.Affinity = &v1.Affinity{\n\t\t\tPodAntiAffinity: &v1.PodAntiAffinity{\n\t\t\t\tPreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{\n\t\t\t\t\t{\n\t\t\t\t\t\tWeight: 100,\n\t\t\t\t\t\tPodAffinityTerm: v1.PodAffinityTerm{\n\t\t\t\t\t\t\tLabelSelector: &metav1.LabelSelector{\n\t\t\t\t\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\t\t\t\t\"created-by\": \"kubeless\",\n\t\t\t\t\t\t\t\t\t\"function\":   funcObj.ObjectMeta.Name,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tTopologyKey: \"kubernetes.io/hostname\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\t_, err = client.AppsV1().Deployments(funcObj.ObjectMeta.Namespace).Create(dpm)\n\tif err != nil && k8sErrors.IsAlreadyExists(err) {\n\t\t// In case the Deployment already exists we should update\n\t\t// just certain fields (to avoid race conditions)\n\t\tvar newDpm *appsv1.Deployment\n\t\tnewDpm, err = client.AppsV1().Deployments(funcObj.ObjectMeta.Namespace).Get(funcObj.ObjectMeta.Name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !hasDefaultLabel(newDpm.ObjectMeta.Labels) {\n\t\t\treturn fmt.Errorf(\"Found a conflicting deployment object %s/%s. 
Aborting\", funcObj.ObjectMeta.Namespace, funcObj.ObjectMeta.Name)\n\t\t}\n\t\tnewDpm.ObjectMeta.Labels = funcObj.ObjectMeta.Labels\n\t\tnewDpm.ObjectMeta.Annotations = funcObj.Spec.Deployment.ObjectMeta.Annotations\n\t\tnewDpm.ObjectMeta.OwnerReferences = or\n\t\t// We should maintain previous selector to avoid duplicated ReplicaSets\n\t\tselector := newDpm.Spec.Selector\n\t\tnewDpm.Spec = dpm.Spec\n\t\tnewDpm.Spec.Selector = selector\n\t\tdata, err := json.Marshal(newDpm)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Use `Patch` to do a rolling update\n\t\t_, err = client.AppsV1().Deployments(funcObj.ObjectMeta.Namespace).Patch(newDpm.Name, types.MergePatchType, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn err\n}\n\n// CreateServiceMonitor creates a Service Monitor for the given function\nfunc CreateServiceMonitor(smclient monitoringv1alpha1.MonitoringV1alpha1Client, funcObj *kubelessApi.Function, ns string, or []metav1.OwnerReference) error {\n\t_, err := smclient.ServiceMonitors(ns).Get(funcObj.ObjectMeta.Name, metav1.GetOptions{})\n\tif err != nil {\n\t\tif k8sErrors.IsNotFound(err) {\n\t\t\ts := &monitoringv1alpha1.ServiceMonitor{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      funcObj.ObjectMeta.Name,\n\t\t\t\t\tNamespace: ns,\n\t\t\t\t\tLabels: addDefaultLabel(map[string]string{\n\t\t\t\t\t\t\"service-monitor\": \"function\",\n\t\t\t\t\t}),\n\t\t\t\t\tOwnerReferences: or,\n\t\t\t\t},\n\t\t\t\tSpec: monitoringv1alpha1.ServiceMonitorSpec{\n\t\t\t\t\tSelector: metav1.LabelSelector{\n\t\t\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\t\t\"function\": funcObj.ObjectMeta.Name,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tEndpoints: []monitoringv1alpha1.Endpoint{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tPort: \"http-function-port\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\t_, err = smclient.ServiceMonitors(ns).Create(s)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn 
fmt.Errorf(\"service monitor has already existed\")\n}\n\n// GetOwnerReference returns ownerRef for appending to objects's metadata\nfunc GetOwnerReference(kind, apiVersion, name string, uid types.UID) ([]metav1.OwnerReference, error) {\n\tif name == \"\" {\n\t\treturn []metav1.OwnerReference{}, fmt.Errorf(\"name can't be empty\")\n\t}\n\tif uid == \"\" {\n\t\treturn []metav1.OwnerReference{}, fmt.Errorf(\"uid can't be empty\")\n\t}\n\treturn []metav1.OwnerReference{\n\t\t{\n\t\t\tKind:       kind,\n\t\t\tAPIVersion: apiVersion,\n\t\t\tName:       name,\n\t\t\tUID:        uid,\n\t\t},\n\t}, nil\n}\n\n// GetInClusterConfig returns necessary Config object to authenticate k8s clients if env variable is set\nfunc GetInClusterConfig() (*rest.Config, error) {\n\tconfig, err := rest.InClusterConfig()\n\n\ttokenFile := os.Getenv(\"KUBELESS_TOKEN_FILE_PATH\")\n\tif len(tokenFile) == 0 {\n\t\treturn config, err\n\t}\n\ttokenBytes, err := ioutil.ReadFile(tokenFile)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to read file containing oauth token: %s\", err)\n\t}\n\tconfig.BearerToken = string(tokenBytes)\n\n\treturn config, nil\n}\n\nfunc getConfigLocation(apiExtensionsClientset clientsetAPIExtensions.Interface) (ConfigLocation, error) {\n\tconfigLocation := ConfigLocation{}\n\tcontrollerNamespace := os.Getenv(\"KUBELESS_NAMESPACE\")\n\tkubelessConfig := os.Getenv(\"KUBELESS_CONFIG\")\n\n\tannotationsCRD, err := GetAnnotationsFromCRD(apiExtensionsClientset, \"functions.kubeless.io\")\n\tif err != nil {\n\t\treturn configLocation, err\n\t}\n\tif len(controllerNamespace) == 0 {\n\t\tif ns, ok := annotationsCRD[\"kubeless.io/namespace\"]; ok {\n\t\t\tcontrollerNamespace = ns\n\t\t} else {\n\t\t\tcontrollerNamespace = \"kubeless\"\n\t\t}\n\t}\n\tconfigLocation.Namespace = controllerNamespace\n\tif len(kubelessConfig) == 0 {\n\t\tif config, ok := annotationsCRD[\"kubeless.io/config\"]; ok {\n\t\t\tkubelessConfig = config\n\t\t} else {\n\t\t\tkubelessConfig = 
\"kubeless-config\"\n\t\t}\n\t}\n\tconfigLocation.Name = kubelessConfig\n\treturn configLocation, nil\n}\n\n// GetKubelessConfig Returns Kubeless ConfigMap\nfunc GetKubelessConfig(cli kubernetes.Interface, cliAPIExtensions clientsetAPIExtensions.Interface) (*v1.ConfigMap, error) {\n\tconfigLocation, err := getConfigLocation(cliAPIExtensions)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error while fetching config location: %v\", err)\n\t}\n\tcontrollerNamespace := configLocation.Namespace\n\tkubelessConfig := configLocation.Name\n\tconfig, err := cli.CoreV1().ConfigMaps(controllerNamespace).Get(kubelessConfig, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to read the configmap: %s\", err)\n\t}\n\treturn config, nil\n}\n\n// DryRunFmt stringify the given interface in a specific format\nfunc DryRunFmt(format string, trigger interface{}) (string, error) {\n\tswitch format {\n\tcase \"json\":\n\t\tj, err := json.MarshalIndent(trigger, \"\", \"    \")\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn string(j[:]), nil\n\tcase \"yaml\":\n\t\ty, err := yaml.Marshal(trigger)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn string(y[:]), nil\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"Output format needs to be yaml or json\")\n\t}\n}\n\n// getCompressionType returns the compression type (if any) of the given file by looking at the file extension\nfunc getCompressionType(filename string) (compressionType string) {\n\tif strings.HasSuffix(filename, \".zip\") {\n\t\tcompressionType = \"+zip\"\n\t}\n\n\textensions := []string{\".tar.gz\", \".taz\", \".tgz\", \".tar.bz2\", \".tb2\", \".tbz\", \".tbz2\", \".tz2\", \".tar.xz\"}\n\tfor _, ext := range extensions {\n\t\tif strings.HasSuffix(filename, ext) {\n\t\t\tcompressionType = \"+compressedtar\"\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}\n\n// GetContentType Gets the content type of a given filename\nfunc GetContentType(filename string) (string, error) {\n\tvar 
contentType string\n\n\tif strings.Index(filename, \"http://\") == 0 || strings.Index(filename, \"https://\") == 0 {\n\t\tcontentType = \"url\" + getCompressionType(strings.Split(filename, \"?\")[0])\n\t} else {\n\t\tfbytes, err := ioutil.ReadFile(filename)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tisText := utf8.ValidString(string(fbytes))\n\t\tif isText {\n\t\t\tcontentType = \"text\"\n\t\t} else {\n\t\t\tcontentType = \"base64\"\n\t\t}\n\t\tcontentType += getCompressionType(filename)\n\t}\n\treturn contentType, nil\n}\n\n// ParseContent Parses the content of a file as string\nfunc ParseContent(file, contentType string) (string, string, error) {\n\tvar checksum, content string\n\n\tif strings.Contains(contentType, \"url\") {\n\n\t\tfunctionURL, err := url.Parse(file)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\tresp, err := http.Get(functionURL.String())\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tfunctionBytes, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\tcontent = string(functionBytes)\n\t\tchecksum, err = getSha256(functionBytes)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\n\t} else {\n\n\t\tfunctionBytes, err := ioutil.ReadFile(file)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\tif contentType == \"text\" {\n\t\t\tcontent = string(functionBytes)\n\t\t} else {\n\t\t\tcontent = base64.StdEncoding.EncodeToString(functionBytes)\n\t\t}\n\t\tchecksum, err = getFileSha256(file)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t}\n\n\treturn content, checksum, nil\n}\n\n// Get the checksum of a file using sha256\nfunc getFileSha256(file string) (string, error) {\n\th := sha256.New()\n\tff, err := os.Open(file)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer ff.Close()\n\t_, err = io.Copy(h, ff)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tchecksum := 
hex.EncodeToString(h.Sum(nil))\n\treturn \"sha256:\" + checksum, err\n}\n\n// Get the checksum using sha256\nfunc getSha256(bytes []byte) (string, error) {\n\th := sha256.New()\n\t_, err := h.Write(bytes)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tchecksum := hex.EncodeToString(h.Sum(nil))\n\treturn \"sha256:\" + checksum, nil\n}\n"
  },
  {
    "path": "pkg/utils/kubelessutil_test.go",
    "content": "package utils\n\nimport (\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\n\tkubelessApi \"github.com/kubeless/kubeless/pkg/apis/kubeless/v1beta1\"\n\t\"github.com/kubeless/kubeless/pkg/langruntime\"\n\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tv1 \"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/util/intstr\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n)\n\nfunc getEnvValueFromList(envName string, l []v1.EnvVar) string {\n\tvar res v1.EnvVar\n\tfor _, env := range l {\n\t\tif env.Name == envName {\n\t\t\tres = env\n\t\t\tbreak\n\t\t}\n\t}\n\treturn res.Value\n}\n\nfunc TestEnsureConfigMap(t *testing.T) {\n\tclientset := fake.NewSimpleClientset()\n\tor := []metav1.OwnerReference{\n\t\t{\n\t\t\tKind:       \"Function\",\n\t\t\tAPIVersion: \"kubeless.io/v1beta1\",\n\t\t},\n\t}\n\tns := \"default\"\n\tfuncLabels := map[string]string{\n\t\t\"foo\": \"bar\",\n\t}\n\tf1Name := \"f1\"\n\tf1 := &kubelessApi.Function{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      f1Name,\n\t\t\tNamespace: ns,\n\t\t\tLabels:    funcLabels,\n\t\t},\n\t\tSpec: kubelessApi.FunctionSpec{\n\t\t\tFunction: \"function\",\n\t\t\tDeps:     \"deps\",\n\t\t\tHandler:  \"foo.bar\",\n\t\t\tRuntime:  \"python2.7\",\n\t\t},\n\t}\n\n\tlangruntime.AddFakeConfig(clientset)\n\tlr := langruntime.SetupLangRuntime(clientset)\n\tlr.ReadConfigMap()\n\n\terr := EnsureFuncConfigMap(clientset, f1, or, lr)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tcm, err := clientset.CoreV1().ConfigMaps(ns).Get(f1Name, metav1.GetOptions{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\texpectedCM := v1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:            f1Name,\n\t\t\tNamespace:       ns,\n\t\t\tLabels:          funcLabels,\n\t\t\tOwnerReferences: or,\n\t\t},\n\t\tData: map[string]string{\n\t\t\t\"handler\":          
\"foo.bar\",\n\t\t\t\"foo.py\":           \"function\",\n\t\t\t\"requirements.txt\": \"deps\",\n\t\t},\n\t}\n\tif !reflect.DeepEqual(*cm, expectedCM) {\n\t\tt.Errorf(\"Unexpected ConfigMap:\\n %+v\\nExpecting:\\n %+v\", *cm, expectedCM)\n\t}\n}\n\nfunc TestEnsureFuncMapWithoutDeps(t *testing.T) {\n\tclientset := fake.NewSimpleClientset()\n\tor := []metav1.OwnerReference{\n\t\t{\n\t\t\tKind:       \"Function\",\n\t\t\tAPIVersion: \"kubeless.io/v1beta1\",\n\t\t},\n\t}\n\tns := \"default\"\n\tlangruntime.AddFakeConfig(clientset)\n\tlr := langruntime.SetupLangRuntime(clientset)\n\tlr.ReadConfigMap()\n\t// It should skip the dependencies field in case it is not supported\n\tf2Name := \"f2\"\n\tf2 := &kubelessApi.Function{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      f2Name,\n\t\t\tNamespace: ns,\n\t\t},\n\t\tSpec: kubelessApi.FunctionSpec{\n\t\t\tFunction: \"function\",\n\t\t\tHandler:  \"foo.bar\",\n\t\t\tRuntime:  \"cobol\",\n\t\t},\n\t}\n\n\terr := EnsureFuncConfigMap(clientset, f2, or, lr)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tcm, err := clientset.CoreV1().ConfigMaps(ns).Get(f2Name, metav1.GetOptions{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\texpectedData := map[string]string{\n\t\t\"handler\": \"foo.bar\",\n\t\t\"foo\":     \"function\",\n\t}\n\tif !reflect.DeepEqual(cm.Data, expectedData) {\n\t\tt.Errorf(\"Unexpected ConfigMap:\\n %+v\\nExpecting:\\n %+v\", cm.Data, expectedData)\n\t}\n\n\t// If there is already a config map it should update the previous one\n\tf2 = &kubelessApi.Function{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      f2Name,\n\t\t\tNamespace: ns,\n\t\t},\n\t\tSpec: kubelessApi.FunctionSpec{\n\t\t\tFunction: \"function2\",\n\t\t\tHandler:  \"foo2.bar2\",\n\t\t\tRuntime:  \"python3.4\",\n\t\t},\n\t}\n\terr = EnsureFuncConfigMap(clientset, f2, or, lr)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tcm, err = 
clientset.CoreV1().ConfigMaps(ns).Get(f2Name, metav1.GetOptions{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\texpectedData = map[string]string{\n\t\t\"handler\":          \"foo2.bar2\",\n\t\t\"foo2.py\":          \"function2\",\n\t\t\"requirements.txt\": \"\",\n\t}\n\tif !reflect.DeepEqual(cm.Data, expectedData) {\n\t\tt.Errorf(\"Unexpected ConfigMap:\\n %+v\\nExpecting:\\n %+v\", cm.Data, expectedData)\n\t}\n}\n\nfunc TestAvoidConfigMapOverwrite(t *testing.T) {\n\tf1Name := \"f1\"\n\tclientset, or, ns, lr := prepareDeploymentTest(f1Name)\n\tclientset.CoreV1().ConfigMaps(ns).Create(&v1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      f1Name,\n\t\t\tNamespace: ns,\n\t\t},\n\t})\n\tf1 := getDefaultFunc(f1Name, ns)\n\terr := EnsureFuncConfigMap(clientset, f1, or, lr)\n\tif err == nil || !strings.Contains(err.Error(), \"conflicting object\") {\n\t\tt.Errorf(\"It should fail because a conflict\")\n\t}\n}\n\nfunc TestEnsureFileNames(t *testing.T) {\n\ttests := []struct {\n\t\tname           string\n\t\tcontentType    string\n\t\tfileNameSuffix string\n\t}{\n\t\t{name: \"text\", contentType: \"text\", fileNameSuffix: \".py\"},\n\t\t{name: \"empty\", contentType: \"\", fileNameSuffix: \".py\"},\n\t\t{name: \"base64\", contentType: \"base64\", fileNameSuffix: \".py\"},\n\t\t{name: \"url\", contentType: \"url\", fileNameSuffix: \".py\"},\n\t\t{name: \"text+zip\", contentType: \"text+zip\", fileNameSuffix: \"\"},\n\t\t{name: \"text+compressedtar\", contentType: \"text+compressedtar\", fileNameSuffix: \"\"},\n\t\t{name: \"base64+zip\", contentType: \"base64+zip\", fileNameSuffix: \"\"},\n\t\t{name: \"base64+compressedtar\", contentType: \"base64+compressedtar\", fileNameSuffix: \"\"},\n\t\t{name: \"url+zip\", contentType: \"url+zip\", fileNameSuffix: \"\"},\n\t\t{name: \"url+compressedtar\", contentType: \"url+compressedtar\", fileNameSuffix: \"\"},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) 
{\n\t\t\tclientset := fake.NewSimpleClientset()\n\t\t\tor := []metav1.OwnerReference{\n\t\t\t\t{\n\t\t\t\t\tKind:       \"Function\",\n\t\t\t\t\tAPIVersion: \"kubeless.io/v1beta1\",\n\t\t\t\t},\n\t\t\t}\n\t\t\tns := \"default\"\n\t\t\tf1Name := \"f1\"\n\t\t\tf1Runtime := \"python\"\n\t\t\tf1 := &kubelessApi.Function{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      f1Name,\n\t\t\t\t\tNamespace: ns,\n\t\t\t\t},\n\t\t\t\tSpec: kubelessApi.FunctionSpec{\n\t\t\t\t\tFunction:            \"function\",\n\t\t\t\t\tHandler:             \"foo.bar\",\n\t\t\t\t\tFunctionContentType: test.contentType,\n\t\t\t\t\tRuntime:             f1Runtime,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tlangruntime.AddFakeConfig(clientset)\n\t\t\tlr := langruntime.SetupLangRuntime(clientset)\n\t\t\tlr.ReadConfigMap()\n\n\t\t\terr := EnsureFuncConfigMap(clientset, f1, or, lr)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t\t\t}\n\n\t\t\tcm, err := clientset.CoreV1().ConfigMaps(ns).Get(f1Name, metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t\t\t}\n\n\t\t\texpectedData := map[string]string{\n\t\t\t\t\"requirements.txt\":          \"\",\n\t\t\t\t\"handler\":                   \"foo.bar\",\n\t\t\t\t\"foo\" + test.fileNameSuffix: \"function\",\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(cm.Data, expectedData) {\n\t\t\t\tt.Errorf(\"Unexpected ConfigMap:\\n %+v\\nExpecting:\\n %+v\", cm.Data, expectedData)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestEnsureService(t *testing.T) {\n\tfakeSvc := v1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: \"myns\",\n\t\t\tName:      \"foo\",\n\t\t},\n\t}\n\tclientset := fake.NewSimpleClientset(&fakeSvc)\n\tor := []metav1.OwnerReference{\n\t\t{\n\t\t\tKind:       \"Function\",\n\t\t\tAPIVersion: \"kubeless.io/v1beta1\",\n\t\t},\n\t}\n\tns := \"default\"\n\tfuncLabels := map[string]string{\n\t\t\"foo\": \"bar\",\n\t}\n\tf1Name := \"f1\"\n\tf1 := &kubelessApi.Function{\n\t\tObjectMeta: 
metav1.ObjectMeta{\n\t\t\tName:      f1Name,\n\t\t\tNamespace: ns,\n\t\t\tLabels:    funcLabels,\n\t\t},\n\t\tSpec: kubelessApi.FunctionSpec{\n\t\t\tFunction: \"function\",\n\t\t\tDeps:     \"deps\",\n\t\t\tHandler:  \"foo.bar\",\n\t\t\tRuntime:  \"python2.7\",\n\t\t},\n\t}\n\terr := EnsureFuncService(clientset, f1, or)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tsvc, err := clientset.CoreV1().Services(ns).Get(f1Name, metav1.GetOptions{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\texpectedSVC := v1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:            f1Name,\n\t\t\tNamespace:       ns,\n\t\t\tLabels:          funcLabels,\n\t\t\tOwnerReferences: or,\n\t\t},\n\t\tSpec: v1.ServiceSpec{\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tName:       \"http-function-port\",\n\t\t\t\t\tPort:       8080,\n\t\t\t\t\tTargetPort: intstr.FromInt(8080),\n\t\t\t\t\tNodePort:   0,\n\t\t\t\t\tProtocol:   v1.ProtocolTCP,\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: funcLabels,\n\t\t\tType:     v1.ServiceTypeClusterIP,\n\t\t},\n\t}\n\tif !reflect.DeepEqual(*svc, expectedSVC) {\n\t\tt.Errorf(\"Unexpected service:\\n %+v\\nExpecting:\\n %+v\", *svc, expectedSVC)\n\t}\n}\n\nfunc TestUpdateFuncSvc(t *testing.T) {\n\tfakeSvc := v1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: \"myns\",\n\t\t\tName:      \"foo\",\n\t\t},\n\t}\n\tclientset := fake.NewSimpleClientset(&fakeSvc)\n\tor := []metav1.OwnerReference{\n\t\t{\n\t\t\tKind:       \"Function\",\n\t\t\tAPIVersion: \"kubeless.io/v1beta1\",\n\t\t},\n\t}\n\tns := \"default\"\n\t// If there is already a service it should update the previous one\n\tfuncLabels := map[string]string{\n\t\t\"foo\": \"bar\",\n\t}\n\tf1Name := \"f1\"\n\tf1 := &kubelessApi.Function{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      f1Name,\n\t\t\tNamespace: ns,\n\t\t\tLabels:    funcLabels,\n\t\t},\n\t\tSpec: kubelessApi.FunctionSpec{\n\t\t\tFunction: 
\"function\",\n\t\t\tDeps:     \"deps\",\n\t\t\tHandler:  \"foo.bar\",\n\t\t\tRuntime:  \"python2.7\",\n\t\t},\n\t}\n\terr := EnsureFuncService(clientset, f1, or)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tnewLabels := map[string]string{\n\t\t\"foobar\": \"barfoo\",\n\t}\n\tf1.ObjectMeta.Labels = newLabels\n\terr = EnsureFuncService(clientset, f1, or)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tsvc, err := clientset.CoreV1().Services(ns).Get(f1Name, metav1.GetOptions{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tif !reflect.DeepEqual(svc.ObjectMeta.Labels, newLabels) {\n\t\tt.Error(\"Unable to update the service\")\n\t}\n\tif reflect.DeepEqual(svc.Spec.Selector, newLabels) {\n\t\tt.Error(\"It should not update the selector\")\n\t}\n}\n\nfunc TestAvoidServiceOverwrite(t *testing.T) {\n\tf1Name := \"f1\"\n\tns := \"default\"\n\tor := []metav1.OwnerReference{\n\t\t{\n\t\t\tKind:       \"Function\",\n\t\t\tAPIVersion: \"kubeless.io/v1beta1\",\n\t\t},\n\t}\n\tclientset := fake.NewSimpleClientset()\n\tclientset.CoreV1().Services(ns).Create(&v1.Service{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      f1Name,\n\t\t\tNamespace: ns,\n\t\t},\n\t})\n\tf1 := getDefaultFunc(f1Name, ns)\n\terr := EnsureFuncService(clientset, f1, or)\n\tif err == nil || !strings.Contains(err.Error(), \"conflicting object\") {\n\t\tt.Errorf(\"It should fail because a conflict\")\n\t}\n}\n\nfunc TestEnsureImage(t *testing.T) {\n\tclientset := fake.NewSimpleClientset()\n\tlangruntime.AddFakeConfig(clientset)\n\tlr := langruntime.SetupLangRuntime(clientset)\n\tlr.ReadConfigMap()\n\tns := \"default\"\n\tf1Name := \"f1\"\n\tor := []metav1.OwnerReference{\n\t\t{\n\t\t\tKind:       \"Function\",\n\t\t\tAPIVersion: \"kubeless.io/v1beta1\",\n\t\t},\n\t}\n\tf1 := &kubelessApi.Function{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      f1Name,\n\t\t\tNamespace: ns,\n\t\t},\n\t\tSpec: 
kubelessApi.FunctionSpec{\n\t\t\tFunction: \"function\",\n\t\t\tDeps:     \"deps\",\n\t\t\tHandler:  \"foo.bar\",\n\t\t\tRuntime:  \"python2.7\",\n\t\t},\n\t}\n\t// Testing happy path\n\tpullSecrets := []v1.LocalObjectReference{\n\t\t{Name: \"creds\"},\n\t}\n\terr := EnsureFuncImage(clientset, f1, lr, or, \"user/image\", \"4840d87600137157493ba43a24f0b4bb6cf524ebbf095ce96c79f85bf5a3ff5a\", \"kubeless/builder\", \"registry.docker.io\", \"registry-creds\", \"unzip\", true, pullSecrets)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tjobs, err := clientset.BatchV1().Jobs(ns).List(metav1.ListOptions{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tif len(jobs.Items) != 1 {\n\t\tt.Errorf(\"It should have created the build job\")\n\t}\n\tbuildContainer := jobs.Items[0].Spec.Template.Spec.Containers[0]\n\tif buildContainer.Image != \"kubeless/builder\" {\n\t\tt.Errorf(\"Image %s of build job is not recognised\", jobs.Items[0].Spec.Template.Spec.Containers[0].Image)\n\t}\n\tdockerConfigFolder := \"\"\n\tfor _, envvar := range buildContainer.Env {\n\t\tif envvar.Name == \"DOCKER_CONFIG_FOLDER\" {\n\t\t\tdockerConfigFolder = envvar.Value\n\t\t}\n\t}\n\tif dockerConfigFolder == \"\" {\n\t\tt.Error(\"Builder image relies on the env var DOCKER_CONFIG_FOLDER to authenticate\")\n\t}\n\tinitContainer := jobs.Items[0].Spec.Template.Spec.InitContainers[0]\n\tif initContainer.Image != \"unzip\" {\n\t\tt.Errorf(\"Unexpected init image %s\", initContainer.Image)\n\t}\n\tif reflect.DeepEqual(jobs.Items[0].Spec.Template.Spec.ImagePullSecrets, pullSecrets) {\n\t\tt.Error(\"Missing ImagePullSecrets\")\n\t}\n\n\t// ensure my-secret is mounted as /var/run/secrets/kubeless.io/my-secret to install container\n\tvar container v1.Container\n\tfor _, c := range jobs.Items[0].Spec.Template.Spec.InitContainers {\n\t\tif c.Name == \"install\" {\n\t\t\tcontainer = c\n\t\t}\n\t}\n\tif len(container.Name) == 0 {\n\t\tt.Fatalf(\"Cannot find init container 
%q\", \"install\")\n\t}\n\tvar found bool\n\tfor _, v := range container.VolumeMounts {\n\t\tif v.MountPath == \"/var/run/secrets/kubeless.io/my-secret\" {\n\t\t\tfound = true\n\t\t}\n\t}\n\tif !found {\n\t\tt.Fatalf(\"Cannot find volume mount /var/run/secrets/kubeless.io/my-secret\")\n\t}\n}\n\nfunc getDefaultFunc(name, ns string) *kubelessApi.Function {\n\tfPort := int32(8080)\n\tf := kubelessApi.Function{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      name,\n\t\t\tNamespace: ns,\n\t\t},\n\t\tSpec: kubelessApi.FunctionSpec{\n\t\t\tFunction: \"function\",\n\t\t\tDeps:     \"deps\",\n\t\t\tHandler:  \"foo.bar\",\n\t\t\tRuntime:  \"python2.7\",\n\t\t\tServiceSpec: v1.ServiceSpec{\n\t\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:       \"http-function-port\",\n\t\t\t\t\t\tPort:       fPort,\n\t\t\t\t\t\tTargetPort: intstr.FromInt(int(fPort)),\n\t\t\t\t\t\tNodePort:   0,\n\t\t\t\t\t\tProtocol:   v1.ProtocolTCP,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tType: v1.ServiceTypeClusterIP,\n\t\t\t},\n\t\t\tDeployment: appsv1.Deployment{\n\t\t\t\tSpec: appsv1.DeploymentSpec{\n\t\t\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tEnv: []v1.EnvVar{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tName:  \"foo\",\n\t\t\t\t\t\t\t\t\t\t\tValue: \"bar\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\treturn &f\n}\n\nfunc prepareDeploymentTest(funcName string) (*fake.Clientset, []metav1.OwnerReference, string, *langruntime.Langruntimes) {\n\tclientset := fake.NewSimpleClientset()\n\tor := []metav1.OwnerReference{\n\t\t{\n\t\t\tKind:       \"Function\",\n\t\t\tAPIVersion: \"k8s.io\",\n\t\t},\n\t}\n\tns := \"default\"\n\tlangruntime.AddFakeConfig(clientset)\n\tlr := langruntime.SetupLangRuntime(clientset)\n\tlr.ReadConfigMap()\n\treturn clientset, or, ns, lr\n}\n\nfunc 
TestEnsureDeployment(t *testing.T) {\n\tf1Name := \"f1\"\n\tclientset, or, ns, lr := prepareDeploymentTest(f1Name)\n\tfuncLabels := map[string]string{\n\t\t\"foo\": \"bar\",\n\t}\n\tfuncAnno := map[string]string{\n\t\t\"bar\": \"foo\",\n\t}\n\tf1 := getDefaultFunc(f1Name, ns)\n\n\tf1.Spec.Deployment.Spec.Template.Spec.InitContainers = []v1.Container{\n\t\t{\n\t\t\tResources: v1.ResourceRequirements{\n\t\t\t\tLimits: v1.ResourceList{\n\t\t\t\t\tv1.ResourceLimitsCPU: resource.MustParse(\"100m\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tf1Port := f1.Spec.ServiceSpec.Ports[0].Port\n\tf1.ObjectMeta.Labels = funcLabels\n\tf1.Spec.Deployment.ObjectMeta = metav1.ObjectMeta{\n\t\tAnnotations: funcAnno,\n\t}\n\tf1.Spec.Deployment.Spec.Template.ObjectMeta = metav1.ObjectMeta{\n\t\tAnnotations: funcAnno,\n\t}\n\t// Testing happy path\n\tpullSecrets := []v1.LocalObjectReference{\n\t\t{Name: \"creds\"},\n\t}\n\terr := EnsureFuncDeployment(clientset, f1, or, lr, \"\", \"unzip\", pullSecrets)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tdpm, err := clientset.AppsV1().Deployments(ns).Get(f1Name, metav1.GetOptions{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\texpectedObjectMeta := metav1.ObjectMeta{\n\t\tName:            f1Name,\n\t\tNamespace:       ns,\n\t\tLabels:          addDefaultLabel(funcLabels),\n\t\tOwnerReferences: or,\n\t\tAnnotations:     funcAnno,\n\t}\n\tif !reflect.DeepEqual(dpm.ObjectMeta, expectedObjectMeta) {\n\t\tt.Errorf(\"Unable to set metadata. 
Received:\\n %+v\\nExpecting:\\n %+v\", dpm.ObjectMeta, expectedObjectMeta)\n\t}\n\texpectedAnnotations := map[string]string{\n\t\t\"prometheus.io/scrape\": \"true\",\n\t\t\"prometheus.io/path\":   \"/metrics\",\n\t\t\"prometheus.io/port\":   strconv.Itoa(int(f1Port)),\n\t\t\"bar\":                  \"foo\",\n\t}\n\tfor i := range expectedAnnotations {\n\t\tif dpm.Spec.Template.Annotations[i] != expectedAnnotations[i] {\n\t\t\tt.Errorf(\"Expecting annotation %s but received %s\", expectedAnnotations[i], dpm.Spec.Template.Annotations[i])\n\t\t}\n\t}\n\tif dpm.Spec.Template.Annotations[\"bar\"] != \"foo\" {\n\t\tt.Error(\"Unable to set annotations\")\n\t}\n\texpectedContainer := v1.Container{\n\t\tName:  f1Name,\n\t\tImage: \"bar\",\n\t\tPorts: []v1.ContainerPort{\n\t\t\t{\n\t\t\t\tContainerPort: int32(f1Port),\n\t\t\t},\n\t\t},\n\t\tEnv: []v1.EnvVar{\n\t\t\t{\n\t\t\t\tName:  \"foo\",\n\t\t\t\tValue: \"bar\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tName:  \"FUNC_HANDLER\",\n\t\t\t\tValue: \"bar\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tName:  \"MOD_NAME\",\n\t\t\t\tValue: \"foo\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tName:  \"FUNC_TIMEOUT\",\n\t\t\t\tValue: \"180\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tName:  \"FUNC_RUNTIME\",\n\t\t\t\tValue: \"python2.7\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tName:  \"FUNC_MEMORY_LIMIT\",\n\t\t\t\tValue: \"0\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tName:  \"FUNC_PORT\",\n\t\t\t\tValue: strconv.Itoa(int(f1Port)),\n\t\t\t},\n\t\t\t{\n\t\t\t\tName:  \"KUBELESS_INSTALL_VOLUME\",\n\t\t\t\tValue: \"/kubeless\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tName:  \"PYTHONPATH\",\n\t\t\t\tValue: \"/kubeless/lib/python2.7/site-packages:/kubeless\",\n\t\t\t},\n\t\t},\n\t\tVolumeMounts: []v1.VolumeMount{\n\t\t\t{\n\t\t\t\tName:      f1Name,\n\t\t\t\tMountPath: \"/kubeless\",\n\t\t\t},\n\t\t},\n\t\tLivenessProbe: &v1.Probe{\n\t\t\tInitialDelaySeconds: int32(5),\n\t\t\tPeriodSeconds:       int32(10),\n\t\t\tHandler: v1.Handler{\n\t\t\t\tExec: &v1.ExecAction{\n\t\t\t\t\tCommand: []string{\"curl\", \"-f\", 
\"http://localhost:8080/healthz\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif !reflect.DeepEqual(dpm.Spec.Template.Spec.Containers[0], expectedContainer) {\n\t\tt.Errorf(\"Unexpected container definition. Received:\\n %+v\\nExpecting:\\n %+v\", dpm.Spec.Template.Spec.Containers[0], expectedContainer)\n\t}\n\n\texpectedAffinity := &v1.Affinity{\n\t\tPodAntiAffinity: &v1.PodAntiAffinity{\n\t\t\tPreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{\n\t\t\t\t{\n\t\t\t\t\tWeight: 100,\n\t\t\t\t\tPodAffinityTerm: v1.PodAffinityTerm{\n\t\t\t\t\t\tLabelSelector: &metav1.LabelSelector{\n\t\t\t\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\t\t\t\"created-by\": \"kubeless\",\n\t\t\t\t\t\t\t\t\"function\":   f1Name,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTopologyKey: \"kubernetes.io/hostname\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif !reflect.DeepEqual(dpm.Spec.Template.Spec.Affinity, expectedAffinity) {\n\t\tt.Errorf(\"Unexpected pod affinity definition. 
Received:\\n %+v\\nExpecting:\\n %+v\", dpm.Spec.Template.Spec.Affinity, expectedAffinity)\n\t}\n\n\tsecrets := dpm.Spec.Template.Spec.ImagePullSecrets\n\tif secrets[0].Name != \"creds\" && secrets[1].Name != \"p1\" && secrets[2].Name != \"p2\" {\n\t\tt.Errorf(\"Expected first secret to be 'p1' but found %v and second secret to be 'p2' and found %v\", secrets[0], secrets[1])\n\t}\n\n\t// Init containers behavior should be tested with integration tests\n\tif len(dpm.Spec.Template.Spec.InitContainers) < 1 {\n\t\tt.Errorf(\"Expecting at least an init container to install deps\")\n\t}\n\tif dpm.Spec.Template.Spec.InitContainers[0].Image != \"unzip\" {\n\t\tt.Errorf(\"Unexpected init image %s\", dpm.Spec.Template.Spec.InitContainers[0].Image)\n\t}\n\tif dpm.Spec.Template.Spec.InitContainers[0].Resources.Limits == nil {\n\t\tt.Errorf(\"Resources must be set for init container\")\n\t}\n\n\t// ensure my-secret is mounted as /var/run/secrets/kubeless.io/my-secret to install container\n\tvar container v1.Container\n\tfor _, c := range dpm.Spec.Template.Spec.InitContainers {\n\t\tif c.Name == \"install\" {\n\t\t\tcontainer = c\n\t\t}\n\t}\n\tif len(container.Name) == 0 {\n\t\tt.Fatalf(\"Cannot find init container %q\", \"install\")\n\t}\n\tvar found bool\n\tfor _, v := range container.VolumeMounts {\n\t\tif v.MountPath == \"/var/run/secrets/kubeless.io/my-secret\" {\n\t\t\tfound = true\n\t\t}\n\t}\n\tif !found {\n\t\tt.Fatalf(\"Cannot find volume mount /var/run/secrets/kubeless.io/my-secret\")\n\t}\n}\n\nfunc TestEnsureDeploymentWithoutFuncNorHandler(t *testing.T) {\n\tfuncName := \"func2\"\n\tclientset, or, ns, lr := prepareDeploymentTest(funcName)\n\t// If no handler and function is given it should not fail\n\tf2 := getDefaultFunc(funcName, ns)\n\tf2.Spec.Function = \"\"\n\tf2.Spec.Handler = \"\"\n\terr := EnsureFuncDeployment(clientset, f2, or, lr, \"\", \"unzip\", []v1.LocalObjectReference{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\t_, err = 
clientset.AppsV1().Deployments(ns).Get(funcName, metav1.GetOptions{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n}\n\nfunc TestEnsureDeploymentWithImage(t *testing.T) {\n\tfuncName := \"func\"\n\tclientset, or, ns, lr := prepareDeploymentTest(funcName)\n\t// If the Image has been already provided it should not resolve it\n\tf3 := getDefaultFunc(funcName, ns)\n\tf3.Spec.Deployment.Spec.Template.Spec.Containers[0].Image = \"test-image\"\n\terr := EnsureFuncDeployment(clientset, f3, or, lr, \"\", \"unzip\", []v1.LocalObjectReference{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tdpm, err := clientset.AppsV1().Deployments(ns).Get(funcName, metav1.GetOptions{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tif dpm.Spec.Template.Spec.Containers[0].Image != \"test-image\" {\n\t\tt.Errorf(\"Unexpected Image Name: %s\", dpm.Spec.Template.Spec.Containers[0].Image)\n\t}\n}\n\nfunc TestEnsureDeploymentWithoutFunc(t *testing.T) {\n\tfuncName := \"func\"\n\tclientset, or, ns, lr := prepareDeploymentTest(funcName)\n\t// If no function is given it should not use an init container\n\tf4 := getDefaultFunc(funcName, ns)\n\tf4.Spec.Function = \"\"\n\tf4.Spec.Deps = \"\"\n\terr := EnsureFuncDeployment(clientset, f4, or, lr, \"\", \"unzip\", []v1.LocalObjectReference{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tdpm, err := clientset.AppsV1().Deployments(ns).Get(funcName, metav1.GetOptions{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tif len(dpm.Spec.Template.Spec.InitContainers) > 0 {\n\t\tt.Error(\"It should not setup an init container\")\n\t}\n}\n\nfunc TestEnsureUpdateDeployment(t *testing.T) {\n\tf1Name := \"f1\"\n\tclientset, or, ns, lr := prepareDeploymentTest(f1Name)\n\t// It should update a deployment if it is already present\n\tfuncAnno := map[string]string{\n\t\t\"bar\": \"foo\",\n\t}\n\tf1 := getDefaultFunc(f1Name, 
ns)\n\tf1.Spec.Deployment.ObjectMeta = metav1.ObjectMeta{\n\t\tAnnotations: funcAnno,\n\t}\n\tf1.Spec.Deployment.Spec.Template.ObjectMeta = metav1.ObjectMeta{\n\t\tAnnotations: funcAnno,\n\t}\n\terr := EnsureFuncDeployment(clientset, f1, or, lr, \"\", \"unzip\", []v1.LocalObjectReference{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tf6 := kubelessApi.Function{}\n\tf6 = *f1\n\tf6.Spec.Handler = \"foo.bar2\"\n\tf6.Spec.Deployment.ObjectMeta.Annotations[\"new-key\"] = \"value\"\n\terr = EnsureFuncDeployment(clientset, &f6, or, lr, \"\", \"unzip\", []v1.LocalObjectReference{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\t// Unable to ensure that the new deployment is patched since fake\n\t// ignores PATCH actions: https://github.com/kubernetes/client-go/issues/364\n}\n\nfunc TestAvoidDeploymentOverwrite(t *testing.T) {\n\tf1Name := \"f1\"\n\tclientset, or, ns, lr := prepareDeploymentTest(f1Name)\n\tclientset.AppsV1().Deployments(ns).Create(&appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      f1Name,\n\t\t\tNamespace: ns,\n\t\t},\n\t})\n\tf1 := getDefaultFunc(f1Name, ns)\n\terr := EnsureFuncDeployment(clientset, f1, or, lr, \"\", \"unzip\", []v1.LocalObjectReference{})\n\tif err == nil || !strings.Contains(err.Error(), \"conflicting object\") {\n\t\tt.Errorf(\"It should fail because a conflict\")\n\t}\n}\n\nfunc TestDeploymentWithUnsupportedRuntime(t *testing.T) {\n\tfuncName := \"func\"\n\tclientset, or, ns, lr := prepareDeploymentTest(funcName)\n\t// It should return an error if some dependencies are given but the runtime is not supported\n\tf7 := getDefaultFunc(\"func7\", ns)\n\tf7.Spec.Deps = \"deps\"\n\tf7.Spec.Runtime = \"cobol\"\n\terr := EnsureFuncDeployment(clientset, f7, or, lr, \"\", \"unzip\", []v1.LocalObjectReference{})\n\n\tif err == nil {\n\t\tt.Fatal(\"An error should be thrown\")\n\t}\n}\n\nfunc TestDeploymentWithTimeout(t *testing.T) {\n\tfuncName := \"func\"\n\tclientset, or, 
ns, lr := prepareDeploymentTest(funcName)\n\t// If a timeout is specified it should set an environment variable FUNC_TIMEOUT\n\tf8 := getDefaultFunc(funcName, ns)\n\tf8.Spec.Timeout = \"10\"\n\terr := EnsureFuncDeployment(clientset, f8, or, lr, \"\", \"unzip\", []v1.LocalObjectReference{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tdpm, err := clientset.AppsV1().Deployments(ns).Get(funcName, metav1.GetOptions{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tif getEnvValueFromList(\"FUNC_TIMEOUT\", dpm.Spec.Template.Spec.Containers[0].Env) != \"10\" {\n\t\tt.Error(\"Unable to set timeout\")\n\t}\n}\n\nfunc TestDeploymentWithPrebuiltImage(t *testing.T) {\n\tfuncName := \"func\"\n\tclientset, or, ns, lr := prepareDeploymentTest(funcName)\n\t// If a prebuilt image is specified it should not build the function using init containers\n\tf9 := getDefaultFunc(funcName, ns)\n\terr := EnsureFuncDeployment(clientset, f9, or, lr, \"user/image:test\", \"unzip\", []v1.LocalObjectReference{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tdpm, err := clientset.AppsV1().Deployments(ns).Get(funcName, metav1.GetOptions{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tif dpm.Spec.Template.Spec.Containers[0].Image != \"user/image:test\" {\n\t\tt.Errorf(\"Unexpected image %s, expecting prebuilt user/image:test\", dpm.Spec.Template.Spec.Containers[0].Image)\n\t}\n\tif len(dpm.Spec.Template.Spec.InitContainers) != 0 {\n\t\tt.Error(\"Unexpected init containers\")\n\t}\n}\n\nfunc TestDeploymentWithVolumes(t *testing.T) {\n\tfuncName := \"func\"\n\tclientset, or, ns, lr := prepareDeploymentTest(funcName)\n\t// It should include existing volumes\n\tf10 := getDefaultFunc(funcName, ns)\n\tf10.Spec.Deployment.Spec.Template.Spec.Volumes = []v1.Volume{\n\t\t{\n\t\t\tName:         \"test\",\n\t\t\tVolumeSource: 
v1.VolumeSource{},\n\t\t},\n\t}\n\tf10.Spec.Deployment.Spec.Template.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{\n\t\t{\n\t\t\tName:      \"test\",\n\t\t\tMountPath: \"/tmp/test\",\n\t\t},\n\t}\n\terr := EnsureFuncDeployment(clientset, f10, or, lr, \"\", \"unzip\", []v1.LocalObjectReference{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tdpm, err := clientset.AppsV1().Deployments(ns).Get(funcName, metav1.GetOptions{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tif dpm.Spec.Template.Spec.Volumes[0].Name != \"test\" {\n\t\tt.Error(\"Should maintain volumen test\")\n\t}\n\tif dpm.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name != \"test\" {\n\t\tt.Error(\"Should maintain volumen test\")\n\t}\n}\n\nfunc TestEnsureDeploymentWithAffinityOverridden(t *testing.T) {\n\tfuncName := \"func\"\n\tclientset, or, ns, lr := prepareDeploymentTest(funcName)\n\t// If the Image has been already provided it should not resolve it\n\tf3 := getDefaultFunc(funcName, ns)\n\tf3.Spec.Deployment.Spec.Template.Spec.Affinity = &v1.Affinity{}\n\terr := EnsureFuncDeployment(clientset, f3, or, lr, \"\", \"unzip\", []v1.LocalObjectReference{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tdpm, err := clientset.AppsV1().Deployments(ns).Get(funcName, metav1.GetOptions{})\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\texpectedAffinity := &v1.Affinity{NodeAffinity: nil, PodAffinity: nil, PodAntiAffinity: nil}\n\tif *dpm.Spec.Template.Spec.Affinity != *expectedAffinity {\n\t\tt.Errorf(\n\t\t\t\"Unexpected Affinity Definition:\\nExpecting: %+v\\nReceived: %+v\",\n\t\t\texpectedAffinity,\n\t\t\tdpm.Spec.Template.Spec.Affinity,\n\t\t)\n\t}\n}\n\nfunc doesNotContain(envs []v1.EnvVar, env v1.EnvVar) bool {\n\tfor _, e := range envs {\n\t\tif e == env {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc TestGetProvisionContainer(t *testing.T) {\n\tclientset := 
fake.NewSimpleClientset()\n\tlangruntime.AddFakeConfig(clientset)\n\tlr := langruntime.SetupLangRuntime(clientset)\n\tlr.ReadConfigMap()\n\n\trvol := v1.VolumeMount{Name: \"runtime\", MountPath: \"/runtime\"}\n\tdvol := v1.VolumeMount{Name: \"deps\", MountPath: \"/deps\"}\n\tresources := v1.ResourceRequirements{Limits: v1.ResourceList{v1.ResourceLimitsCPU: resource.MustParse(\"100m\")}}\n\n\tc, err := getProvisionContainer(\"test\", \"sha256:abc1234\", \"test.func\", \"test.foo\", \"text\", \"python2.7\", \"unzip\", rvol, dvol, resources, lr)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\texpectedContainer := v1.Container{\n\t\tName:            \"prepare\",\n\t\tImage:           \"unzip\",\n\t\tCommand:         []string{\"sh\", \"-c\"},\n\t\tArgs:            []string{\"echo 'abc1234  /deps/test.func' > /tmp/func.sha256 && sha256sum -c /tmp/func.sha256 && cp /deps/test.func /runtime/test.py && cp /deps/requirements.txt /runtime\"},\n\t\tVolumeMounts:    []v1.VolumeMount{rvol, dvol},\n\t\tImagePullPolicy: v1.PullIfNotPresent,\n\t\tResources:       v1.ResourceRequirements{Limits: v1.ResourceList{v1.ResourceLimitsCPU: resource.MustParse(\"100m\")}},\n\t}\n\tif !reflect.DeepEqual(expectedContainer, c) {\n\t\tt.Errorf(\"Unexpected result:\\n %+v\", c)\n\t}\n\n\t// If the content type is encoded it should decode it\n\tc, err = getProvisionContainer(\"Zm9vYmFyCg==\", \"sha256:abc1234\", \"test.func\", \"test.foo\", \"base64\", \"python2.7\", \"unzip\", rvol, dvol, resources, lr)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tif !strings.HasPrefix(c.Args[0], \"base64 -d < /deps/test.func > /tmp/func.decoded\") {\n\t\tt.Errorf(\"Unexpected command: %s\", c.Args[0])\n\t}\n\n\tsecrets, err := lr.GetImageSecrets(\"python2.7\")\n\tif err != nil {\n\t\tt.Errorf(\"Unable to fetch secrets: %v\", err)\n\t}\n\n\tif secrets[0].Name != \"p1\" && secrets[1].Name != \"p2\" {\n\t\tt.Errorf(\"Expected first secret to be 'p1' but found %v 
and second secret to be 'p2' but found %v\", secrets[0], secrets[1])\n\t}\n\n\t// It should skip the dependencies installation if the runtime is not supported\n\tc, err = getProvisionContainer(\"function\", \"sha256:abc1234\", \"test.func\", \"test.foo\", \"text\", \"cobol\", \"unzip\", rvol, dvol, resources, lr)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tif strings.Contains(c.Args[0], \"cp /deps \") {\n\t\tt.Errorf(\"Unexpected command: %s\", c.Args[0])\n\t}\n\n\t// It should extract the file in case it is a Zip\n\tc, err = getProvisionContainer(\"Zm9vYmFyCg==\", \"sha256:abc1234\", \"test.zip\", \"test.foo\", \"base64+zip\", \"python2.7\", \"unzip\", rvol, dvol, resources, lr)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tif !strings.HasPrefix(c.Args[0], \"base64 -d < /deps/test.zip > /tmp/func.decoded\") {\n\t\tt.Errorf(\"Unexpected command: %s\", c.Args[0])\n\t}\n\tif !strings.Contains(c.Args[0], \"unzip -o /tmp/func.decoded -d /runtime\") {\n\t\tt.Errorf(\"Unexpected command: %s\", c.Args[0])\n\t}\n\n\t// It should extract the compressed tar file\n\tc, err = getProvisionContainer(\"Zm9vYmFyCg==\", \"sha256:abc1234\", \"test.tar.gz\", \"test.foo\", \"base64+compressedtar\", \"python2.7\", \"unzip\", rvol, dvol, resources, lr)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tif !strings.HasPrefix(c.Args[0], \"base64 -d < /deps/test.tar.gz > /tmp/func.decoded\") {\n\t\tt.Errorf(\"Unexpected command: %s\", c.Args[0])\n\t}\n\tif !strings.Contains(c.Args[0], \"tar xf /tmp/func.decoded -C /runtime\") {\n\t\tt.Errorf(\"Unexpected command: %s\", c.Args[0])\n\t}\n\n\t// If the content type is url it should use curl\n\tc, err = getProvisionContainer(\"https://raw.githubusercontent.com/test/test/test/test.py\", \"sha256:abc1234\", \"\", \"test.foo\", \"url\", \"python2.7\", \"unzip\", rvol, dvol, resources, lr)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tif 
!strings.HasPrefix(c.Args[0], \"curl 'https://raw.githubusercontent.com/test/test/test/test.py' -L --silent --output /tmp/func.fromurl\") {\n\t\tt.Errorf(\"Unexpected command: %s\", c.Args[0])\n\t}\n\n\t// If the content type is url+zip it should use curl and unzip\n\tc, err = getProvisionContainer(\"https://raw.githubusercontent.com/test/test/test/test.zip\", \"sha256:abc1234\", \"\", \"test.foo\", \"url+zip\", \"python2.7\", \"unzip\", rvol, dvol, resources, lr)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tif !strings.HasPrefix(c.Args[0], \"curl 'https://raw.githubusercontent.com/test/test/test/test.zip' -L --silent --output /tmp/func.fromurl\") {\n\t\tt.Errorf(\"Unexpected command: %s\", c.Args[0])\n\t}\n\tif !strings.Contains(c.Args[0], \"unzip -o /tmp/func.fromurl -d /runtime\") {\n\t\tt.Errorf(\"Unexpected command: %s\", c.Args[0])\n\t}\n\n\t// If the content type is url+compressedtar it should use curl and tar\n\tc, err = getProvisionContainer(\"https://raw.githubusercontent.com/test/test/test/test.tar.gz\", \"sha256:abc1234\", \"\", \"test.foo\", \"url+compressedtar\", \"python2.7\", \"unzip\", rvol, dvol, resources, lr)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tif !strings.HasPrefix(c.Args[0], \"curl 'https://raw.githubusercontent.com/test/test/test/test.tar.gz' -L --silent --output /tmp/func.fromurl\") {\n\t\tt.Errorf(\"Unexpected command: %s\", c.Args[0])\n\t}\n\tif !strings.Contains(c.Args[0], \"tar xf /tmp/func.fromurl -C /runtime\") {\n\t\tt.Errorf(\"Unexpected command: %s\", c.Args[0])\n\t}\n\n\t// if the function use bundled deps in remote zip file\n\tc, err = getProvisionContainer(\"https://raw.githubusercontent.com/test/test/test/test.zip\", \"sha256:abc1234\", \"\", \"test.foo\", \"url+zip+deps\", \"python2.7\", \"unzip\", rvol, dvol, resources, lr)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %s\", err)\n\t}\n\tif !strings.HasPrefix(c.Args[0], \"curl 
'https://raw.githubusercontent.com/test/test/test/test.zip' -L --silent --output /tmp/func.fromurl\") {\n\t\tt.Errorf(\"Unexpected command: %s\", c.Args[0])\n\t}\n\tif !strings.Contains(c.Args[0], \"unzip -o /tmp/func.fromurl -d /runtime\") {\n\t\tt.Errorf(\"Unexpected command: %s\", c.Args[0])\n\t}\n\t// use bundled deps will not copy the requirements.txt to /runtime\n\tif strings.Contains(c.Args[0], \"cp /deps/requirements.txt /runtime\") {\n\t\tt.Errorf(\"Unexpected command: %s\", c.Args[0])\n\t}\n}\n"
  },
  {
    "path": "pkg/utils/metrics.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage utils\n\nimport (\n\t\"bytes\"\n\n\t\"k8s.io/client-go/kubernetes\"\n\n\t\"github.com/prometheus/common/expfmt\"\n)\n\n// Metric contains metrics for a functions\ntype Metric struct {\n\tFunctionName         string  `json:\"function,omitempty\"`\n\tNamespace            string  `json:\"namespace,omitempty\"`\n\tMethod               string  `json:\"method,omitempty\"`\n\tMessage              string  `json:\"message,omitempty\"`\n\tTotalCalls           float64 `json:\"total_calls,omitempty\"`\n\tTotalFailures        float64 `json:\"total_failures,omitempty\"`\n\tTotalDurationSeconds float64 `json:\"total_duration_seconds,omitempty\"`\n\tAvgDurationSeconds   float64 `json:\"avg_duration_seconds,omitempty\"`\n}\n\n// MetricsRetriever is an interface for retreiving metrics from an endpoint\ntype MetricsRetriever interface {\n\tGetRawMetrics(kubernetes.Interface, string, string) ([]byte, error)\n}\n\n// PrometheusMetricsHandler is a handler for retreiving metrics from Prometheus\ntype PrometheusMetricsHandler struct{}\n\nfunc parseMetrics(namespace, functionName string, rawMetrics []byte) ([]*Metric, error) {\n\tparser := expfmt.TextParser{}\n\tparsedData, err := parser.TextToMetricFamilies(bytes.NewReader(rawMetrics))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttmp := map[string]*Metric{}\n\tvar parsedMetrics []*Metric\n\n\tmetricsOfInterest := 
[]string{\"function_duration_seconds\", \"function_calls_total\", \"function_failures_total\"}\n\tfor _, m := range metricsOfInterest {\n\t\tfor _, metric := range parsedData[m].GetMetric() {\n\t\t\t// a function can have metrics for multiple methods (GET, POST, etc.)\n\t\t\t// method names can be values other than GET/POST/PUT/DELETE\n\t\t\tfor _, label := range metric.GetLabel() {\n\t\t\t\tif label.GetName() == \"method\" {\n\t\t\t\t\tif _, ok := tmp[label.GetValue()]; !ok {\n\t\t\t\t\t\ttmp[label.GetValue()] = &Metric{\n\t\t\t\t\t\t\tFunctionName: functionName,\n\t\t\t\t\t\t\tNamespace:    namespace,\n\t\t\t\t\t\t\tMethod:       label.GetValue(),\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif m == \"function_failures_total\" {\n\t\t\t\t\t\ttmp[label.GetValue()].TotalFailures = metric.GetCounter().GetValue()\n\t\t\t\t\t}\n\t\t\t\t\tif m == \"function_duration_seconds\" {\n\t\t\t\t\t\ttmp[label.GetValue()].TotalDurationSeconds = metric.GetHistogram().GetSampleSum()\n\t\t\t\t\t}\n\t\t\t\t\tif m == \"function_calls_total\" {\n\t\t\t\t\t\ttmp[label.GetValue()].TotalCalls = metric.GetCounter().GetValue()\n\t\t\t\t\t\tif tmp[label.GetValue()].TotalCalls > 0 {\n\t\t\t\t\t\t\ttmp[label.GetValue()].AvgDurationSeconds = float64(tmp[label.GetValue()].TotalDurationSeconds) / tmp[label.GetValue()].TotalCalls\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// if the function hasn't been invoked, add an item to the list so the function displays in the output\n\tif len(tmp) == 0 {\n\t\ttmp[\"\"] = &Metric{\n\t\t\tFunctionName: functionName,\n\t\t\tNamespace:    namespace,\n\t\t}\n\t}\n\n\tfor _, v := range tmp {\n\t\tparsedMetrics = append(parsedMetrics, v)\n\t}\n\n\treturn parsedMetrics, nil\n}\n\n// GetRawMetrics returns the raw metrics for a Prometheus endpoint\nfunc (h *PrometheusMetricsHandler) GetRawMetrics(apiV1Client kubernetes.Interface, namespace, functionName string) ([]byte, error) {\n\n\tport, err := GetFunctionPort(apiV1Client, namespace, 
functionName)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\treq := apiV1Client.CoreV1().RESTClient().Get().Namespace(namespace).Resource(\"services\").SubResource(\"proxy\").Name(functionName + \":\" + port).Suffix(\"/metrics\")\n\treturn req.Do().Raw()\n}\n\n// GetFunctionMetrics returns Prometheus metrics as a slice of *Metrics\nfunc GetFunctionMetrics(apiV1Client kubernetes.Interface, h MetricsRetriever, namespace, functionName string) []*Metric {\n\n\tres, err := h.GetRawMetrics(apiV1Client, namespace, functionName)\n\tif err != nil {\n\t\treturn []*Metric{\n\t\t\t{\n\t\t\t\tFunctionName: functionName,\n\t\t\t\tNamespace:    namespace,\n\t\t\t\tMessage:      \"Function does not expose metrics\",\n\t\t\t},\n\t\t}\n\t}\n\n\tmetrics, err := parseMetrics(namespace, functionName, res)\n\tif err != nil {\n\t\treturn []*Metric{\n\t\t\t{\n\t\t\t\tFunctionName: functionName,\n\t\t\t\tNamespace:    namespace,\n\t\t\t\tMessage:      \"Unable to get function metrics\",\n\t\t\t},\n\t\t}\n\t}\n\treturn metrics\n}\n"
  },
  {
    "path": "pkg/version/version.go",
    "content": "/*\nCopyright (c) 2016-2017 Bitnami\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage version\n\nvar (\n\t// Version will be set automatically by the build system via -ldflags\n\tVersion string\n)\n"
  },
  {
    "path": "script/.validate",
    "content": "#!/bin/bash\n\n# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nif [ -z \"$VALIDATE_UPSTREAM\" ]; then\n\t# this is kind of an expensive check, so let's not do this twice if we\n\t# are running more than one validate bundlescript\n\t\n\tVALIDATE_REPO='https://github.com/kubeless/kubeless.git'\n\tVALIDATE_BRANCH='master'\n\t\n\tif [ \"$TRAVIS\" = 'true' -a \"$TRAVIS_PULL_REQUEST\" != 'false' ]; then\n\t\tVALIDATE_REPO=\"https://github.com/${TRAVIS_REPO_SLUG}.git\"\n\t\tVALIDATE_BRANCH=\"${TRAVIS_BRANCH}\"\n\tfi\n\t\n\tVALIDATE_HEAD=\"$(git rev-parse --verify HEAD)\"\n\t\n\tgit fetch -q \"$VALIDATE_REPO\" \"refs/heads/$VALIDATE_BRANCH\"\n\tVALIDATE_UPSTREAM=\"$(git rev-parse --verify FETCH_HEAD)\"\n\t\n\tVALIDATE_COMMIT_LOG=\"$VALIDATE_UPSTREAM..$VALIDATE_HEAD\"\n\tVALIDATE_COMMIT_DIFF=\"$VALIDATE_UPSTREAM...$VALIDATE_HEAD\"\n\t\n\tvalidate_diff() {\n\t\tif [ \"$VALIDATE_UPSTREAM\" != \"$VALIDATE_HEAD\" ]; then\n\t\t\tgit diff \"$VALIDATE_COMMIT_DIFF\" \"$@\"\n\t\tfi\n\t}\n\tvalidate_log() {\n\t\tif [ \"$VALIDATE_UPSTREAM\" != \"$VALIDATE_HEAD\" ]; then\n\t\t\tgit log \"$VALIDATE_COMMIT_LOG\" \"$@\"\n\t\tfi\n\t}\nfi\n"
  },
  {
    "path": "script/binary",
    "content": "#!/usr/bin/env bash\n\n# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -e\n\n\nGIT_COMMIT=$(git describe --tags --dirty)\nBUILD_FLAGS=(-ldflags=\"-w -X github.com/kubeless/kubeless/pkg/version.Version=${GIT_COMMIT}\")\n\n# Get rid of existing binary\necho \"Removing Old Kubeless binaries\"\nrm -f ${GOPATH%%:*}/bin/kubeless\nrm -f ${GOPATH%%:*}/bin/function-controller\n\necho \"Build Kubeless Components binaries\"\n# Build binary\ngo install \\\n    \"${BUILD_FLAGS[@]}\" \\\n    ./cmd/...\n\nif [ $? -eq 0 ]; then\n  echo \"Build Kubeless Components successful. Program saved at ${GOPATH%%:*}/bin\"\nelse\n  echo \"Build Kubeless Components failed.\"\nfi\n"
  },
  {
    "path": "script/binary-cli",
    "content": "#!/usr/bin/env bash\n\n# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -e\n\nOS_PLATFORM_ARG=(-os=\"darwin linux windows\")\nOS_ARCH_ARG=(-arch=\"amd64\")\n\nGIT_COMMIT=$(git describe --tags --dirty)\nBUILD_DATE=$(date)\nBUILD_FLAGS=(-ldflags=\"-w -X github.com/kubeless/kubeless/pkg/version.Version=${GIT_COMMIT}\")\n\n# Get rid of existing binaries\nrm -rf bundles/kubeless*\n\n# Build kubeless\ngox \"${OS_PLATFORM_ARG[@]}\" \"${OS_ARCH_ARG[@]}\" \\\n    -output=\"bundles/kubeless_{{.OS}}-{{.Arch}}/kubeless\" \\\n    \"${BUILD_FLAGS[@]}\" \\\n    ./cmd/kubeless\n"
  },
  {
    "path": "script/binary-controller",
    "content": "#!/usr/bin/env bash\n\n# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -e\n\nif [ -z \"$1\" ]; then\n#    TODO: Skip windows at this moment\n    OS_PLATFORM_ARG=(-os=\"linux\")\nelse\n    OS_PLATFORM_ARG=($1)\nfi\n\nif [ -z \"$2\" ]; then\n    OS_ARCH_ARG=(-arch=\"amd64\")\nelse\n    OS_ARCH_ARG=($2)\nfi\n\nif [ -z \"$3\" ]; then\n    TARGET=\"kubeless-function-controller\"\nelse\n    TARGET=($3)\nfi\n\nif [ -z \"$4\" ]; then\n    PKG=\"./cmd/function-controller\"\nelse\n    PKG=($4)\nfi\n\n\nGIT_COMMIT=$(git describe --tags --dirty)\nBUILD_FLAGS=(-ldflags=\"-w -X github.com/kubeless/kubeless/pkg/version.Version=${GIT_COMMIT}\")\n\n# Get rid of existing binaries\nrm -rf bundles/kubeless*\n\n# Build kubeless-controller\ngox \"${OS_PLATFORM_ARG[@]}\" \"${OS_ARCH_ARG[@]}\" \\\n    -output=\"bundles/kubeless_{{.OS}}-{{.Arch}}/$TARGET\" \\\n    \"${BUILD_FLAGS[@]}\" \\\n    \"$PKG\"\n"
  },
  {
    "path": "script/cluster-up-minikube.sh",
    "content": "#!/usr/bin/env bash\n\n# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# From minikube howto\nexport MINIKUBE_WANTUPDATENOTIFICATION=false\nexport MINIKUBE_WANTREPORTERRORPROMPT=false\nexport MINIKUBE_HOME=$HOME\nexport CHANGE_MINIKUBE_NONE_USER=true\nmkdir -p ~/.kube\ntouch ~/.kube/config\n\nexport KUBECONFIG=$HOME/.kube/config\nexport PATH=${PATH}:${GOPATH:?}/bin\n\nMINIKUBE_VERSION=${MINIKUBE_VERSION:?}\n\ninstall_bin() {\n    local exe=${1:?}\n    sudo install -v ${exe} /usr/local/bin || install ${exe} ${GOPATH:?}/bin\n}\n\n# Travis ubuntu trusty env doesn't have nsenter, needed for VM-less minikube\n# (--vm-driver=none, runs dockerized)\ncheck_or_build_nsenter() {\n    which nsenter >/dev/null && return 0\n    echo \"INFO: Getting 'nsenter' ...\"\n    curl -LO http://mirrors.kernel.org/ubuntu/pool/main/u/util-linux/util-linux_2.30.1-0ubuntu4_amd64.deb\n    dpkg -x ./util-linux_2.30.1-0ubuntu4_amd64.deb /tmp/out\n    install_bin /tmp/out/usr/bin/nsenter\n}\ncheck_or_install_minikube() {\n    which minikube || {\n        wget -q --no-clobber -O minikube \\\n            https://storage.googleapis.com/minikube/releases/${MINIKUBE_VERSION}/minikube-linux-amd64\n        install_bin ./minikube\n    }\n}\n\n# Install nsenter if missing\ncheck_or_build_nsenter\n# Install minikube if missing\ncheck_or_install_minikube\nMINIKUBE_BIN=$(which minikube)\n\n# Start minikube\nsudo -E ${MINIKUBE_BIN} start 
--vm-driver=none \\\n    --extra-config=kubelet.cgroup-driver=cgroupfs \\\n    --memory 4096\n\n# Wait til settles\necho \"INFO: Waiting for minikube cluster to be ready ...\"\ntypeset -i cnt=120\nuntil kubectl --context=minikube get pods >& /dev/null; do\n    ((cnt=cnt-1)) || exit 1\n    sleep 1\ndone\n\nsudo -E ${MINIKUBE_BIN} update-context\n\n# Enable Nginx Ingress\necho \"INFO: Enabling ingress addon to minikube...\"\nsudo -E ${MINIKUBE_BIN} addons enable ingress\nsudo -E ${MINIKUBE_BIN} config set WantUpdateNotification false\n\n# Give some time for the cluster to become healthy\nsleep 10\n\nexit 0\n# vim: sw=4 ts=4 et si\n"
  },
  {
    "path": "script/create_release.sh",
    "content": "#!/bin/bash\nset -e\n\nREPO_NAME=kubeless\nREPO_DOMAIN=kubeless\nTAG=${1:?}\nMANIFESTS=${2:?} # Space separated list of manifests to publish\n\nPROJECT_DIR=$(cd $(dirname $0)/.. && pwd)\n\nsource $(dirname $0)/release_utils.sh\n\nif [[ -z \"$REPO_NAME\" || -z \"$REPO_DOMAIN\" ]]; then\n  echo \"Github repository not specified\" > /dev/stderr\n  exit 1\nfi\n\nif [[ -z \"$ACCESS_TOKEN\" ]]; then\n  echo \"Unable to release: Github Token not specified\" > /dev/stderr\n  exit 1\nfi\n\nrepo_check=`curl -H \"Authorization: token $ACCESS_TOKEN\" -s https://api.github.com/repos/$REPO_DOMAIN/$REPO_NAME`\nif [[ $repo_check == *\"Not Found\"* ]]; then\n  echo \"Not found a Github repository for $REPO_DOMAIN/$REPO_NAME, it is not possible to publish it\" > /dev/stderr\n  exit 1\nelse\n  RELEASE_ID=$(release_tag $TAG $REPO_DOMAIN $REPO_NAME | jq '.id') \nfi\n\nIFS=' ' read -r -a manifests <<< \"$MANIFESTS\"\nfor f in \"${manifests[@]}\"; do\n  cp ${PROJECT_DIR}/${f}.yaml ${PROJECT_DIR}/${f}-${TAG}.yaml\n  upload_asset $REPO_DOMAIN $REPO_NAME \"$RELEASE_ID\" \"${PROJECT_DIR}/${f}-${TAG}.yaml\"\ndone\nfor f in `ls ${PROJECT_DIR}/bundles/kubeless_*.zip`; do\n  upload_asset $REPO_DOMAIN $REPO_NAME $RELEASE_ID $f\ndone\n"
  },
  {
    "path": "script/enable-gcloud.sh",
    "content": "#!/bin/bash\nset -e\nBUILD_DIR=${1:?}\nexport GOOGLE_APPLICATION_CREDENTIALS=$BUILD_DIR/client_secrets.json\necho $GCLOUD_KEY > $GOOGLE_APPLICATION_CREDENTIALS\nif [ ! -d $HOME/gcloud/google-cloud-sdk ]; then\n    mkdir -p $HOME/gcloud &&\n    wget -q https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-187.0.0-linux-x86_64.tar.gz --directory-prefix=$HOME/gcloud &&\n    cd $HOME/gcloud &&\n    tar xzf google-cloud-sdk-187.0.0-linux-x86_64.tar.gz &&\n    printf '\\ny\\n\\ny\\ny\\n' | ./google-cloud-sdk/install.sh &&\n    sudo ln -s $HOME/gcloud/google-cloud-sdk/bin/gcloud /usr/local/bin/gcloud\n    cd $BUILD_DIR;\nfi\ngcloud -q config set project $GKE_PROJECT\nif [ -a $GOOGLE_APPLICATION_CREDENTIALS ]; then\n    gcloud -q auth activate-service-account --key-file $GOOGLE_APPLICATION_CREDENTIALS;\nfi\n"
  },
  {
    "path": "script/find_digest.sh",
    "content": "#!/usr/bin/env bash\n\n# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nREPOSITORY=$1\nTARGET_TAG=$2\n\n# get authorization token\nTOKEN=$(curl -s \"https://auth.docker.io/token?service=registry.docker.io&scope=repository:$REPOSITORY:pull\" | jq -r .token)\n\n# find all tags\nALL_TAGS=$(curl -s -H \"Authorization: Bearer $TOKEN\" https://index.docker.io/v2/$REPOSITORY/tags/list | jq -r .tags[])\n\n# get image digest for target\nTARGET_DIGEST=$(curl -s -D - -H \"Authorization: Bearer $TOKEN\" -H \"Accept: application/vnd.docker.distribution.manifest.v2+json\" https://index.docker.io/v2/$REPOSITORY/manifests/$TARGET_TAG | grep Docker-Content-Digest | cut -d ' ' -f 2)\n\n# for each tags\nfor tag in ${ALL_TAGS[@]}; do\n  # get image digest\n  digest=$(curl -s -D - -H \"Authorization: Bearer $TOKEN\" -H \"Accept: application/vnd.docker.distribution.manifest.v2+json\" https://index.docker.io/v2/$REPOSITORY/manifests/$tag | grep Docker-Content-Digest | cut -d ' ' -f 2)\n\n  # check digest\n  if [[ $TARGET_DIGEST = $digest ]]; then\n    echo \"$tag $digest\"\n  fi\ndone\n"
  },
  {
    "path": "script/integration-tests",
    "content": "#!/usr/bin/env bash\n\n# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Special case: if ./ksonnet-lib exists, set KUBECFG_JPATH\ntest -d $PWD/ksonnet-lib && export KUBECFG_JPATH=$PWD/ksonnet-lib\n\n# We require below env\n: ${GOPATH:?} ${KUBECFG_JPATH:?}\nexport PATH=${PATH}:${GOPATH}/bin\n\n# Default kubernetes context - if it's \"dind\" or \"minikube\" will\n# try to bring up a local (dockerized) cluster\ntest -n \"${TRAVIS_K8S_CONTEXT}\" && set -- ${TRAVIS_K8S_CONTEXT}\n# minikube seems to be more stable than dind, sp for kafka\nINTEGRATION_TESTS_CTX=${1:-minikube}\n\nINTEGRATION_TESTS_TARGET=${2:-default}\n\n# Check for some needed tools, install (some) if missing\nwhich bats > /dev/null || {\n   echo \"ERROR: 'bats' is required to run these tests,\" \\\n        \"install it from https://github.com/sstephenson/bats\"\n   exit 255\n}\n\n# Start a k8s cluster (minikube, dind) if not running\nkubectl get nodes --context=${INTEGRATION_TESTS_CTX:?} || {\n    cluster_up=./script/cluster-up-${INTEGRATION_TESTS_CTX}.sh\n    test -f ${cluster_up} || {\n        echo \"FATAL: bringing up k8s cluster '${INTEGRATION_TESTS_CTX}' not supported\"\n        exit 255\n    }\n    ${cluster_up}\n}\n\n# Both RBAC'd dind and minikube seem to be missing rules to make kube-dns work properly\n# add some (granted) broad ones:\nkubectl --context=${INTEGRATION_TESTS_CTX:?} get clusterrolebinding kube-dns-admin >& /dev/null || \\\n  
  kubectl --context=${INTEGRATION_TESTS_CTX:?} create clusterrolebinding kube-dns-admin --serviceaccount=kube-system:default --clusterrole=cluster-admin\n\n# Prep: load test library, save current k8s default context (and restore it at exit),\n# as kubeless doesn't support --context\nexport TEST_CONTEXT=${INTEGRATION_TESTS_CTX}\nsource script/libtest.bash\ntrap k8s_context_restore 0\nk8s_context_save\n\n# Run the tests thru bats:\nkubectl create namespace kubeless\ncase $INTEGRATION_TESTS_TARGET in\ndeployment)\n    bats tests/deployment-tests.bats\n    ;;\nbasic)\n    bats tests/integration-tests.bats\n    ;;\nhttp)\n    bats tests/integration-tests-http.bats\n    ;;\ncronjob)\n    bats tests/integration-tests-cronjob.bats\n    ;;\nprebuilt_functions)\n    bats tests/integration-tests-prebuilt.bats\n    ;;\n*)\n    bats tests/deployment-tests.bats && \\\n    bats tests/integration-tests.bats && \\\n    bats tests/integration-tests-http.bats && \\\n    bats tests/integration-tests-cronjob.bats\n    ;;\nesac\nexit_code=$?\n\n# Just showing remaining k8s objects\nkubectl get all --all-namespaces\n\nif [ ${exit_code} -ne 0 -o -n \"${TRAVIS_DUMP_LOGS}\" ]; then\n    echo \"INFO: Build ERRORed, dumping logs: ##\"\n    for ns in kubeless default; do\n        echo \"### LOGs: namespace: ${ns} ###\"\n        kubectl get pod -n ${ns} -oname|xargs -I@ sh -xc \"kubectl logs -n ${ns} @|sed 's|^|@: |'\"\n    done\n    echo \"INFO: Description\"\n    kubectl describe pod -l created-by=kubeless\n    echo \"INFO: LOGs: pod: kube-dns ###\"\n    kubectl logs -n kube-system -l k8s-app=kube-dns -c kubedns\n    echo \"INFO: LOGs: END\"\nfi\n[ ${exit_code} -eq 0 ] && echo \"INFO: $0: SUCCESS\" || echo \"ERROR: $0: FAILED\"\nexit ${exit_code}\n# vim: sw=4 ts=4 et si\n"
  },
  {
    "path": "script/libtest.bash",
    "content": "# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# k8s and kubeless helpers, specially \"wait\"-ers on pod ready/deleted/etc\n\nKUBELESS_MANIFEST=kubeless-non-rbac.yaml\nKUBELESS_MANIFEST_RBAC=kubeless.yaml\nKAFKA_MANIFEST=kafka-zookeeper.yaml\nNATS_MANIFEST=nats.yaml\nKINESIS_MANIFEST=kinesis.yaml\n\nKUBECTL_BIN=$(which kubectl)\n: ${KUBECTL_BIN:?ERROR: missing binary: kubectl}\n\nexport TEST_MAX_WAIT_SEC=300\n\n# Workaround 'bats' lack of forced output support, dup() stderr fd\nexec 9>&2\necho_info() {\n    test -z \"$TEST_DEBUG\" && return 0\n    echo \"INFO: $*\" >&9\n}\nexport -f echo_info\n\nkubectl() {\n    ${KUBECTL_BIN:?} --context=${TEST_CONTEXT:?} \"$@\"\n}\n\n## k8s specific Helper functions\nk8s_wait_for_pod_ready() {\n    echo_info \"Waiting for pod '${@}' to be ready ... \"\n    local -i cnt=${TEST_MAX_WAIT_SEC:?}\n\n    # Retries just in case it is not stable\n    local -i successCount=0\n    while [ \"$successCount\" -lt \"3\" ]; do\n        if kubectl get pod \"${@}\" | grep -q Running; then\n            ((successCount=successCount+1))\n        fi\n        ((cnt=cnt-1)) || return 1\n        sleep 1\n    done\n}\nk8s_wait_for_pod_count() {\n    local pod_cnt=${1:?}; shift\n    echo_info \"Waiting for pod '${@}' to have count==${pod_cnt} running ... 
\"\n    local -i cnt=${TEST_MAX_WAIT_SEC:?}\n    # Retries just in case it is not stable\n    local -i successCount=0\n    while [ \"$successCount\" -lt \"3\" ]; do\n        if [[ $(kubectl get pod \"${@}\" -ogo-template='{{.items|len}}') == ${pod_cnt} ]]; then\n            ((successCount=successCount+1))\n        fi\n        ((cnt=cnt-1)) || return 1\n        sleep 1\n    done\n    k8s_wait_for_pod_ready \"${@}\"\n    echo \"Finished waiting\"\n}\nk8s_wait_for_uniq_pod() {\n    k8s_wait_for_pod_count 1 \"$@\"\n}\nk8s_wait_for_pod_gone() {\n    echo_info \"Waiting for pod '${@}' to be gone ... \"\n    local -i cnt=${TEST_MAX_WAIT_SEC:?}\n    until kubectl get pod \"${@}\" | grep -q No.resources.found; do\n        ((cnt=cnt-1)) || return 1\n        sleep 1\n    done\n}\nk8s_wait_for_pod_logline() {\n    local string=\"${1:?}\"; shift\n    local -i cnt=${TEST_MAX_WAIT_SEC:?}\n    echo_info \"Waiting for '${@}' to show logline '${string}' ...\"\n    until kubectl logs \"${@}\"| grep -q \"${string}\"; do\n        ((cnt=cnt-1)) || return 1\n        sleep 1\n    done\n}\nk8s_wait_for_cluster_ready() {\n    echo_info \"Waiting for k8s cluster to be ready (context=${TEST_CONTEXT}) ...\"\n    _wait_for_cmd_ok kubectl get po 2>/dev/null && \\\n    k8s_wait_for_pod_ready -n kube-system -l component=kube-addon-manager && \\\n    k8s_wait_for_pod_ready -n kube-system -l k8s-app=kube-dns && \\\n        return 0\n    return 1\n}\nk8s_log_all_pods() {\n    local namespaces=${*:?} ns\n    for ns in ${*}; do\n        echo \"### namespace: ${ns} ###\"\n        kubectl get pod -n ${ns} -oname|xargs -I@ sh -xc \"kubectl logs -n ${ns} @|sed 's|^|@: |'\"\n    done\n}\nk8s_context_save() {\n    TEST_CONTEXT_SAVED=$(${KUBECTL_BIN} config current-context)\n    # Kubeless doesn't support contexts yet, save+restore it\n    # Don't save current_context if it's the same already\n    [[ $TEST_CONTEXT_SAVED == $TEST_CONTEXT ]] && TEST_CONTEXT_SAVED=\"\"\n\n    # Save current_context\n    [[ 
$TEST_CONTEXT_SAVED != \"\" ]] && \\\n        echo_info \"Saved context: '${TEST_CONTEXT_SAVED}'\" && \\\n        ${KUBECTL_BIN} config use-context ${TEST_CONTEXT}\n}\nk8s_context_restore() {\n    # Restore saved context\n    [[ $TEST_CONTEXT_SAVED != \"\" ]] && \\\n        echo_info \"Restoring context: '${TEST_CONTEXT_SAVED}'\" && \\\n        ${KUBECTL_BIN} config use-context ${TEST_CONTEXT_SAVED}\n}\n_wait_for_cmd_ok() {\n    local cmd=\"${*:?}\"; shift\n    local -i cnt=${TEST_MAX_WAIT_SEC:?}\n    echo_info \"Waiting for '${*}' to successfully exit ...\"\n    until env ${cmd}; do\n        ((cnt=cnt-1)) || return 1\n        sleep 1\n    done\n}\n\n## Specific for kubeless\nkubeless_recreate() {\n    local manifest_del=${1:?missing delete manifest} manifest_upd=${2:?missing update manifest}\n    local -i cnt=${TEST_MAX_WAIT_SEC:?}\n    echo_info \"Delete kubeless namespace, wait to be gone ... \"\n    kubectl delete -f ${manifest_del} || true\n    kubectl delete namespace kubeless >& /dev/null || true\n    while kubectl get namespace kubeless >& /dev/null; do\n        ((cnt=cnt-1)) || return 1\n        sleep 1\n    done\n    kubectl create namespace kubeless\n    kubectl create -f ${manifest_upd}\n}\nkubeless_function_delete() {\n    local func=${1:?}; shift\n    echo_info \"Deleting function \"${func}\" in case still present ... \"\n    kubeless function ls |grep -w \"${func}\" && kubeless function delete \"${func}\" >& /dev/null || true\n    echo_info \"Wait for function \"${func}\" to be deleted \"\n    local -i cnt=${TEST_MAX_WAIT_SEC:?}\n    while kubectl get functions \"${func}\" >& /dev/null; do\n        ((cnt=cnt-1)) || return 1\n        sleep 1\n    done\n}\nkubeless_kafka_trigger_delete() {\n    local trigger=${1:?}; shift\n    echo_info \"Deleting kafka trigger \"${trigger}\" in case still present ... 
\"\n    kubeless trigger kafka list |grep -w \"${trigger}\" && kubeless trigger kafka delete \"${trigger}\" >& /dev/null || true\n}\nkubeless_nats_trigger_delete() {\n    local trigger=${1:?}; shift\n    echo_info \"Deleting NATS trigger \"${trigger}\" in case still present ... \"\n    kubeless trigger nats list |grep -w \"${trigger}\" && kubeless trigger nats delete \"${trigger}\" >& /dev/null || true\n}    \nkubeless_function_deploy() {\n    local func=${1:?}; shift\n    echo_info \"Deploying function ...\"\n    kubeless function deploy ${func} ${@}\n}\n_wait_for_kubeless_controller_ready() {\n    echo_info \"Waiting for kubeless controller to be ready ... \"\n    k8s_wait_for_pod_ready -n kubeless -l kubeless=controller\n    _wait_for_cmd_ok kubectl get functions 2>/dev/null\n}\n_wait_for_kubeless_controller_logline() {\n    local string=\"${1:?}\"\n    k8s_wait_for_pod_logline \"${string}\" -n kubeless -l kubeless=controller -c kubeless-function-controller\n}\nwait_for_ingress() {\n    echo_info \"Waiting until Nginx pod is ready ...\"\n    local -i cnt=${TEST_MAX_WAIT_SEC:?}\n    until kubectl get pods -l name=nginx-ingress-controller -n kube-system>& /dev/null; do\n        ((cnt=cnt-1)) || exit 1\n        sleep 1\n    done\n}\nwait_for_kubeless_kafka_server_ready() {\n    [[ $(kubectl get pod -n kubeless kafka-0 -ojsonpath='{.metadata.annotations.ready}') == true ]] && return 0\n    echo_info \"Waiting for kafka-0 to be ready ...\"\n    k8s_wait_for_pod_logline \"Kafka.*Server.*started\" -n kubeless kafka-0\n    echo_info \"Waiting for kafka-trigger-controller pod to be ready ...\"\n    k8s_wait_for_pod_ready -n kubeless -l kubeless=kafka-trigger-controller\n    _wait_for_cmd_ok kubectl get kafkatriggers 2>/dev/null\n    kubectl annotate pods --overwrite -n kubeless kafka-0 ready=true\n}\nwait_for_kubeless_nats_operator_ready() {\n    echo_info \"Waiting for NATS operator pod to be ready ...\"\n    k8s_wait_for_pod_ready -n nats-io -l 
name=nats-operator\n}\nwait_for_kubeless_nats_cluster_ready() {\n    echo_info \"Waiting for NATS cluster pods to be ready ...\"\n    k8s_wait_for_pod_ready -n nats-io -l nats_cluster=nats\n}\nwait_for_kubeless_nats_controller_ready() {\n    echo_info \"Waiting for NATS controller pods to be ready ...\"\n    k8s_wait_for_pod_ready -n kubeless -l kubeless=nats-trigger-controller\n}\n_wait_for_kubeless_kafka_topic_ready() {\n    local topic=${1:?}\n    local -i cnt=${TEST_MAX_WAIT_SEC:?}\n    echo_info \"Waiting for kafka-0 topic='${topic}' to be ready ...\"\n    # zomg enter kafka-0 container to peek for topic already present\n    until \\\n        kubectl exec -n kubeless kafka-0 -- sh -c \\\n        '/opt/bitnami/kafka/bin/kafka-topics.sh --list --zookeeper $(\n            sed -n s/zookeeper.connect=//p /bitnami/kafka/conf/server.properties)'| \\\n                grep -qw ${topic}\n        do\n        ((cnt=cnt-1)) || return 1\n        sleep 1\n    done\n}\n_wait_for_simple_function_pod_ready() {\n    k8s_wait_for_pod_ready -l function=get-python\n}\n_deploy_simple_function() {\n    make -C examples get-python\n}\n_call_simple_function() {\n    # Artifact to dodge 'bats' lack of support for positively testing _for_ errors\n    case \"${1:?}\" in\n        1) make -C examples get-python-verify |  egrep Error.1;;\n        0) make -C examples get-python-verify;;\n    esac\n}\n_delete_simple_function() {\n    kubeless_function_delete get-python\n}\n\n## Entry points used by 'bats' tests:\nverify_k8s_tools() {\n    local tools=\"kubectl kubecfg kubeless\"\n    for exe in $tools; do\n        which ${exe} >/dev/null && continue\n        echo \"ERROR: '${exe}' needs to be installed\"\n        return 1\n    done\n}\nverify_rbac_mode() {\n    kubectl api-versions | grep -q rbac && return 0\n    echo \"ERROR: Please run w/RBAC, eg minikube as: minikube start --extra-config=apiserver.Authorization.Mode=RBAC\"\n    return 1\n}\n\nwait_for_endpoint() {\n    local func=${1:?}\n   
 local -i cnt=${TEST_MAX_WAIT_SEC:?}\n    local endpoint=$(kubectl get endpoints -l function=$func | grep $func | awk '{print $2}')\n    echo_info \"Waiting for the endpoint ${endpoint}' to be ready ...\"\n    until curl -s $endpoint; do\n        ((cnt=cnt-1)) || return 1\n        sleep 1\n    done\n}\nwait_for_autoscale() {\n    local func=${1:?}\n    local -i cnt=${TEST_MAX_WAIT_SEC:?}\n    local hap=$()\n    echo_info \"Waiting for HAP ${func} to be ready ...\"\n    until kubectl get horizontalpodautoscalers | grep $func; do\n        ((cnt=cnt-1)) || return 1\n        sleep 1\n    done\n}\nwait_for_job() {\n    local func=${1:?}\n    local -i cnt=${TEST_MAX_WAIT_SEC:?}\n    echo_info \"Waiting for build job of ${func} to be finished ...\"\n    until kubectl get job -l function=${func} -o yaml | grep \"succeeded: 1\"; do\n        ((cnt=cnt-1)) || return 1\n        sleep 1\n    done\n}\ntest_must_fail_without_rbac_roles() {\n    echo_info \"RBAC TEST: function deploy/call must fail without RBAC roles\"\n    _delete_simple_function\n    kubeless_recreate $KUBELESS_MANIFEST_RBAC $KUBELESS_MANIFEST\n     _wait_for_kubeless_controller_logline \"User.*cannot\"\n}\nredeploy_with_rbac_roles() {\n    kubeless_recreate $KUBELESS_MANIFEST_RBAC $KUBELESS_MANIFEST_RBAC\n    _wait_for_kubeless_controller_ready\n    _wait_for_kubeless_controller_logline \"controller synced and ready\"\n}\n\ndeploy_kafka() {\n    echo_info \"Deploy kafka ... \"\n    kubectl create -f $KAFKA_MANIFEST\n}\n\ndeploy_nats_operator() {\n    echo_info \"Deploy NATS operator ... \"\n    kubectl apply -f https://raw.githubusercontent.com/nats-io/nats-operator/master/example/deployment-rbac.yaml\n}\n\ndeploy_nats_cluster() {\n    echo_info \"Deploy NATS cluster ... \"\n    kubectl apply -f ./manifests/nats/nats-cluster.yaml -n nats-io\n}\n\ndeploy_nats_trigger_controller() {\n    echo_info \"Deploy NATS trigger controller ... 
\"\n    kubectl create -f $NATS_MANIFEST\n}\n\nexpose_nats_service() {\n    kubectl get svc nats -n nats-io -o yaml | sed 's/ClusterIP/NodePort/' | kubectl replace -f -\n}\n\ndeploy_kinesis_trigger_controller() {\n    echo_info \"Deploy Kinesis trigger controller ... \"\n    kubectl create -f $KINESIS_MANIFEST\n}\n\nwait_for_kubeless_kinesis_controller_ready() {\n    echo_info \"Waiting for Kinesis trigger controller pods to be ready ...\"\n    k8s_wait_for_pod_ready -n kubeless -l kubeless=kinesis-trigger-controller\n}\n\ndeploy_kinesalite() {\n    echo_info \"Deploy Kinesalite a AWS Kinesis mock server ... \"\n    kubectl apply -f ./manifests/kinesis/kinesalite.yaml\n}\n\nwait_for_kinesalite_pod() {\n    echo_info \"Waiting for Kinesalite pod to be ready ...\"\n    k8s_wait_for_pod_ready -l app=kinesis\n}\n\ndeploy_function() {\n    local func=${1:?} func_topic\n    echo_info \"TEST: $func\"\n    kubeless_function_delete ${func}\n    make -sC examples ${func}\n}\n\ndeploy_kafka_trigger() {\n    local trigger=${1:?}\n    echo_info \"TEST: $trigger\"\n    kubeless_kafka_trigger_delete ${trigger}\n    make -sC examples ${trigger}\n}\n\ndeploy_nats_trigger() {\n    local trigger=${1:?}\n    echo_info \"TEST: $trigger\"\n    kubeless_nats_trigger_delete ${trigger}\n    make -sC examples ${trigger}\n}\n\nverify_function() {\n    local func=${1:?}\n    local make_task=${2:-${func}-verify}\n    echo_info \"Init logs: $(kubectl logs -l function=${func} -c prepare)\"\n    k8s_wait_for_pod_ready -l function=${func}\n    case \"${func}\" in\n        *pubsub*)\n            func_topic=$(kubectl get kafkatrigger \"${func}\" -o yaml|sed -n 's/topic: //p')\n            echo_info \"FUNC TOPIC: $func_topic\"\n    esac\n    local -i counter=0\n    until make -sC examples ${make_task}; do\n        echo_info \"FUNC ${func} failed. Retrying...\"\n        ((counter=counter+1))\n        if [ \"$counter\" -ge 3 ]; then\n            echo_info \"FUNC ${func} failed ${counter} times. 
Exiting\"\n            return 1;\n        fi\n        sleep `expr 10 \\* $counter`\n    done\n}\ntest_kubeless_function() {\n    local func=${1:?}\n    deploy_function $func\n    verify_function $func\n}\nupdate_function() {\n    local func=${1:?} func_topic\n    echo_info \"UPDATE: $func\"\n    make -sC examples ${func}-update\n    sleep 10\n    k8s_wait_for_uniq_pod -l function=${func}\n}\nrestart_function() {\n    local func=${1:?}\n    echo_info \"Restarting: $func\"\n    kubectl delete pod -l function=${func}\n    k8s_wait_for_uniq_pod -l function=${func}\n}\ntest_kubeless_function_update() {\n    local func=${1:?}\n    update_function $func\n    verify_function $func ${func}-update-verify\n}\ncreate_basic_auth_secret() {\n    local secret=${1:?}; shift\n    htpasswd -cb auth foo bar\n    kubectl create secret generic $secret --from-file=auth\n}\ncreate_tls_secret_from_key_cert() {\n    local secret=${1:?}; shift\n    openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /tmp/tls.key -out /tmp/tls.crt -subj \"/CN=foo.bar.com\"\n    kubectl create secret tls $secret --key /tmp/tls.key --cert /tmp/tls.crt\n}\ncreate_http_trigger_with_tls_secret(){\n    local func=${1:?}; shift\n    local domain=${1-\"\"};\n    local subpath=${2-\"\"};\n    local secret=${3-\"\"};\n    delete_http_trigger ${func}\n    echo_info \"TEST: Creating HTTP trigger\"\n    local command=\"kubeless trigger http create ing-${func} --function-name ${func}\"\n    if [ -n \"$domain\" ]; then\n        command=\"$command --hostname ${domain}\"\n    fi\n    if [ -n \"$subpath\" ]; then\n        command=\"$command --path ${subpath}\"\n    fi\n    if [ -n \"$secret\" ]; then\n        command=\"$command --tls-secret ${secret}\"\n    fi\n    eval $command\n}\ncreate_http_trigger(){\n    local func=${1:?}; shift\n    local domain=${1-\"\"};\n    local subpath=${2-\"\"};\n    local basicauth=${3-\"\"};\n    local gateway=${4-\"\"};\n    delete_http_trigger ${func}\n    echo_info \"TEST: Creating 
HTTP trigger\"\n    local command=\"kubeless trigger http create ing-${func} --function-name ${func}\"\n    if [ -n \"$domain\" ]; then\n        command=\"$command --hostname ${domain}\"\n    fi\n    if [ -n \"$subpath\" ]; then\n        command=\"$command --path ${subpath}\"\n    fi\n    if [ -n \"$basicauth\" ]; then\n        command=\"$command --basic-auth-secret ${basicauth}\"\n    fi\n    if [ -n \"$gateway\" ]; then\n        command=\"$command --gateway ${gateway}\"\n    fi\n    eval $command\n}\nupdate_http_trigger(){\n    local func=${1:?}; shift\n    local domain=${1:-\"\"}\n    local subpath=${2:-\"\"};\n    echo_info \"TEST: Updating HTTP trigger\"\n    local command=\"kubeless trigger http update ing-${func} --function-name ${func}\"\n    if [ -n \"$domain\" ]; then\n        command=\"$command --hostname ${domain}\"\n    fi\n    if [ -n \"$subpath\" ]; then\n        command=\"$command --path ${subpath}\"\n    fi\n    eval $command\n}\nverify_http_trigger(){\n    local func=${1:?}; shift\n    local ip=${1:?}; shift\n    local expected_response=${1:?}; shift\n    local domain=${1:?}; shift\n    local subpath=${1:-\"\"};\n    kubeless trigger http list | grep ${func}\n    local -i cnt=${TEST_MAX_WAIT_SEC:?}\n    echo_info \"Waiting for ingress to be ready...\"\n    until kubectl get ingress | grep $func | grep \"$domain\" | awk '{print $3}' | grep \"$ip\"; do\n        ((cnt=cnt-1)) || return 1\n        sleep 1\n    done\n    sleep 3\n    curl -vv --header \"Host: $domain\" $ip\\/$subpath | grep \"${expected_response}\"\n}\nverify_http_trigger_basic_auth(){\n    local func=${1:?}; shift\n    local ip=${1:?}; shift\n    local expected_response=${1:?}; shift\n    local domain=${1:?}; shift\n    local subpath=${1:?}; shift\n    local auth=${1:-\"\"};\n    kubeless trigger http list | grep ${func}\n    local -i cnt=${TEST_MAX_WAIT_SEC:?}\n    echo_info \"Waiting for ingress to be ready...\"\n    until kubectl get ingress | grep $func | grep \"$domain\" | awk 
'{print $3}' | grep \"$ip\"; do\n        ((cnt=cnt-1)) || return 1\n        sleep 1\n    done\n    sleep 3\n    curl -v --header \"Host: $domain\" $ip\\/$subpath | grep \"401 Authorization Required\"\n    curl -v --header \"Host: $domain\" -u $auth $ip\\/$subpath | grep \"${expected_response}\"\n}\nverify_https_trigger(){\n    local func=${1:?}; shift\n    local ip=${1:?}; shift\n    local expected_response=${1:?}; shift\n    local domain=${1:?}; shift\n    local subpath=${1:-\"\"};\n    kubeless trigger http list | grep ${func}\n    local -i cnt=${TEST_MAX_WAIT_SEC:?}\n    echo_info \"Waiting for ingress to be ready...\"\n    until kubectl get ingress | grep $func | grep \"$domain\" | awk '{print $3}' | grep \"$ip\"; do\n        ((cnt=cnt-1)) || return 1\n        sleep 1\n    done\n    sleep 3\n    curl -k -vv --header \"Host: $domain\" https:\\/\\/$ip\\/$subpath | grep \"${expected_response}\"\n}\ndelete_http_trigger() {\n    local func=${1:?}; shift\n    kubeless trigger http list |grep -w ing-${func} && kubeless trigger http delete ing-${func} >& /dev/null || true\n}\ncreate_cronjob_trigger(){\n    local func=${1:?}; shift\n    local schedule=${1:?};\n    delete_cronjob_trigger ${func}\n    echo_info \"TEST: Creating CronJob trigger\"\n    kubeless trigger cronjob create ${func} --function ${func} --schedule \"${schedule}\"\n}\nupdate_cronjob_trigger(){\n    local func=${1:?}; shift\n    local schedule=${1:?};\n    echo_info \"TEST: Updating CronJob trigger\"\n    kubeless trigger cronjob update ${func} --function ${func} --schedule \"${schedule}\"\n}\nverify_cronjob_trigger(){\n    local func=${1:?}; shift\n    local schedule=${1:?}; shift\n    local expected_log=${1:?}\n    local -i cnt=${TEST_MAX_WAIT_SEC:?}\n    kubeless trigger cronjob list | grep ${func} | grep \"${schedule}\"\n    echo_info \"Waiting for CronJob to be executed...\"\n    until kubectl logs -l function=${func} | grep \"$expected_log\"; do\n        ((cnt=cnt-1)) || return 1\n        sleep 
1\n    done\n}\ndelete_cronjob_trigger() {\n    local func=${1:?}; shift\n    kubeless trigger cronjob list |grep -w ${func} && kubeless trigger cronjob delete ${func} >& /dev/null || true\n}\ntest_kubeless_autoscale() {\n    local func=${1:?} exp_autoscale act_autoscale\n    # Use some fixed values\n    local val=10 num=3\n    echo_info \"TEST: autoscale ${func}\"\n    kubeless autoscale create ${func} --value ${val:?} --min ${num:?} --max ${num:?}\n    wait_for_autoscale ${func}\n    kubeless autoscale list | fgrep -w ${func}\n    act_autoscale=$(kubectl get horizontalpodautoscaler -ojsonpath='{range .items[*].spec}{@.scaleTargetRef.name}:{@.targetCPUUtilizationPercentage}:{@.minReplicas}:{@.maxReplicas}{end}')\n    exp_autoscale=\"${func}:${val}:${num}:${num}\"\n    [[ ${act_autoscale} == ${exp_autoscale} ]]\n    k8s_wait_for_pod_count ${num} -l function=\"${func}\"\n    kubeless autoscale delete ${func}\n}\ntest_topic_deletion() {\n    local topic=$RANDOM\n    local topic_count=0\n    kubeless topic create $topic\n    kubeless topic delete $topic\n    topic_count=$(kubeless topic list | grep $topic | wc -l)\n    if [ ${topic_count} -gt 0 ] ; then\n     echo_info \"Topic $topic still exists\"\n     exit 200\n    fi\n}\nsts_restart() {\n    local num=1\n    kubectl delete pod kafka-0 -n kubeless\n    kubectl delete pod zoo-0 -n kubeless\n    k8s_wait_for_uniq_pod -l kubeless=zookeeper -n kubeless\n    k8s_wait_for_uniq_pod -l kubeless=kafka -n kubeless\n    wait_for_kubeless_kafka_server_ready\n}\nverify_clean_object() {\n    local type=${1:?}; shift\n    local name=${1:?}; shift\n    echo_info \"Checking if \"${type}\" exists for function \"${name}\"... \"\n    local -i cnt=${TEST_MAX_WAIT_SEC:?}\n    until [[ ! $(kubectl get ${type} 2>&1 | grep ${name}) ]]; do\n        ((cnt=cnt-1)) || return 1\n        sleep 1\n        echo_info \"$(kubectl get ${type} 2>&1 | grep ${name})\"\n    done\n    echo_info \"${type}/${name} is gone\"\n}\n# vim: sw=4 ts=4 et si\n"
  },
  {
    "path": "script/pull-or-build-image.sh",
    "content": "#!/bin/bash\n\nset -e\n\nTARGET=${1:?}\n\nfunction push() {\n    local image=${1:?}\n    if [[ -n \"$DOCKER_USERNAME\" && -n \"$DOCKER_PASSWORD\" ]]; then\n        docker login -u=\"$DOCKER_USERNAME\" -p=\"$DOCKER_PASSWORD\" \n        docker push $image\n    fi\n}\n\ncase \"${TARGET}\" in\n    \"function-controller\")\n      image=${CONTROLLER_IMAGE:?}\n      docker pull $image || make $TARGET CONTROLLER_IMAGE=$image\n      push $image\n      ;;\n    \"function-image-builder\")\n      image=${FUNCTION_IMAGE_BUILDER:?}\n      docker pull $image || make $TARGET FUNCTION_IMAGE_BUILDER=$image\n      push $image\n      ;;\n    *)\n      echo \"Unsupported target\"\n      exit 1\nesac\n"
  },
  {
    "path": "script/release_utils.sh",
    "content": "#!/bin/bash\nset -e\n\nfunction commit_list {\n  local tag=${1:?}\n  local repo_domain=${2:?}\n  local repo_name=${3:?}\n  git fetch --tags\n  local previous_tag=`curl -H \"Authorization: token $ACCESS_TOKEN\" -s https://api.github.com/repos/$repo_domain/$repo_name/tags | jq --raw-output '.[1].name'`\n  local release_notes=`git log $previous_tag..$tag --oneline`\n  local parsed_release_notes=$(echo \"$release_notes\" | sed -n -e 'H;${x;s/\\n/\\\\n- /g;s/^\\\\n//;s/\"/\\\\\"/g;p;}')\n  echo $parsed_release_notes\n}\n\nfunction get_release_notes {\n  local tag=${1:?}\n  local repo_domain=${2:?}\n  local repo_name=${3:?}\n  commits=`commit_list $tag $repo_domain $repo_name`\n  notes=$(echo \"\\\nThis release includes the following commits and features:\\\\n\\\n$commits\\\\n\\\\n\\\nTo install this latest version, use the manifest that is part of the release:\\\\n\\\n\\\\n\\\n**WITH RBAC ENABLED:**\\\\n\\\n\\\\n\\\n\\`\\`\\`console\\\\n\\\nkubectl create ns kubeless\\\\n\\\nkubectl create -f https://github.com/kubeless/kubeless/releases/download/$tag/kubeless-$tag.yaml \\\\n\\\n\\`\\`\\`\\\\n\\\n\\\\n\\\n**WITHOUT RBAC:**\\\\n\\\n\\\\n\\\n\\`\\`\\`console\\\\n\\\nkubectl create ns kubeless\\\\n\\\nkubectl create -f https://github.com/kubeless/kubeless/releases/download/$tag/kubeless-non-rbac-$tag.yaml \\\\n\\\n\\`\\`\\`\\\\n\\\n**OPENSHIFT:**\\\\n\\\n\\\\n\\\n\\`\\`\\`console\\\\n\\\noc create ns kubeless\\\\n\\\noc create -f https://github.com/kubeless/kubeless/releases/download/$tag/kubeless-openshift-$tag.yaml \\\\n\\\n# Kafka\\\\n\\\noc create -f https://github.com/kubeless/kubeless/releases/download/$tag/kafka-zookeeper-openshift-$tag.yaml \\\\n\\\n\\`\\`\\`\\\\n\\\n\")\n  echo \"${notes}\"\n}\n\nfunction get_release_body {\n  local tag=${1:?}\n  local repo_domain=${2:?}\n  local repo_name=${3:?}\n  local release_notes=$(get_release_notes $tag $repo_domain $repo_name)\n  echo '{\n    \"tag_name\": \"'$tag'\",\n    \"target_commitish\": 
\"master\",\n    \"name\": \"'$tag'\",\n    \"body\": \"'$release_notes'\",\n    \"draft\": true,\n    \"prerelease\": false\n  }'\n}\n\nfunction update_release_tag {\n  local tag=${1:?}\n  local repo_domain=${2:?}\n  local repo_name=${3:?}\n  local release_id=$(curl -H \"Authorization: token $ACCESS_TOKEN\" -s https://api.github.com/repos/$repo_domain/$repo_name/releases | jq  --raw-output '.[0].id')\n  local body=$(get_release_body $tag $repo_domain $repo_name)\n  local release=`curl -H \"Authorization: token $ACCESS_TOKEN\" -s --request PATCH --data \"$body\" https://api.github.com/repos/$repo_domain/$repo_name/releases/$release_id`\n  echo $release\n}\n\nfunction release_tag {\n  local tag=$1\n  local repo_domain=${2:?}\n  local repo_name=${3:?}\n  local body=$(get_release_body $tag $repo_domain $repo_name)\n  local release=`curl -H \"Authorization: token $ACCESS_TOKEN\" -s --request POST --data \"$body\" https://api.github.com/repos/$repo_domain/$repo_name/releases`\n  echo $release\n}\n\nfunction upload_asset {\n  local repo_domain=${1:?}\n  local repo_name=${2:?}\n  local release_id=${3:?}\n  local asset=${4:?}\n  local filename=$(basename $asset)\n  if [[ \"$filename\" == *\".zip\" ]]; then\n    local content_type=\"application/zip\"\n  elif [[ \"$filename\" == *\".yaml\" ]]; then\n    local content_type=\"text/yaml\"\n  fi\n  curl -H \"Authorization: token $ACCESS_TOKEN\" \\\n    -H \"Content-Type: $content_type\" \\\n    --data-binary @\"$asset\" \\\n    \"https://uploads.github.com/repos/$repo_domain/$repo_name/releases/$release_id/assets?name=$filename\"\n}\n"
  },
  {
    "path": "script/start-gke-env.sh",
    "content": "#!/bin/bash\n\nCLUSTER=${1:?}\nZONE=${2:?}\nBRANCH=${3:?}\nADMIN=${4:?}\n\n# Resolve latest version from a branch\nVERSION=$(gcloud container get-server-config --zone $ZONE --format='yaml(validMasterVersions)' 2> /dev/null | grep $BRANCH | awk '{print $2}' | head -n 1)\n\nfunction clean() {\n    local resource=${1:?}\n    kubectl get $resource | awk '{print $1}' | xargs kubectl delete $resource || true\n}\nif ! gcloud container clusters list; then\n    echo \"Unable to access gcloud project\"\n    exit 1\nfi\n\nif gcloud container clusters list | grep -q $CLUSTER; then\n    echo \"GKE cluster already exits. Deleting resources\"\n    # Cluster already exists, make sure it is clean\n    gcloud container clusters get-credentials $CLUSTER --zone $ZONE\n    kubectl delete ns kubeless || true\n    resources=(\n        cronjobs\n        jobs\n        deployments\n        horizontalpodautoscalers\n    )\n    for res in \"${resources[@]}\"; do\n        clean $res\n    done\n\n    echo \"Removing clusterroles\"  >&9\n    kubectl delete clusterrole kubeless-controller-deployer || true\n    kubectl delete clusterrole kafka-controller-deployer || true\n    kubectl delete clusterrolebindings kubeless-controller-deployer || true\n    kubectl delete clusterrolebindings kafka-controller-deployer || true\n\n    echo \"Removing customresourcecleanup.apiextensions.k8s.io finalizer from CRD's\"  >&9\n    kubectl patch crd/functions.kubeless.io -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge       || true\n    kubectl patch crd/cronjobtriggers.kubeless.io -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge || true\n    kubectl patch crd/httptriggers.kubeless.io -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge    || true\n    kubectl patch crd/kafkatriggers.kubeless.io -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge   || true\n\n    echo \"Removing finalizers from CRD object's and deleting the CRD objects\"  >&9\n    functions=$(kubectl get functions -o 
name)\n    for func in $functions; do\n        kubectl patch $func -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge || true\n        kubectl delete $func\n    done\n    cronjobtriggers=$(kubectl get cronjobtriggers -o name)\n    for trigger in $cronjobtriggers; do\n        kubectl patch $trigger -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge || true\n        kubectl delete $trigger\n    done\n    httptriggers=$(kubectl get httptriggers -o name)\n    for trigger in $httptriggers; do\n        kubectl patch $trigger -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge || true\n        kubectl delete $trigger        \n    done\n    kafkatriggers=$(kubectl get kafkatriggers -o name)\n    for trigger in $kafkatriggers; do\n        kubectl patch $trigger -p '{\"metadata\":{\"finalizers\":[]}}' --type=merge || true\n        kubectl delete $trigger        \n    done\n\n    echo \"Deleting CRD's\"  >&9\n    kubectl delete crd functions.kubeless.io       || true\n    kubectl delete crd cronjobtriggers.kubeless.io || true\n    kubectl delete crd httptriggers.kubeless.io    || true\n    kubectl delete crd kafkatriggers.kubeless.io   || true\nelse\n    echo \"Creating cluster $CLUSTER in $ZONE (v$VERSION)\"\n    gcloud container clusters create --cluster-version=$VERSION --zone $ZONE $CLUSTER --num-nodes 5 --machine-type=n1-standard-2\n    # Wait for the cluster to respond\n    cnt=20\n    until kubectl get pods; do\n        ((cnt=cnt-1)) || (echo \"Waited 20 seconds but cluster is not reachable\" && return 1)\n        sleep 1\n    done\n    kubectl create clusterrolebinding kubeless-cluster-admin --clusterrole=cluster-admin --user=$ADMIN\nfi\n\n"
  },
  {
    "path": "script/start-test-environment.sh",
    "content": "#!/bin/bash\nset -e\nSCRIPT=$0\nif [ -h $SCRIPT ]; then\n    SCRIPT=`readlink $SCRIPT`\nfi\nROOTDIR=`cd $(dirname $SCRIPT)/.. && pwd`\n\nCOMMAND=\"${@:-bash}\"\n\nif ! minikube status | grep -q \"minikube: $\"; then\n  echo \"Unable to start the test environment with an existing instance of minikube\"\n  echo \"Delete the current profile executing 'minikube delete' or create a new one\"\n  echo \"executing 'minikube profile new_profile'\"\n  exit 1\nfi\n\nminikube start --extra-config=apiserver.authorization-mode=RBAC --insecure-registry 0.0.0.0/0\neval $(minikube docker-env)\n\nCONTEXT=$(kubectl config current-context)\n\n# Both RBAC'd dind and minikube seem to be missing rules to make kube-dns work properly\n# add some (granted) broad ones:\nkubectl --context=${CONTEXT} get clusterrolebinding kube-dns-admin >& /dev/null || \\\n    kubectl --context=${CONTEXT} create clusterrolebinding kube-dns-admin --serviceaccount=kube-system:default --clusterrole=cluster-admin\n\ndocker run --privileged -it \\\n  -v $ROOTDIR:/go/src/github.com/kubeless/kubeless \\\n  -v $HOME/.kube:/root/.kube \\\n  -v $HOME/.minikube:$HOME/.minikube \\\n  -e TEST_CONTEXT=$(kubectl config current-context) \\\n  -e TEST_DEBUG=1 \\\n  kubeless/dev-environment:latest bash -c \"$COMMAND\"\n"
  },
  {
    "path": "script/upload_release_notes.sh",
    "content": "#!/bin/bash\nset -e\n\nREPO_NAME=kubeless\nREPO_DOMAIN=kubeless\n\nsource $(dirname $0)/release_utils.sh\n\nif [[ -z \"$REPO_NAME\" || -z \"$REPO_DOMAIN\" ]]; then\n  echo \"Github repository not specified\" > /dev/stderr\n  exit 1\nfi\n\nif [[ -z \"$ACCESS_TOKEN\" ]]; then\n  echo \"Unable to release: Github Token not specified\" > /dev/stderr\n  exit 1\nfi\n\nrepo_check=`curl -H \"Authorization: token $ACCESS_TOKEN\" -s https://api.github.com/repos/$REPO_DOMAIN/$REPO_NAME`\nif [[ $repo_check == *\"Not Found\"* ]]; then\n  echo \"Not found a Github repository for $REPO_DOMAIN/$REPO_NAME, it is not possible to publish it\" > /dev/stderr\n  exit 1\nelse\n  update_release_tag $1 $REPO_DOMAIN $REPO_NAME\nfi\n"
  },
  {
    "path": "script/validate-git-marks",
    "content": "#!/usr/bin/env bash\n\n# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nsource \"$(dirname \"$BASH_SOURCE\")/.validate\"\n\n# folders=$(find * -type d | egrep -v '^Godeps|bundles|.git')\n\nIFS=$'\\n'\nfiles=( $(validate_diff --diff-filter=ACMR --name-only -- '*' | grep -v '^vendor/' || true) )\nunset IFS\n\nbadFiles=()\nfor f in \"${files[@]}\"; do\n    if [ $(grep -r \"^<<<<<<<\" $f) ]; then\n        badFiles+=( \"$f\" )\n        continue\n    fi\n\n    if [ $(grep -r \"^>>>>>>>\" $f) ]; then\n        badFiles+=( \"$f\" )\n        continue\n    fi\n\n    if [ $(grep -r \"^=======$\" $f) ]; then\n        badFiles+=( \"$f\" )\n        continue\n    fi\n    set -e\ndone\n\n\nif [ ${#badFiles[@]} -eq 0 ]; then\n\techo 'Congratulations!  There is no conflict.'\nelse\n\t{\n\t\techo \"There is trace of conflict(s) in the following files :\"\n\t\tfor f in \"${badFiles[@]}\"; do\n\t\t\techo \" - $f\"\n\t\tdone\n\t\techo\n\t\techo 'Please fix the conflict(s) commit the result.'\n\t\techo\n\t} >&2\n\tfalse\nfi\n"
  },
  {
    "path": "script/validate-gofmt",
    "content": "#!/bin/bash\n\n# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nsource \"$(dirname \"$BASH_SOURCE\")/.validate\"\n\nIFS=$'\\n'\nfiles=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/\\|kubeless.tpl.go' || true) )\nunset IFS\n\nbadFiles=()\nfor f in \"${files[@]}\"; do\n\t# we use \"git show\" here to validate that what's committed is formatted\n\tif [ \"$(git show \"$VALIDATE_HEAD:$f\" | gofmt -s -l)\" ]; then\n\t\tbadFiles+=( \"$f\" )\n\tfi\ndone\n\nif [ ${#badFiles[@]} -eq 0 ]; then\n\techo 'Congratulations!  All Go source files are properly formatted.'\nelse\n\t{\n\t\techo \"These files are not properly gofmt'd:\"\n\t\tfor f in \"${badFiles[@]}\"; do\n\t\t\techo \" - $f\"\n\t\tdone\n\t\techo\n\t\techo 'Please reformat the above files using \"gofmt -s -w\" and commit the result.'\n\t\techo\n\t} >&2\n\tfalse\nfi\n"
  },
  {
    "path": "script/validate-lint",
    "content": "#!/usr/bin/env bash\n\n# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nsource \"$(dirname \"$BASH_SOURCE\")/.validate\"\n\n# We will eventually get to the point where packages should be the complete list\n# of subpackages, vendoring excluded, as given by:\n#\nIFS=$'\\n'\nfiles=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/\\|^pkg/client/\\|^pkg/apis/kubeless/v1beta1/zz_generated.deepcopy.go\\|^integration\\|kubeless.tpl.go' || true) )\nunset IFS\n\nerrors=()\nfor f in \"${files[@]}\"; do\n\t# we use \"git show\" here to validate that what's committed passes go lint\n\tfailedLint=$(golint \"$f\")\n\tif [ \"$failedLint\" ]; then\n\t\terrors+=( \"$failedLint\" )\n\tfi\ndone\n\nif [ ${#errors[@]} -eq 0 ]; then\n\techo 'Congratulations!  All Go source files have been linted.'\nelse\n\t{\n\t\techo \"Errors from golint:\"\n\t\tfor err in \"${errors[@]}\"; do\n\t\t\techo \"$err\"\n\t\tdone\n\t\techo\n\t\techo 'Please fix the above errors. You can test via \"golint\" and commit the result.'\n\t\techo\n\t} >&2\n\tfalse\nfi\n"
  },
  {
    "path": "script/validate-test",
    "content": "#!/usr/bin/env bash\n\n# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# TODO: Simplify once `./...` ignores `vendor/`\n\ngo test \\\n   github.com/kubeless/kubeless/cmd/... \\\n   github.com/kubeless/kubeless/pkg/... \\\n   github.com/kubeless/kubeless/version/...\n"
  },
  {
    "path": "script/validate-vet",
    "content": "#!/usr/bin/env bash\n\n# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nsource \"$(dirname \"$BASH_SOURCE\")/.validate\"\n\nIFS=$'\\n'\nfiles=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/\\|kubeless.tpl.go' || true) )\nunset IFS\n\nfailed=0\nfor f in \"${files[@]}\"; do\n\t# we use \"git show\" here to validate that what's committed passes go tool vet\n\tif ! go vet \"$f\"; then\n\t\tfailed=1\n\tfi\ndone\n\nexit $failed\n"
  },
  {
    "path": "tests/deployment-tests.bats",
    "content": "# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nload ../script/libtest\n\n@test \"Verify TEST_CONTEXT envvar\" {\n  : ${TEST_CONTEXT:?}\n}\n@test \"Verify needed kubernetes tools installed\" {\n  verify_k8s_tools\n}\n@test \"Verify k8s RBAC mode\" {\n  verify_rbac_mode\n}\n@test \"Test simple function failure without RBAC rules\" {\n  test_must_fail_without_rbac_roles\n}\n@test \"Redeploy with proper RBAC rules\" {\n  redeploy_with_rbac_roles\n}\n"
  },
  {
    "path": "tests/integration-tests-cronjob.bats",
    "content": "#!/usr/bin/env bats\n\n# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nload ../script/libtest\n\n@test \"Create Cronjob Trigger\" {\n    deploy_function get-python\n    verify_function get-python\n    create_cronjob_trigger get-python '* * * * *'\n    verify_cronjob_trigger get-python '* * * * *' '\"GET / HTTP/1.1\" 200'\n    update_cronjob_trigger get-python '*/60 * * * *'\n    verify_cronjob_trigger get-python '*/60 * * * *' '\"GET / HTTP/1.1\" 200'\n    delete_cronjob_trigger get-python\n    verify_clean_object cronjobtrigger get-python\n}\n\n@test \"Test no-errors\" {\n  if kubectl logs -n kubeless -l kubeless=controller | grep \"level=error\"; then\n    echo \"Found errors in the controller logs\"\n    false\n  fi\n}\n"
  },
  {
    "path": "tests/integration-tests-http.bats",
    "content": "#!/usr/bin/env bats\n\n# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nload ../script/libtest\n\n@test \"Wait for Ingress\" {\n  wait_for_ingress\n}\n\n@test \"Create HTTP Trigger\" {\n    deploy_function get-python\n    verify_function get-python\n    create_http_trigger get-python \"test.domain\"\n    verify_http_trigger get-python $(minikube ip) \"hello.*world\" \"test.domain\"\n    update_http_trigger get-python \"test.domain-updated\"\n    verify_http_trigger get-python $(minikube ip) \"hello.*world\" \"test.domain-updated\"\n    delete_http_trigger get-python\n    verify_clean_object httptrigger ing-get-python\n    verify_clean_object ingress ing-get-python\n}\n\n@test \"Create HTTP Trigger with a path\" {\n    deploy_function get-python\n    verify_function get-python\n    create_http_trigger get-python \"test.domain\" \"get-python\"\n    verify_http_trigger get-python $(minikube ip) \"hello.*world\" \"test.domain\" \"get-python\"\n    delete_http_trigger get-python\n    verify_clean_object httptrigger ing-get-python\n    verify_clean_object ingress ing-get-python\n}\n\n@test \"Create HTTP Trigger with TLS private key and certificate\" {\n    deploy_function get-python\n    verify_function get-python\n    create_tls_secret_from_key_cert foo-secret\n    create_http_trigger_with_tls_secret get-python \"foo.bar.com\" \"get-python\" \"foo-secret\"\n    verify_https_trigger get-python $(minikube ip) 
\"hello.*world\" \"foo.bar.com\" \"get-python\"\n    delete_http_trigger get-python\n    verify_clean_object httptrigger ing-get-python\n    verify_clean_object ingress ing-get-python\n}\n\n@test \"Create HTTP Trigger with basic auth\" {\n    deploy_function get-python\n    verify_function get-python\n    create_basic_auth_secret \"basic-auth\"\n    create_http_trigger get-python \"test.domain\"  \"get-python\" \"basic-auth\" \"nginx\"\n    verify_http_trigger_basic_auth get-python $(minikube ip) \"hello.*world\" \"test.domain\" \"get-python\" \"foo:bar\"\n    delete_http_trigger get-python\n    verify_clean_object httptrigger ing-get-python\n    verify_clean_object ingress ing-get-python\n}\n\n@test \"Test no-errors\" {\n  if kubectl logs -n kubeless -l kubeless=controller | grep \"level=error\"; then\n    echo \"Found errors in the controller logs\"\n    false\n  fi\n}\n"
  },
  {
    "path": "tests/integration-tests-kafka.bats",
    "content": "#!/usr/bin/env bats\n\n# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nload ../script/libtest\n\n# 'bats' lacks loop support, unroll-them-all ->\n@test \"Wait for kafka\" {\n  deploy_kafka\n  wait_for_kubeless_kafka_server_ready\n}\n@test \"Test function: pubsub-python\" {\n  deploy_function pubsub-python\n  verify_function pubsub-python\n  kubeless_function_delete pubsub-python\n}\n@test \"Test function: pubsub-python34\" {\n  deploy_function pubsub-python34\n  verify_function pubsub-python34\n  kubeless_function_delete pubsub-python34\n}\n@test \"Test 1:n association between Kafka trigger and functions\" {\n  deploy_function kafka-python-func1-topic-s3-python\n  deploy_function kafka-python-func2-topic-s3-python\n  deploy_kafka_trigger s3-python-kafka-trigger\n  verify_function kafka-python-func1-topic-s3-python\n  verify_function kafka-python-func2-topic-s3-python\n  kubeless_function_delete kafka-python-func1-topic-s3-python\n  kubeless_function_delete kafka-python-func2-topic-s3-python\n}\n@test \"Test function: pubsub-nodejs\" {\n  deploy_function pubsub-nodejs\n  verify_function pubsub-nodejs\n  test_kubeless_function_update pubsub-nodejs\n  kubeless_function_delete pubsub-nodejs\n}\n@test \"Test function: pubsub-ruby\" {\n  deploy_function pubsub-ruby\n  verify_function pubsub-ruby\n  kubeless_function_delete pubsub-ruby\n}\n@test \"Test function: pubsub-go\" {\n  deploy_function pubsub-go\n  
verify_function pubsub-go\n  kubeless_function_delete pubsub-go\n}\n@test \"Test topic list\" {\n  wait_for_kubeless_kafka_server_ready\n  for topic in topic1 topic2; do\n    kubeless topic create $topic\n    _wait_for_kubeless_kafka_topic_ready $topic\n  done\n\n  kubeless topic list >$BATS_TMPDIR/kubeless-topic-list\n  grep -qxF topic1 $BATS_TMPDIR/kubeless-topic-list\n  grep -qxF topic2 $BATS_TMPDIR/kubeless-topic-list\n}\n@test \"Test topic deletion\" {\n  test_topic_deletion\n}\n@test \"Verify Kafka after restart (if context=='minikube')\" {\n    local topic=$RANDOM\n    kubeless topic create $topic\n    sts_restart\n    kubeless topic list | grep $topic\n}\n# vim: ts=2 sw=2 si et syntax=sh\n"
  },
  {
    "path": "tests/integration-tests-kinesis.bats",
    "content": "#!/usr/bin/env bats\n\n# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nload ../script/libtest\n\n# 'bats' lacks loop support, unroll-them-all ->\n@test \"Deploy and wait for Kinesalite\" {\n  deploy_kinesis_trigger_controller\n  wait_for_kubeless_kinesis_controller_ready\n  deploy_kinesalite\n  wait_for_kinesalite_pod\n}\n@test \"Test function: stream-python-kinesis\" {\n  deploy_function python-kinesis\n  verify_function python-kinesis\n  kubeless_function_delete python-kinesis\n}\n@test \"Test function: stream-multi-record-pubish-python-kinesis\" {\n  deploy_function python-kinesis-multi-record\n  verify_function python-kinesis-multi-record\n  kubeless_function_delete python-kinesis-multi-record\n}\n"
  },
  {
    "path": "tests/integration-tests-nats.bats",
    "content": "#!/usr/bin/env bats\n\n# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nload ../script/libtest\n\n# 'bats' lacks loop support, unroll-them-all ->\n@test \"Deploy and wait for NATS\" {\n  deploy_nats_operator\n  wait_for_kubeless_nats_operator_ready\n  deploy_nats_trigger_controller\n  wait_for_kubeless_nats_controller_ready\n  deploy_nats_cluster\n  wait_for_kubeless_nats_cluster_ready\n  expose_nats_service\n}\n@test \"Test function: pubsub-python-nats\" {\n  deploy_function python-nats\n  verify_function python-nats\n  kubeless_function_delete python-nats\n}\n@test \"Test 1:n association between NATS trigger and functions\" {\n  deploy_function nats-python-func1-topic-test\n  deploy_function nats-python-func2-topic-test\n  deploy_nats_trigger nats-python-trigger-topic-test\n  verify_function nats-python-func1-topic-test\n  verify_function nats-python-func2-topic-test\n  kubeless_function_delete nats-python-func1-topic-test\n  kubeless_function_delete nats-python-func2-topic-test\n}\n@test \"Test 1:n association between function and NATS triggers\" {\n  deploy_function nats-python-func-multi-topic\n  deploy_nats_trigger nats-python-trigger-topic1\n  deploy_nats_trigger nats-python-trigger-topic2\n  verify_function nats-python-func-multi-topic\n  kubeless_function_delete nats-python-func-multi-topic\n}\n"
  },
  {
    "path": "tests/integration-tests-prebuilt.bats",
    "content": "#!/usr/bin/env bats\n\n# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nload ../script/libtest\n\n@test \"Ensure build step\" {\n  kubectl get -n kubeless configMap kubeless-config -o yaml | grep enable-build-step | grep true\n  kubectl get -n kubeless configMap kubeless-config -o yaml | grep function-registry-tls-verify | grep false\n  kubectl get secret kubeless-registry-credentials ||\n    kubectl create secret docker-registry kubeless-registry-credentials \\\n      --docker-server=http://$(minikube ip):5000/v2 \\\n      --docker-username=\"user\" \\\n      --docker-password=\"password\" \\\n      --docker-email=\"email\"\n}\n\n@test \"Deploy a function using the build system\" {\n  deploy_function get-python\n  wait_for_job get-python\n  curl http://$(minikube ip):5000/v2/_catalog\n  # Speed up pod start when the image is ready\n  restart_function get-python\n  verify_function get-python\n  kubectl logs -n kubeless -l kubeless=controller -c kubeless-function-controller | grep \"Started function build job\"\n  kubectl get deployment -o yaml get-python | grep image | grep $(minikube ip):5000\n}\n\n@test \"Deploy a Golang function using the build system\" {\n  deploy_function get-go-deps\n  wait_for_job get-go-deps\n  # Speed up pod start when the image is ready\n  restart_function get-go-deps\n  verify_function get-go-deps\n  kubectl get deployment -o yaml get-go-deps | grep image | grep $(minikube 
ip):5000\n}\n\n@test \"Test no-errors\" {\n  if kubectl logs -n kubeless -l kubeless=controller | grep \"level=error\"; then\n    echo \"Found errors in the controller logs\"\n    false\n  fi\n}\n"
  },
  {
    "path": "tests/integration-tests.bats",
    "content": "#!/usr/bin/env bats\n\n# Copyright (c) 2016-2017 Bitnami\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nload ../script/libtest\n\n# 'bats' lacks loop support, unroll-them-all ->\n@test \"Deploy functions to evaluate\" {\n  deploy_function get-python\n  deploy_function get-python-deps\n  deploy_function get-python-deps-tar-gz\n  deploy_function get-python-deps-tar-bz2\n  deploy_function get-python-deps-tar-xz\n  deploy_function get-python-custom-port\n  deploy_function timeout-nodejs\n  deploy_function get-nodejs-multi\n  deploy_function get-python-metadata\n  deploy_function get-python-secrets\n  deploy_function post-python\n  deploy_function custom-get-python\n  deploy_function get-python-url-deps\n  deploy_function get-node-url-zip\n  deploy_function get-node-url-tar-gz\n  deploy_function get-node-url-tar-bz2\n  deploy_function get-node-url-tar-xz\n}\n@test \"Test function: get-python\" {\n  verify_function get-python\n}\n@test \"Test function: get-python-deps\" {\n  verify_function get-python-deps\n}\n@test \"Test function: get-python-deps-tar-gz\" {\n  verify_function get-python-deps-tar-gz\n  kubeless_function_delete get-python-deps-tar-gz\n}\n@test \"Test function: get-python-deps-tar-bz2\" {\n  verify_function get-python-deps-tar-bz2\n  kubeless_function_delete get-python-deps-tar-bz2\n}\n@test \"Test function: get-python-deps-tar-xz\" {\n  verify_function get-python-deps-tar-xz\n  kubeless_function_delete 
get-python-deps-tar-xz\n}\n@test \"Test function: get-python-custom-port\" {\n  verify_function get-python-custom-port\n}\n@test \"Test function update: get-python\" {\n  test_kubeless_function_update get-python\n}\n@test \"Test function update: get-python-deps\" {\n  test_kubeless_function_update get-python-deps\n  kubeless_function_delete get-python-deps\n}\n@test \"Test function autoscale: get-python\" {\n  if kubectl api-versions | tr '\\n' ' ' | grep -q -v \"autoscaling/v2beta1\"; then\n    skip \"Autoscale is only supported for Kubernetes >= 1.8\"\n  fi\n  test_kubeless_autoscale get-python\n  kubeless_function_delete get-python\n}\n@test \"Test function: timeout-nodejs\" {\n  verify_function timeout-nodejs\n  kubeless_function_delete timeout-nodejs\n}\n@test \"Test function: get-nodejs-multi\" {\n  verify_function get-nodejs-multi\n  kubeless_function_delete get-nodejs-multi\n}\n@test \"Test custom runtime image\" {\n  verify_function custom-get-python\n  test_kubeless_function_update custom-get-python\n  kubeless_function_delete custom-get-python\n}\n@test \"Test function: post-python\" {\n  verify_function post-python\n  kubeless_function_delete post-python\n}\n@test \"Test function: get-python-metadata\" {\n  verify_function get-python-metadata\n  kubeless_function_delete get-python-metadata\n}\n@test \"Test function: get-python-secrets\" {\n  verify_function get-python-secrets\n  kubeless_function_delete get-python-secrets\n}\n@test \"Test no-errors\" {\n  if kubectl logs -n kubeless -l kubeless=controller | grep \"level=error\"; then\n    echo \"Found errors in the controller logs\"\n    false\n  fi\n}\n@test \"Test function: get-python-url-deps\" {\n  verify_function get-python-url-deps\n  kubeless_function_delete get-python-url-deps\n}\n@test \"Test function: get-node-url-zip\" {\n  verify_function get-node-url-zip\n  kubeless_function_delete get-node-url-zip\n}\n@test \"Test function: get-node-url-tar-gz\" {\n  verify_function get-node-url-tar-gz\n  
kubeless_function_delete get-node-url-tar-gz\n}\n@test \"Test function: get-node-url-tar-bz2\" {\n  verify_function get-node-url-tar-bz2\n  kubeless_function_delete get-node-url-tar-bz2\n}\n@test \"Test function: get-node-url-tar-xz\" {\n  verify_function get-node-url-tar-xz\n  kubeless_function_delete get-node-url-tar-xz\n}\n# vim: ts=2 sw=2 si et syntax=sh\n"
  }
]