Repository: angrycub/nomad_example_jobs Branch: main Commit: 034cc3998d19 Files: 503 Total size: 84.4 MB Directory structure: gitextract_paoaxy22/ ├── .envrc ├── .gitignore ├── HCL2/ │ ├── add_local_file/ │ │ ├── README.md │ │ ├── input.file │ │ ├── raw_file_b64.nomad │ │ ├── raw_file_delims.nomad │ │ ├── raw_file_json.nomad │ │ └── use_file.nomad │ ├── always_change/ │ │ ├── README.md │ │ ├── before.nomad │ │ ├── uuid.nomad │ │ └── variable.nomad │ ├── dynamic/ │ │ ├── README.md │ │ └── example.nomad │ ├── object_to_template/ │ │ ├── README.md │ │ └── example.nomad │ └── variable_jobs/ │ ├── README.md │ ├── decode-external-file/ │ │ ├── README.MD │ │ ├── env.json │ │ ├── job1.nomad │ │ └── job2.nomad │ ├── env-vars/ │ │ ├── README.MD │ │ ├── env.vars │ │ ├── job1.nomad │ │ └── job2.nomad │ ├── job.nomad │ ├── job.vars │ └── multiple-var-files/ │ ├── README.MD │ ├── job1.nomad │ ├── job1.vars │ ├── job2.nomad │ ├── job2.vars │ ├── job3.nomad │ ├── job3.vars │ └── shared.vars ├── README.md ├── alloc_folder/ │ ├── mount_alloc.nomad │ └── sidecar.nomad ├── applications/ │ ├── artifactory_oss/ │ │ ├── README.md │ │ └── registry.nomad │ ├── cluster-broccoli/ │ │ └── example.nomad │ ├── docker_registry/ │ │ ├── README.md │ │ └── registry.nomad │ ├── docker_registry_v2/ │ │ ├── README.md │ │ ├── htpasswd │ │ ├── make_password.sh │ │ └── registry.nomad │ ├── docker_registry_v3/ │ │ ├── README.md │ │ ├── make_password.sh │ │ └── registry.nomad │ ├── mariadb/ │ │ └── mariadb.nomad │ ├── membrane-soa/ │ │ ├── README.md │ │ ├── soap-proxy-v1-linux.nomad │ │ ├── soap-proxy-v1-windows.nomad │ │ └── soap-proxy.nomad │ ├── minio/ │ │ ├── README.md │ │ ├── minio.nomad │ │ └── secure-variables/ │ │ ├── README.md │ │ ├── minio-data/ │ │ │ └── .gitkeep │ │ ├── minio.nomad │ │ ├── start.sh │ │ ├── stop.sh │ │ └── volume.hcl │ ├── postgres/ │ │ ├── README.md │ │ └── postgres.nomad │ ├── prometheus/ │ │ ├── README.md │ │ ├── fabio-service.nomad │ │ ├── grafana/ │ │ │ ├── README.md │ │ │ └── nomad_jobs.json │ │ ├── node-exporter.nomad │ │ └── prometheus.nomad │ ├── vms/ │ │ ├── freedos/ │ │ │ ├── .gitignore │ │ │ ├── README.md │ │ │ ├── freedos.img.tgz │ │ │ ├── freedos.img.tgz.SHASUM │ │ │ └── freedos.nomad │ │ └── tinycore/ │ │ ├── README.md │ │ ├── tc_ssh.nomad │ │ └── tinycore.qcow2.tgz │ └── wordpress/ │ ├── README.md │ ├── distributed/ │ │ ├── README.md │ │ ├── build-site.nomad │ │ ├── nginx.nomad │ │ ├── reset.sh │ │ ├── wordpress-db.nomad │ │ └── wordpress.nomad │ └── simple/ │ ├── README.md │ └── wordpress.nomad ├── artifact_sleepyecho/ │ ├── README.md │ ├── SleepyEcho.sh │ ├── artifact_sleepyecho.nomad │ └── vault_sleepyecho.nomad ├── batch/ │ ├── batch_gc/ │ │ └── example.nomad │ ├── dispatch/ │ │ ├── sleepy.nomad │ │ ├── sleepy1.nomad │ │ ├── sleepy10.nomad │ │ ├── sleepy2.nomad │ │ ├── sleepy3.nomad │ │ ├── sleepy4.nomad │ │ ├── sleepy5.nomad │ │ ├── sleepy6.nomad │ │ ├── sleepy7.nomad │ │ ├── sleepy8.nomad │ │ └── sleepy9.nomad │ ├── dont_restart_fail/ │ │ ├── README.md │ │ └── example.nomad │ ├── lost_batch/ │ │ ├── README.md │ │ ├── batch.nomad │ │ └── periodic.nomad │ ├── lots_of_batches/ │ │ ├── README.md │ │ └── payload.nomad.template │ ├── periodic/ │ │ ├── prohibit-overlap.nomad │ │ └── template.nomad │ └── spread_batch/ │ ├── example.nomad │ └── example2.nomad ├── batch_overload/ │ ├── example.nomad │ └── periodic.nomad ├── blocked_eval/ │ ├── README.md │ └── example.nomad ├── check.sh ├── cni/ │ ├── README.md │ ├── diy_brige/ │ │ ├── README.md │ │ ├── diybridge.conflist │ │ ├── 
example.nomad │ │ └── repro.nomad │ └── example.nomad ├── complex_meta/ │ ├── template_env.nomad │ └── template_meta.nomad ├── connect/ │ ├── consul.nomad │ ├── discuss/ │ │ ├── blocky.yaml │ │ └── job.nomad │ ├── dns-via-mesh/ │ │ ├── README.md │ │ ├── consul-dns.nomad │ │ ├── consul-dns2.nomad │ │ └── go-resolv-test/ │ │ ├── .gitignore │ │ ├── build.sh │ │ └── main.go │ ├── ingress_gateways/ │ │ └── ingress_gateway.nomad │ ├── native/ │ │ └── cn-demo.nomad │ ├── nginx_ingress/ │ │ ├── countdash.nomad │ │ └── ingress.nomad │ └── sidecar/ │ ├── countdash.nomad │ └── countdash2.nomad ├── consul/ │ ├── add_check/ │ │ ├── README.md │ │ ├── e1.nomad │ │ ├── e2.nomad │ │ └── e3.nomad │ └── use_consul_for_kv_path/ │ ├── README.md │ └── template.nomad ├── consul-template/ │ ├── coordination/ │ │ ├── README.md │ │ └── sample.nomad │ ├── missing_vault_value/ │ │ └── sample.nomad │ └── my_first_kv/ │ ├── README.md │ └── example.nomad ├── countdash/ │ ├── connect/ │ │ └── countdash.nomad │ └── simple/ │ └── countdash.nomad ├── csi/ │ ├── aws/ │ │ ├── ebs/ │ │ │ ├── README.md │ │ │ ├── busybox.nomad │ │ │ ├── mysql-server.nomad │ │ │ ├── plugin-ebs-controller.nomad │ │ │ ├── plugin-ebs-nodes.nomad │ │ │ └── volume.hcl │ │ └── efs/ │ │ ├── README.md │ │ ├── busybox.nomad │ │ ├── node.nomad │ │ └── volume.hcl │ ├── gcp/ │ │ └── gce-pd/ │ │ ├── README.md │ │ ├── config.nomad │ │ ├── controller.nomad │ │ ├── cv-nomad.hcl │ │ ├── disk.hcl │ │ ├── job.nomad │ │ └── nodes.nomad │ ├── hetzner/ │ │ └── volume/ │ │ ├── README.md │ │ ├── config.nomad │ │ ├── job.nomad │ │ ├── node.nomad │ │ └── volume.hcl │ └── hostpath/ │ ├── block/ │ │ ├── README.md │ │ ├── csi-hostpath-driver.nomad │ │ ├── job.nomad │ │ └── test.sh │ ├── file/ │ │ ├── README.md │ │ ├── csi-hostpath-driver.nomad │ │ ├── job.nomad │ │ └── test.sh │ └── volume.hcl ├── deployments/ │ └── failing_deployment/ │ └── example.nomad ├── docker/ │ ├── auth_from_template/ │ │ ├── README.md │ │ └── auth.nomad │ ├── datadog/ │ │ ├── container_network.nomad │ │ ├── ex3.nomad │ │ └── example2.nomad │ ├── docker+host_volume/ │ │ ├── README.md │ │ ├── task_deps.nomad │ │ └── unsafe.nomad │ ├── docker_dynamic_hostname/ │ │ ├── README.md │ │ ├── finished.nomad │ │ ├── res_file │ │ └── view.sh │ ├── docker_entrypoint/ │ │ ├── Dockerfile │ │ └── example.nomad │ ├── docker_image_not_found/ │ │ ├── README.md │ │ ├── reschedule.nomad │ │ └── restart.nomad │ ├── docker_interpolated_image_name/ │ │ ├── README.md │ │ ├── example.nomad │ │ └── hostname.nomad │ ├── docker_logging/ │ │ └── example.nomad │ ├── docker_mac_address/ │ │ └── example.nomad │ ├── docker_network/ │ │ ├── example1.nomad │ │ └── example2.nomad │ ├── docker_nfs/ │ │ ├── README.md │ │ └── example.nomad │ ├── docker_template/ │ │ └── example.nomad │ ├── docker_twice_in_alloc/ │ │ └── example.nomad │ ├── docker_windows_abs_mount/ │ │ ├── Dockerfile │ │ ├── README.md │ │ ├── SleepyEcho.ps1 │ │ └── repro.nomad │ ├── env_var_args/ │ │ ├── Dockerfile │ │ ├── README.md │ │ ├── cmd.sh │ │ ├── cmd_alt.sh │ │ ├── entrypoint.sh │ │ ├── start.nomad │ │ └── test.nomad │ ├── get_fact_from_consul/ │ │ ├── README.md │ │ ├── args.nomad │ │ └── image.nomad │ ├── host-volumes-and-users/ │ │ ├── README.md │ │ └── scratch.nomad │ ├── labels/ │ │ ├── README.md │ │ ├── heredoc.nomad │ │ ├── interpolation.nomad │ │ └── literal.nomad │ └── mount_alloc/ │ ├── README.md │ └── example.nomad ├── drain/ │ └── example.nomad ├── dummy/ │ └── example.nomad ├── echo_stack/ │ ├── README.md │ ├── fabio-system.nomad │ ├── 
login-service.nomad │ └── profile-service.nomad ├── env/ │ └── escaped_env_vars/ │ ├── Dockerfile │ ├── README.md │ ├── entrypoint.sh │ └── example.nomad ├── environment/ │ ├── README.md │ └── example.nomad ├── exec/ │ └── host-volumes-and-users/ │ ├── README.md │ └── scratch.nomad ├── exec-zip/ │ ├── README.md │ ├── example.nomad │ └── folder.tgz ├── fabio/ │ ├── README.md │ ├── fabio-docker.nomad │ ├── fabio-service.nomad │ └── fabio-system.nomad ├── fabio-ssl/ │ └── fabio-ssl.nomad ├── failing_jobs/ │ ├── README.md │ ├── failing_sidecar/ │ │ ├── README.md │ │ └── example.nomad │ └── impossible_constratint/ │ ├── README.md │ └── example.nomad ├── giant/ │ └── example.nomad ├── guide/ │ └── TUTORIAL_TEMPLATE.mdx ├── host_volume/ │ ├── README.md │ ├── mariadb/ │ │ └── mariadb.nomad │ ├── prometheus/ │ │ ├── README.md │ │ ├── grafana/ │ │ │ ├── README.md │ │ │ └── nomad_jobs.json │ │ └── prometheus.nomad │ └── read_only/ │ └── read_only.nomad ├── http_echo/ │ ├── arm-service.nomad │ ├── bar-service.nomad │ ├── car-service-broken-check.nomad │ ├── foo-service.deployment.nomad │ ├── foo-service.nomad │ ├── foo-test.nomad │ └── template/ │ ├── echo_template.nomad │ ├── ets.nomad │ ├── ets2.nomad │ └── ets3.nomad ├── httpd_site/ │ ├── README.md │ ├── httpd.nomad │ ├── make_site.sh │ ├── site-content/ │ │ ├── about.html │ │ ├── css/ │ │ │ └── style.css │ │ └── index.html │ └── site-content.tgz ├── ipv6/ │ └── SimpleHTTPServer/ │ └── sample.nomad ├── java/ │ ├── JavaDriverTest/ │ │ ├── java-driver-test.nomad │ │ └── test2.nomad │ ├── README.md │ ├── SampleWebApp.war │ ├── apache_camel/ │ │ ├── camel-standalone-helloworld-1.0-SNAPSHOT.jar │ │ └── java_files.nomad │ └── jar-test/ │ ├── README.md │ ├── jar/ │ │ └── Count.jar │ ├── jar-test.nomad │ └── src/ │ └── Count.java ├── job_examples/ │ ├── base-batch.nomad │ └── meta/ │ ├── README.md │ └── meta-batch.nomad ├── json-jobs/ │ ├── example.nomad │ └── job.json ├── load_balancers/ │ └── traefik/ │ ├── README.md │ ├── traefik.nomad │ ├── webapp.nomad │ └── webapp2.nomad ├── meta/ │ ├── README.md │ └── example.nomad ├── microservice/ │ └── example.nomad ├── minecraft/ │ ├── minecraft.nomad │ ├── minecraft_exec.nomad │ └── plugin.nomad ├── monitoring/ │ └── sensu/ │ ├── fabio-docker.nomad │ └── sensu.nomad ├── nginx-fabio-clone/ │ ├── README.md │ ├── bar-service.nomad │ ├── e.ct │ ├── e.out │ ├── example.nomad │ ├── foo-service.nomad │ ├── tj.ct │ └── tj.out ├── oom/ │ └── example.nomad ├── output.html ├── parameterized/ │ ├── README.md │ ├── docker_hello_world/ │ │ └── hello-world.nomad │ ├── template.nomad │ └── to_specific_client/ │ ├── example.nomad │ └── workaround/ │ ├── README.md │ ├── example.nomad │ ├── rolling_run.sh │ └── watch.py ├── ports/ │ ├── README.md │ └── example.nomad ├── preserve_state/ │ ├── bar-service.jsonjob │ ├── example.jsonjob │ ├── fabio.jsonjob │ ├── foo-service.jsonjob │ ├── hashi-ui.jsonjob │ ├── jam.sh │ ├── nomad_debug │ └── preserve.sh ├── qemu/ │ ├── README.md │ ├── hass/ │ │ └── hass.nomad │ ├── imagebuilder/ │ │ ├── Core-current.iso │ │ ├── Dockerfile │ │ ├── NOTES.md │ │ └── core-image.qcow2 │ ├── job.json │ ├── tc.qcow2 │ ├── tc_ssh.nomad │ ├── tc_ssh2.nomad │ ├── tc_ssh_arm.nomad │ └── tinycore.qcow2 ├── raw_exec/ │ ├── env.nomad │ ├── mkdir/ │ │ ├── README.md │ │ ├── mkdir-bash.nomad │ │ └── mkdir.nomad │ ├── ps.nomad │ ├── quoted_args/ │ │ ├── quoted_args.nomad │ │ └── quoted_args_2.nomad │ └── user/ │ └── example.nomad ├── reproductions/ │ └── cpu_rescheduling/ │ ├── README.md │ └── repro.nomad ├── 
reschedule/ │ └── ex.nomad ├── restart/ │ └── restart.nomad ├── rolling_upgrade/ │ ├── README.md │ ├── cv-new.nomad │ ├── cv.nomad │ ├── example-new.nomad │ └── example.nomad ├── sentinel/ │ ├── README.md │ ├── alwaysFalse.sentinel │ ├── example.nomad │ ├── exampleGroupMissingNodeClass.nomad │ ├── exampleGroupNodeClass.nomad │ ├── exampleJobNodeClass.nomad │ ├── exampleNoNodeClass.nomad │ ├── payload.json │ └── requireNodeClass.sentinel ├── server-variables/ │ ├── README.md │ ├── build-site.nomad │ ├── nginx.nomad │ ├── reset.sh │ ├── wordpress-db.nomad │ └── wordpress.nomad ├── sleepy/ │ ├── README.md │ ├── sleepy_bash/ │ │ └── sleepy.nomad │ └── sleepy_python/ │ ├── README.md │ ├── batch_sleepy_python.nomad │ └── sleepy_python.nomad ├── spread/ │ ├── example.nomad │ ├── scheduler.json │ └── scheduler_b.json ├── stress/ │ ├── README.md │ └── cpu_throttled_time/ │ ├── README.md │ └── stress.nomad ├── super_big/ │ ├── README.md │ ├── super_big.nomad │ └── super_big2.nomad ├── system_jobs/ │ ├── sleepy/ │ │ ├── README.md │ │ ├── sleepy_bash/ │ │ │ └── sleepy.nomad │ │ └── sleepy_python/ │ │ ├── README.md │ │ ├── batch_sleepy_python.nomad │ │ └── sleepy_python.nomad │ ├── system_deployment/ │ │ ├── deploy_jdk.nomad │ │ ├── fabio-system.nomad │ │ ├── fabio-system.nomad2 │ │ ├── foo-system.nomad │ │ └── foo-system.nomad2 │ └── system_filter/ │ ├── filtered.nomad │ └── host_vol.nomad ├── task_deps/ │ ├── consul-lock/ │ │ └── myapp.nomad │ ├── disk_check/ │ │ ├── README.md │ │ └── disk.nomad │ ├── init_artifact/ │ │ ├── README.md │ │ ├── batch-init-artifact.nomad │ │ └── service-init-artifact.nomad │ ├── interjob/ │ │ ├── README.md │ │ ├── myapp.nomad │ │ └── myservice.nomad │ ├── k8sdoc/ │ │ ├── README.md │ │ ├── init.nomad │ │ ├── k8sdoc1.nomad │ │ ├── myapp.nomad │ │ └── myservice.nomad │ └── sidecar/ │ └── example.nomad ├── template/ │ ├── batch/ │ │ ├── README.md │ │ ├── context.nomad │ │ ├── parameter.nomad │ │ ├── services.nomad │ │ └── template.nomad │ ├── from_consul/ │ │ ├── README.md │ │ ├── artifact.nomad │ │ ├── init.nomad │ │ └── issue.nomad │ ├── learning/ │ │ └── README.md │ ├── rerender/ │ │ └── example.nomad │ ├── secure_variables/ │ │ ├── README.md │ │ ├── example.nomad │ │ ├── interpolated_job/ │ │ │ ├── README.md │ │ │ ├── interpolated_job.hcl │ │ │ └── makeJobVars.sh │ │ ├── makeJobVars.sh │ │ ├── makeVars.sh │ │ ├── multiregion/ │ │ │ ├── start.sh │ │ │ ├── stop.sh │ │ │ ├── template.nomad │ │ │ ├── test.out │ │ │ └── test.tmpl │ │ ├── template copy.tmpl │ │ ├── template-playground.nomad │ │ ├── template.html │ │ ├── template.tmpl │ │ ├── variable_view.nomad │ │ └── write/ │ │ ├── t0.out │ │ ├── t0.tmpl │ │ ├── t1.out │ │ ├── t1.tmpl │ │ ├── t2.out │ │ └── t2.tmpl │ ├── services/ │ │ ├── README.md │ │ └── byTag.nomad │ ├── template-system/ │ │ ├── README.md │ │ ├── composed_keys.nomad │ │ ├── services-on-nomad-client.nomad │ │ └── template.nomad │ ├── template_handoff/ │ │ ├── README.md │ │ ├── handoff.nomad │ │ └── handoff_restart.nomad │ ├── template_into_docker/ │ │ └── example.nomad │ ├── template_playground/ │ │ ├── composed_keys.nomad │ │ ├── template-exec.nomad │ │ ├── template-hcl2.nomad │ │ └── template.nomad │ └── use_whitespace/ │ └── byTag.nomad ├── test.sh ├── vault/ │ ├── deleted_policy/ │ │ ├── README.md │ │ ├── break_it.sh │ │ ├── nomad-cluster-role.broken.json │ │ ├── nomad-cluster-role.json │ │ ├── nomad-server-policy.hcl │ │ ├── setup.sh │ │ ├── temp1.nomad │ │ └── workload.nomad │ ├── pki/ │ │ ├── README.md │ │ ├── sleepy_bash_pki.nomad │ │ └── 
test.nomad │ └── sleepy_vault_bash/ │ ├── sleepy_bash.nomad │ └── test.nomad ├── vault_reload_triggered_by_consul/ │ ├── README.md │ ├── SleepyEcho.sh │ └── sample.nomad ├── victoriametrics/ │ └── vm.nomad ├── win_rawexec_restart/ │ ├── SleepyEcho.ps1 │ └── artifact_sleepyecho.nomad └── windows_docker/ ├── docker-iis.nomad └── windows-test.nomad ================================================ FILE CONTENTS ================================================ ================================================ FILE: .envrc ================================================ echo "Processing .direnv..." function template { echo "Creating a skeleton tutorial in $1." mkdir -p $1 cp $(pwd)/guide/TUTORIAL_TEMPLATE.mdx $1/README.md } echo "Done." ================================================ FILE: .gitignore ================================================ .DS_Store ================================================ FILE: HCL2/add_local_file/README.md ================================================ # Include a Local File at Job Runtime You can use the HCL2 file function and a runtime variable to include a file in your Nomad jobs. **These files should be small because they are stored in the Nomad server state until the job is eligible for garbage collection.** ## Techniques ### Use the HCL2 file() function - [`use_file.nomad`] — demonstrates the file function. This allows you to include a template to be rendered. ### Wrap included files Nomad will inject the file content into the template stanza directly and it will be rendered by the client. You might want to prevent Nomad from seeing the content as renderable. There are a few techniques that you can use for this. - [`raw_file_delims.nomad`] — Uses alternative delimiters for the template stanza. These delimiter characters must never appear in the included file content. You can use interesting characters like emoji as delimiters because of golang's Unicode support. - [`raw_file_json.nomad`] — JSON encodes the file and uses the Nomad template engine to decode it on the client. The input file must not contain the default template delimiters (`{{` and `}}`) or you must redefine them because they are not escaped.
You can even use emoji, depending on OS support.

!["Image of the Nomad UI's job definition tab showing the "prohibited" emoji as LeftDelimiter and RightDelimiter"](doc/emoji-delimiters.png "Emoji are fun and functional.")
- [`raw_file_b64.nomad`] — demonstrates using base64 as a means to wrap your included file so that it is only unwrapped on the destination client.

## Explore

This directory contains a test file you can use named `input.file`, or you can supply your own file to include.

### Run the job

The jobs all define an input variable named `input_file`. You must supply the path to the file to include, either as an environment variable or as a flag.

#### Environment variable

```
export NOMAD_VAR_input_file=./input.file
nomad job run use_file.nomad
```

#### Flag

```
nomad job run -var "input_file=./input.file" use_file.nomad
```

### Inspect the job

Run the `nomad job inspect` command to see how the JSON job specification represents the job. Some techniques leave the file contents perfectly clear and some obscure them completely.

```
nomad job inspect use_file.nomad
```

### Get the logs from the allocation

Get the allocation ID from the output of the `nomad job run` command and fetch the logs.

```
nomad alloc logs «alloc_id»
```

### Stop the job

```
nomad job stop use_file.nomad
```

## About the job

The job contains one task. Nomad renders the `template` stanza's content—the included file—into the task's `local` directory. The task uses Nomad's Docker task driver to start an `alpine:latest` container that runs `cat` on the rendered file and sleeps until stopped.

[`use_file.nomad`]: ./use_file.nomad
[`raw_file_delims.nomad`]: ./raw_file_delims.nomad
[`raw_file_json.nomad`]: ./raw_file_json.nomad
[`raw_file_b64.nomad`]: ./raw_file_b64.nomad

================================================
FILE: HCL2/add_local_file/input.file
================================================
This is the input file content

Particularly evil stuff:
Single quotes: 'hello'
Double quotes: "howdy"
Go-template: {{ "hello" }}
Backticks: `this is a raw-string in go, but raw strings can't be in rawstrings`
JSON: { "object": { "foo": true, "bar": 5, "baz": [1,2,3] } }

================================================
FILE: HCL2/add_local_file/raw_file_b64.nomad
================================================
variable "input_file" {
  type        = string
  description = "local path to the file to inject into the job."
}

job "raw_file_b64.nomad" {
  datacenters = ["dc1"]
  group "services" {
    task "alpine" {
      driver = "docker"
      config {
        image   = "alpine"
        command = "sh"
        args = [
          "-c",
          "cat local/file.out; while true; do sleep 30; done",
        ]
      }
      template {
        destination = "local/file.out"
        data        = "{{base64Decode \"${base64encode(file(var.input_file))}\"}}"
      }
    }
  }
}

================================================
FILE: HCL2/add_local_file/raw_file_delims.nomad
================================================
variable "input_file" {
  type        = string
  description = "local path to the file to inject into the job."
}

job "raw_file_delims.nomad" {
  datacenters = ["dc1"]
  group "services" {
    task "alpine" {
      driver = "docker"
      config {
        image   = "alpine"
        command = "sh"
        args = [
          "-c",
          "cat local/file.out; while true; do sleep 30; done",
        ]
      }
      template {
        destination     = "local/file.out"
        data            = file(var.input_file)
        left_delimiter  = "🚫"
        right_delimiter = "🚫"
      }
    }
  }
}

================================================
FILE: HCL2/add_local_file/raw_file_json.nomad
================================================
variable "input_file" {
  type        = string
  description = "local path to the file to inject into the job."
} job "raw_file_json.nomad" { datacenters = ["dc1"] group "services" { task "alpine" { driver = "docker" template { destination = "local/file.out" } config { image = "alpine" command = "bash" args = [ "-c", "cat local/file.out; while true; do sleep 30; done", ] } template { destination = "local/file.out" data = "{{jsonDecode \"${jsonencode(file(var.input_file))}\"}}" } } } } ================================================ FILE: HCL2/add_local_file/use_file.nomad ================================================ variable "input_file" { type = string description = "local path to the redis configuration to inject into the job." } job "use_file.nomad" { datacenters = ["dc1"] group "services" { task "alpine" { driver = "docker" config { image = "alpine" command = "sh" args = [ "-c", "cat local/file.out; while true; do sleep 30; done", ] } template { destination = "local/file.out" data = file(var.input_file) } } } } ================================================ FILE: HCL2/always_change/README.md ================================================ # Use HCL2 to make re-runnable batch jobs Nomad will refuse to run a batch job again unless it detects a change to the job. This behavior exists to prevent duplicate job submissions from creating unnecessary work—unchanged jobs are "the same job" to Nomad. A Nomad job's `meta` stanza is an ideal place to make changes to a Nomad job that do not change the behavior of the job itself. Some ways to provide variation in a meta value are using an HCL2 variable or the `uuidv4()` function. - [`before.nomad`]—Demonstrates the normal behavior. - [`uuid.nomad`]—Use a random UUID to change the job every time it's run. This guarantees that Nomad will always run the submitted job. - [`variable.nomad`]—Submit a variable at runtime. This can preserve the single run behavior in cases where the job submission is a duplicate. ## Nomad's default behavior Run the `before.nomad` job. Nomad will start a copy of the `hello-world:latest` docker container. This container outputs some text and exits. ```text $ nomad run before.nomad ==> Monitoring evaluation "1fef4d80" Evaluation triggered by job "before.nomad" ==> Monitoring evaluation "1fef4d80" Allocation "7e6a767b" created: node "14ab9290", group "before" Evaluation status changed: "pending" -> "complete" ==> Evaluation "1fef4d80" finished with status "complete" ``` Check the status of the allocation created by the run command. ```text $ nomad alloc status 7eg ID = 7e6a767b-5604-5268-653b-905948928de5 Eval ID = 1fef4d80 Name = before.nomad.before[0] Node ID = 14ab9290 Node Name = nomad-client-2.node.consul Job ID = before.nomad Job Version = 0 Client Status = complete Client Description = All tasks have completed Desired Status = run Desired Description = Created = 6m55s ago Modified = 6m45s ago Task "hello-world" is "dead" Task Resources CPU Memory Disk Addresses 100 MHz 300 MiB 300 MiB Task Events: Started At = 2021-05-18T18:03:10Z Finished At = 2021-05-18T18:03:10Z Total Restarts = 0 Last Restart = N/A Recent Events: Time Type Description 2021-05-18T14:03:10-04:00 Terminated Exit Code: 0 2021-05-18T14:03:10-04:00 Started Task started by client 2021-05-18T14:03:01-04:00 Driver Downloading image 2021-05-18T14:03:01-04:00 Task Setup Building Task Directory 2021-05-18T14:03:01-04:00 Received Task received by client ``` As expected, the Docker container finished and exited with exit code 0. Check the status of the job to verify that its status is `dead`. 
```text
$ nomad status
ID            Type   Priority  Status  Submit Date
before.nomad  batch  50        dead    2021-05-18T14:03:00-04:00
```

Try running the `before.nomad` job again.

```text
$ nomad run before.nomad
==> Monitoring evaluation "a855fa2b"
    Evaluation triggered by job "before.nomad"
==> Monitoring evaluation "a855fa2b"
    Evaluation status changed: "pending" -> "complete"
==> Evaluation "a855fa2b" finished with status "complete"
```

Note that this time, Nomad did not schedule an allocation and the job remains dead. This is expected and is a safety feature of Nomad to prevent duplicated submissions of the same job from creating unnecessary duplicate work. If your job should always run, you can use one of the following techniques to inject variation in ways that don't require you to alter the job file's contents.

## Techniques

### Use a UUID as an ever-changing value

```text
$ nomad run uuid.nomad
==> Monitoring evaluation "27fe0c84"
    Evaluation triggered by job "uuid.nomad"
==> Monitoring evaluation "27fe0c84"
    Allocation "6de97aa7" created: node "14ab9290", group "uuid"
    Evaluation status changed: "pending" -> "complete"
==> Evaluation "27fe0c84" finished with status "complete"
```

```text
$ nomad alloc status 6de
ID                  = 6de97aa7-e6b1-c6bf-e8e0-16d5f7ed39bf
Eval ID             = 27fe0c84
Name                = uuid.nomad.uuid[0]
Node ID             = 14ab9290
Node Name           = nomad-client-2.node.consul
Job ID              = uuid.nomad
Job Version         = 0
Client Status       = complete
Client Description  = All tasks have completed
Desired Status      = run
Desired Description = 
Created             = 6m52s ago
Modified            = 6m50s ago

Task "hello-world" is "dead"
Task Resources
CPU      Memory   Disk     Addresses
100 MHz  300 MiB  300 MiB  

Task Events:
Started At     = 2021-05-18T18:07:33Z
Finished At    = 2021-05-18T18:07:33Z
Total Restarts = 0
Last Restart   = N/A

Recent Events:
Time                       Type        Description
2021-05-18T14:07:33-04:00  Terminated  Exit Code: 0
2021-05-18T14:07:33-04:00  Started     Task started by client
2021-05-18T14:07:31-04:00  Driver      Downloading image
2021-05-18T14:07:31-04:00  Task Setup  Building Task Directory
2021-05-18T14:07:31-04:00  Received    Task received by client
```

```text
$ nomad status
ID            Type   Priority  Status  Submit Date
uuid.nomad    batch  50        dead    2021-05-18T14:07:30-04:00
before.nomad  batch  50        dead    2021-05-18T14:03:00-04:00
```

```text
$ nomad run uuid.nomad
==> Monitoring evaluation "2943fe82"
    Evaluation triggered by job "uuid.nomad"
    Allocation "61f5861a" created: node "f7bc1f2d", group "uuid"
==> Monitoring evaluation "2943fe82"
    Evaluation status changed: "pending" -> "complete"
==> Evaluation "2943fe82" finished with status "complete"
```

Because the `run_uuid` meta value changes on every submission, Nomad schedules a fresh allocation each time.

### Use an HCL2 variable

Using a variable can allow you to leverage Nomad's default behavior of not running unchanged work, but also to provide a change to the job without requiring a round trip to source control.
```text
$ nomad run -var run_index=1 variable.nomad
==> Monitoring evaluation "454f6fb4"
    Evaluation triggered by job "variable.nomad"
==> Monitoring evaluation "454f6fb4"
    Allocation "74f9cbf5" created: node "f7bc1f2d", group "variable"
    Evaluation status changed: "pending" -> "complete"
==> Evaluation "454f6fb4" finished with status "complete"
```

```text
$ nomad alloc status 74f
ID                  = 74f9cbf5-a793-5022-c831-b83e31712725
Eval ID             = 454f6fb4
Name                = variable.nomad.variable[0]
Node ID             = f7bc1f2d
Node Name           = nomad-client-1.node.consul
Job ID              = variable.nomad
Job Version         = 0
Client Status       = complete
Client Description  = All tasks have completed
Desired Status      = run
Desired Description = 
Created             = 6m52s ago
Modified            = 6m48s ago

Task "hello-world" is "dead"
Task Resources
CPU      Memory   Disk     Addresses
100 MHz  300 MiB  300 MiB  

Task Events:
Started At     = 2021-05-18T18:21:27Z
Finished At    = 2021-05-18T18:21:27Z
Total Restarts = 0
Last Restart   = N/A

Recent Events:
Time                       Type        Description
2021-05-18T14:21:27-04:00  Terminated  Exit Code: 0
2021-05-18T14:21:27-04:00  Started     Task started by client
2021-05-18T14:21:24-04:00  Driver      Downloading image
2021-05-18T14:21:24-04:00  Task Setup  Building Task Directory
2021-05-18T14:21:24-04:00  Received    Task received by client
```

```text
$ nomad status
ID              Type   Priority  Status  Submit Date
variable.nomad  batch  50        dead    2021-05-18T14:21:23-04:00
```

Resubmit the job with the same `run_index` value—`1`.

```text
$ nomad run -var run_index=1 variable.nomad
==> Monitoring evaluation "4d7064ea"
    Evaluation triggered by job "variable.nomad"
==> Monitoring evaluation "4d7064ea"
    Evaluation status changed: "pending" -> "complete"
==> Evaluation "4d7064ea" finished with status "complete"
```

Note that Nomad does not re-run the job. Now, change the `run_index` value to `2` and run the command again.

```text
$ nomad run -var run_index=2 variable.nomad
==> Monitoring evaluation "73e7902f"
    Evaluation triggered by job "variable.nomad"
==> Monitoring evaluation "73e7902f"
    Allocation "9e8cbc58" created: node "f7bc1f2d", group "variable"
    Evaluation status changed: "pending" -> "complete"
==> Evaluation "73e7902f" finished with status "complete"
```

Nomad runs a fresh allocation of the batch job.

## Clean up

Run `nomad job stop variable.nomad` to stop the job.

[`before.nomad`]: ./before.nomad
[`uuid.nomad`]: ./uuid.nomad
[`variable.nomad`]: ./variable.nomad

================================================
FILE: HCL2/always_change/before.nomad
================================================
job "before.nomad" {
  datacenters = ["dc1"]
  type        = "batch"
  group "before" {
    task "hello-world" {
      driver = "docker"
      config {
        image = "hello-world:latest"
      }
    }
  }
}

================================================
FILE: HCL2/always_change/uuid.nomad
================================================
job "uuid.nomad" {
  datacenters = ["dc1"]
  type        = "batch"
  meta {
    run_uuid = "${uuidv4()}"
  }
  group "uuid" {
    task "hello-world" {
      driver = "docker"
      config {
        image = "hello-world:latest"
      }
    }
  }
}

================================================
FILE: HCL2/always_change/variable.nomad
================================================
job "variable.nomad" {
  datacenters = ["dc1"]
  type        = "batch"
  meta {
    run_index = "${floor(var.run_index)}"
  }
  group "variable" {
    task "hello-world" {
      driver = "docker"
      config {
        image = "hello-world:latest"
      }
    }
  }
}

variable "run_index" {
  type        = number
  description = "An integer that, when changed from the current value, causes the job to re-run."
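  # The validation below rejects non-integer values at submission time, so a
  # fractional run_index fails fast instead of being silently floored in meta.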
  validation {
    condition     = var.run_index == floor(var.run_index)
    error_message = "The run_index must be an integer."
  }
}

================================================
FILE: HCL2/dynamic/README.md
================================================
# HCL2 dynamic blocks

This job specification leverages the HCL2 `dynamic` block and HCL2 variables to create a multi-task job specification.

================================================
FILE: HCL2/dynamic/example.nomad
================================================
variable "job_name" {
  type    = string
  default = ""
}

locals {
  targets = {
    "1": "zpool"
    "2": "zmirror"
  }
  tasks = {
    "redis": {"name":"db","port":6379}
  }
  docker_versions = {
    "zpool": "redis:7"
    "zmirror": "redis:latest"
  }
  job_name = "%{ if var.job_name != "" }${var.job_name}%{ else }example%{ endif }"
}

job "example" {
  name        = local.job_name
  datacenters = ["dc1"]
  dynamic "group" {
    for_each = local.targets
    labels   = ["${local.job_name}-${group.value}"]
    content {
      network {
        dynamic "port" {
          labels   = ["${local.job_name}-${group.value}-${port.key}-${port.value.name}-${port.value.port}"]
          for_each = local.tasks
          content {
            to = port.value.port
          }
        }
      }
      dynamic "task" {
        labels   = ["${local.job_name}-${group.value}-${task.key}"]
        for_each = local.tasks
        content {
          driver = "docker"
          config {
            image = local.docker_versions[group.value]
            ports = ["${local.job_name}-${group.value}-${task.key}-${task.value.name}-${task.value.port}"]
          }
        }
      }
    }
  }
}

================================================
FILE: HCL2/object_to_template/README.md
================================================

================================================
FILE: HCL2/object_to_template/example.nomad
================================================
variable "datacenters" {
  type    = list(string)
  default = ["dc1"]
}

variable "ports" {
  type = list(object({
    name     = string
    internal = number
    external = number
  }))
  default = [
    {
      name     = "db"
      internal = 8300
      external = 8300
    },
    {
      name     = "db2"
      internal = 8301
      external = 8301
    }
  ]
}

job "example" {
  datacenters = var.datacenters
  type        = "batch"
  group "group" {
    task "task" {
      driver = "exec"
      config {
        command = "bash"
        args    = ["-c", "cat template.out"]
      }
      template {
        destination = "template.out"
        data        = <<EOT
{{ range parseJSON `${jsonencode(var.ports)}` }}{{ .name }} -> {{ .internal }}{{ println }}{{ end }}
EOT
      }
    }
  }
}

================================================
FILE: HCL2/variable_jobs/README.md
================================================
# Using HCL2 to add variables to Nomad jobs

Nomad's HCL2 support enables you to use variables in your Nomad job specifications. This can decrease the number of job files you have to maintain in source control and can encourage job reuse.

This example contains a job that consumes HCL2 variables and uses them to generate a Docker service job. The `job.nomad` file defines 3 variables:

- `datacenters` (default `[ "dc1" ]`)—a list of the Nomad datacenters to run the job in.
- `docker_image`—The docker image name to run. Since this is a service job, the image needs to run until explicitly stopped. The `redis` container is a small example that works well.
- `image_version`—The specific version of the `docker_image` image to run. For the `redis` container, try versions like `"3"`, `"4"`, and `"latest"`.

## Quickstart

### Run the example

```bash
nomad job run -var docker_image="redis" -var image_version="3" job.nomad
```

Nomad will start a `redis:3` container.

```bash
nomad job run -var docker_image="redis" -var image_version="latest" job.nomad
```

Nomad will stop the `redis:3` container and start a `redis:latest` container.
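You can also combine a variable file with individual `-var` flags in a single call. A minimal sketch, assuming the included `job.vars` file (which pins `image_version = "99"`) and relying on the precedence rules described below, where a `-var` flag overrides a value from a `-var-file`:

```bash
# image_version = "99" comes from job.vars, but the trailing -var flag
# overrides it with a tag that actually exists on Docker Hub.
nomad job run -var-file=job.vars -var docker_image="redis" -var image_version="latest" job.nomad
```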
## Stop the examples

```bash
nomad job stop job1
```

## Submitting variable values

There are three ways to provide values for HCL2 variables.

- Individual `-var` flags
- With a variable file and the `-var-file` flag
- Environment variables

You can use one or all of these methods in the same call. Flags override values from the environment. The flags are parsed in the order they are presented.

Precedence (highest to lowest):

- `-var` flag (if a variable repeats, the last one in the command line wins)
- `-var-file` flag (if a variable repeats in the files, the last one listed in the command line wins)
- environment variables

### Environment variables

To provide a value to the HCL2 engine via the environment, you need to create an environment variable named `NOMAD_VAR_«variable name»`. For example, to set the value of the `docker_image` variable, create an environment variable named `NOMAD_VAR_docker_image`.

## Using variable files with multiple jobs

The HCL2 engine expects every variable that you supply using the `-var` or `-var-file` flags to be consumed by the job specification. There are some techniques to work around this constraint:

- [Provide HCL2 variable values using environment variables](./env-vars)
- [Use multiple `-var-files`](./multiple-var-files)
- [Decode the contents of an external file into a `local` variable](./decode-external-file)

================================================
FILE: HCL2/variable_jobs/decode-external-file/README.MD
================================================
# Decode the contents of an external file into a `local` variable

The HCL2 `file` function, when paired with the `jsondecode` or `yamldecode` function, enables you to externalize shared configuration elements for Nomad jobs to a JSON or YAML file.

This example contains two jobs that read the `env.json` file and use values from it to configure the Nomad job during submission from the CLI.

## Run the examples

```bash
nomad job run -var="config_file=env.json" job1.nomad
```

Nomad will start a Redis 3 container

```bash
nomad job run -var="config_file=env.json" job2.nomad
```

Nomad will start a Redis 4 container

## Stop the examples

```bash
nomad job stop job1
nomad job stop job2
```

================================================
FILE: HCL2/variable_jobs/decode-external-file/env.json
================================================
{
  "datacenters": [
    "dc1"
  ],
  "docker_image_job1": "redis:3",
  "docker_image_job2": "redis:4"
}

================================================
FILE: HCL2/variable_jobs/decode-external-file/job1.nomad
================================================
#----------------------------------------------------------------------------
# This value can be supplied as a flag to nomad job run.
# `nomad job run -var config_file=«path to config» job1.nomad`
# or as an environment variable
# `export NOMAD_VAR_config_file=«path to config»`
# `nomad job run job1.nomad`
#----------------------------------------------------------------------------
variable "config_file" {
  type        = string
  description = "Path to JSON formatted shared job configuration."
}

locals {
  config = jsondecode(file(var.config_file))
}

job "job1" {
  datacenters = local.config.datacenters
  group "job1" {
    task "job1" {
      driver = "docker"
      config {
        image = local.config.docker_image_job1
      }
    }
  }
}

================================================
FILE: HCL2/variable_jobs/decode-external-file/job2.nomad
================================================
#----------------------------------------------------------------------------
# This value can be supplied as a flag to nomad job run.
# `nomad job run -var config_file=«path to config» job2.nomad`
# or as an environment variable
# `export NOMAD_VAR_config_file=«path to config»`
# `nomad job run job2.nomad`
#----------------------------------------------------------------------------
variable "config_file" {
  type        = string
  description = "Path to JSON formatted shared job configuration."
}

locals {
  config = jsondecode(file(var.config_file))
}

job "job2" {
  datacenters = local.config.datacenters
  group "job2" {
    task "job2" {
      driver = "docker"
      config {
        image = local.config.docker_image_job2
      }
    }
  }
}

================================================
FILE: HCL2/variable_jobs/env-vars/README.MD
================================================
# Provide HCL2 variable values using environment variables

This example contains two jobs that read HCL2 variable values from the environment and populate the Nomad job with them during submission from the CLI. This can be a very powerful feature when paired with [`direnv`], [`envconsul`], and other tools that can manipulate environment variables.

## Run the sample

### Read in the environment variables

```bash
source ./env.vars
```

```bash
nomad job run job1.nomad
```

Nomad will start a Redis 3 container

```bash
nomad job run job2.nomad
```

Nomad will start a Redis 4 container

## Stop the example

```bash
nomad job stop job1
nomad job stop job2

unset NOMAD_VAR_datacenters \
  NOMAD_VAR_docker_image_job1 \
  NOMAD_VAR_docker_image_job2
```

[`envconsul`]: https://github.com/hashicorp/envconsul
[`direnv`]: https://direnv.net/

================================================
FILE: HCL2/variable_jobs/env-vars/env.vars
================================================
export NOMAD_VAR_datacenters='["dc1"]'
export NOMAD_VAR_docker_image_job1="redis:3"
export NOMAD_VAR_docker_image_job2="redis:4"

================================================
FILE: HCL2/variable_jobs/env-vars/job1.nomad
================================================
variable "datacenters" {
  type        = list(string)
  description = "List of Nomad datacenters to run the job in."
}

variable "docker_image_job1" {
  type        = string
  description = "Image for job1 to run"
}

job "job1" {
  datacenters = var.datacenters
  group "job1" {
    task "job1" {
      driver = "docker"
      config {
        image = var.docker_image_job1
      }
    }
  }
}

================================================
FILE: HCL2/variable_jobs/env-vars/job2.nomad
================================================
variable "datacenters" {
  type        = list(string)
  description = "List of Nomad datacenters to run the job in."
}

variable "docker_image_job2" {
  type        = string
  description = "Image for job2 to run"
}

job "job2" {
  datacenters = var.datacenters
  group "job2" {
    task "job2" {
      driver = "docker"
      config {
        image = var.docker_image_job2
      }
    }
  }
}

================================================
FILE: HCL2/variable_jobs/job.nomad
================================================
variable "datacenters" {
  type        = list(string)
  description = "List of Nomad datacenters to run the job in. Defaults to `[\"dc1\"]`"
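  # Because `default` is set below, this variable is optional at submission
  # time; when unset, the job runs in ["dc1"].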
Defaults to `[\"dc1\"]`" default = ["dc1"] } variable "docker_image" { type = string description = "Docker image for the job to run" } variable "image_version" { type = string description = "Version of the docker image to run" } job "job1" { datacenters = var.datacenters group "job1" { task "job1" { driver = "docker" config { image = "${var.docker_image}:${var.image_version}" } } } } ================================================ FILE: HCL2/variable_jobs/job.vars ================================================ image_version = "99" ================================================ FILE: HCL2/variable_jobs/multiple-var-files/README.MD ================================================ # Provide HCL2 variable values using environment variables This example contains two jobs that consumes multiple HCL2 variable files and populates the Nomad job with them during submission from the CLI. The `shared.vars` file defines 2 variables: - `datacenters = [ "dc1" ]` - `docker_image = "redis"` The job .vars files set the `image_version_«job name»` value to complete the job specification. ## Run the examples ```bash nomad job run -var-file=./shared.vars -var-file=./job1.vars job1.nomad ``` Nomad will start a Redis 3 container ```bash nomad job run -var-file=./shared.vars -var-file=./job2.vars job2.nomad ``` Nomad will start a Redis 4 container ```bash nomad job run -var-file=./shared.vars -var-file=./job3.vars job3.nomad ``` Nomad will start a hello-world:latest container by overriding docker_image from the `./shared.vars` file. ## Stop the examples ```bash nomad job stop job1 nomad job stop job2 nomad job stop job3 ``` ================================================ FILE: HCL2/variable_jobs/multiple-var-files/job1.nomad ================================================ variable "datacenters" { type = list(string) description = "Path to JSON formatted shared job configuration." } variable "docker_image" { type = string description = "Shared docker image" } variable "image_version_job1" { type = string description = "Docker image version to run for job1" } job "job1" { datacenters = var.datacenters group "job1" { task "job1" { driver = "docker" config { image = "${var.docker_image}:${var.image_version_job1}" } } } } ================================================ FILE: HCL2/variable_jobs/multiple-var-files/job1.vars ================================================ image_version_job1 = "3" ================================================ FILE: HCL2/variable_jobs/multiple-var-files/job2.nomad ================================================ variable "datacenters" { type = list(string) description = "Path to JSON formatted shared job configuration." } variable "docker_image" { type = string description = "Shared docker image" } variable "image_version_job2" { type = string description = "Docker image version to run for job2" } job "job2" { datacenters = var.datacenters group "job2" { task "job2" { driver = "docker" config { image = "${var.docker_image}:${var.image_version_job2}" } } } } ================================================ FILE: HCL2/variable_jobs/multiple-var-files/job2.vars ================================================ image_version_job2 = "4" ================================================ FILE: HCL2/variable_jobs/multiple-var-files/job3.nomad ================================================ variable "datacenters" { type = list(string) description = "Path to JSON formatted shared job configuration." 
} variable "docker_image" { type = string description = "Shared docker image" } variable "image_version_job3" { type = string description = "Docker image version to run for job3" } job "job3" { datacenters = var.datacenters group "job3" { task "job3" { driver = "docker" config { image = "${var.docker_image}:${var.image_version_job3}" } } } } ================================================ FILE: HCL2/variable_jobs/multiple-var-files/job3.vars ================================================ docker_image = "hello-world" image_version_job3 = "latest" ================================================ FILE: HCL2/variable_jobs/multiple-var-files/shared.vars ================================================ datacenters = [ "dc1" ] docker_image = "redis" ================================================ FILE: README.md ================================================ # Nomad Example Jobs This repository holds jobs and job skeletons that I have used to create reproducers or minimum viable cases. I use them when creating guides as simple workloads as well. Some specifically useful bits: - **csi** - Example jobs that use CSI to connect to external resources such as block devices. - **fabio** - Several different fabio configurations that can be used to spin up consul-aware load balancing in your Nomad cluster. - **sleepy** - Jobs that do a thing and then sleep (perhaps redoing the thing when they wake up). - **template_playground** - a batch job that can be used to practice iterative template development. ================================================ FILE: alloc_folder/mount_alloc.nomad ================================================ job "alloc_folder" { datacenters = ["dc1"] group "group" { task "docker" { driver = "docker" config { image = "busybox:latest" command = "sh" args = ["-c", "while true; do echo $(date) | tee -a /my_data/output.txt; sleep 2; done"] volumes = ["alloc/data:/my_data"] } resources { cpu = 100 memory = 100 } } } } ================================================ FILE: alloc_folder/sidecar.nomad ================================================ job "alloc_folder" { datacenters = ["dc1"] group "group" { task "docker" { driver = "docker" config { image = "busybox:latest" command = "sh" args = ["-c", "while true; do echo $(date) | tee -a /alloc/output.txt; sleep 2; done"] } resources { cpu = 100 memory = 100 } } task "exec" { driver = "exec" config { command = "tail" args = ["-f", "/alloc/output.txt"] } resources { cpu = 100 memory = 100 } } } } ================================================ FILE: applications/artifactory_oss/README.md ================================================ # Docker Registry This job uses Nomad Host Volumes to provide an internal Docker registry which can be used to host private containers for a Nomad cluster. ## Prerequisites - **Consul** - This job leverages Consul service registrations for locating the registry instances. ## Necessary configuration ### Create the host volume in the configuration Create a folder on one of your Nomad clients to host your registry files. This example uses `/opt/volumes/docker-registry` ```shell-session $ mkdir -p /opt/volumes/docker-registry ``` Add the host_volume information to the client stanza in the Nomad configuration. ```hcl client { # ... host_volume "docker-registry" { path = "/opt/volumes/docker-registry" read_only = false } } ``` Restart Nomad to read the new configuration. 
```shell-session
$ systemctl restart nomad
```

### Add your registry to your daemon.json file

If you would like to use your registry with Nomad and do not want to configure SSL, you can add the following to the `daemon.json` file on each of your Nomad clients and restart Docker.

```json
{
  "insecure-registries" : ["registry.service.consul:5000"]
}
```

You will need to do this on any machine that you would like to push to or pull from your registry.

================================================
FILE: applications/artifactory_oss/registry.nomad
================================================
job "registry" {
  datacenters = ["dc1"]
  priority    = 80
  group "docker" {
    network {
      port "registry" {
        to     = 5000
        static = 5000
      }
    }
    service {
      name = "registry"
      port = "registry"
      check {
        type     = "tcp"
        port     = "registry"
        interval = "10s"
        timeout  = "2s"
      }
    }
    volume "artifactory-registry" {
      type      = "host"
      source    = "artifactory-registry"
      read_only = false
    }
    task "container" {
      driver = "docker"
      volume_mount {
        volume      = "artifactory-registry"
        destination = "/var/lib/registry"
      }
      config {
        image = "docker.bintray.io/jfrog/artifactory-oss:latest"
        ports = ["registry"]
      }
      resources {
        cpu    = 500
        memory = 256
      }
    }
  }
}

================================================
FILE: applications/cluster-broccoli/example.nomad
================================================
job "example" {
  datacenters = ["dc1"]
  group "cache" {
    network {
      port "db" {
        to = 6379
      }
    }
    task "redis" {
      driver = "docker"
      config {
        image          = "redis:7"
        ports          = ["db"]
        auth_soft_fail = true
      }
      resources {
        cpu    = 500
        memory = 256
      }
    }
  }
}

================================================
FILE: applications/docker_registry/README.md
================================================
# Docker Registry

This job uses Nomad Host Volumes to provide an internal Docker registry which can be used to host private containers for a Nomad cluster.

## Prerequisites

- **Consul** - This job leverages Consul service registrations for locating the registry instances.

## Necessary configuration

### Create the host volume in the configuration

Create a folder on one of your Nomad clients to host your registry files. This example uses `/opt/volumes/docker-registry`

```shell-session
$ mkdir -p /opt/volumes/docker-registry
```

Add the host_volume information to the client stanza in the Nomad configuration.

```hcl
client {
  # ...
  host_volume "docker-registry" {
    path      = "/opt/volumes/docker-registry"
    read_only = false
  }
}
```

Restart Nomad to read the new configuration.

```shell-session
$ systemctl restart nomad
```

### Add your registry to your daemon.json file

If you would like to use your registry with Nomad and do not want to configure SSL, you can add the following to the `daemon.json` file on each of your Nomad clients and restart Docker.

```json
{
  "insecure-registries" : ["registry.service.consul:5000"]
}
```

You will need to do this on any machine that you would like to push to or pull from your registry.
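Once the registry job is running, you can smoke-test it from any Docker host you configured above. A minimal sketch, assuming `registry.service.consul` resolves on that machine (for example, through Consul DNS forwarding):

```shell-session
$ docker pull alpine:latest
$ docker tag alpine:latest registry.service.consul:5000/alpine:latest
$ docker push registry.service.consul:5000/alpine:latest
$ docker pull registry.service.consul:5000/alpine:latest
```

A successful push and pull exercises both the `insecure-registries` entry and the host volume behind the registry.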
================================================
FILE: applications/docker_registry/registry.nomad
================================================
job "registry" {
  datacenters = ["dc1"]
  priority    = 80
  group "docker" {
    network {
      port "registry" {
        to     = 5000
        static = 5000
      }
    }
    service {
      name = "registry"
      port = "registry"
      check {
        type     = "tcp"
        port     = "registry"
        interval = "10s"
        timeout  = "2s"
      }
    }
    volume "docker-registry" {
      type      = "host"
      source    = "docker-registry"
      read_only = false
    }
    task "container" {
      driver = "docker"
      volume_mount {
        volume      = "docker-registry"
        destination = "/var/lib/registry"
      }
      config {
        image = "registry"
        ports = ["registry"]
      }
      resources {
        cpu    = 500
        memory = 256
      }
    }
  }
}

================================================
FILE: applications/docker_registry_v2/README.md
================================================
# Docker Registry

This job uses Nomad Host Volumes to provide an internal Docker registry which can be used to host private containers for a Nomad cluster.

## Prerequisites

- **Consul** - This job leverages Consul service registrations for locating the registry instances.

## Necessary configuration

### Create the host volume in the configuration

Create a folder on one of your Nomad clients to host your registry files. This example uses `/opt/nomad/volumes/docker-registry`

```shell-session
$ mkdir -p /opt/nomad/volumes/docker-registry
```

Add the host_volume information to the client stanza in the Nomad configuration.

```hcl
client {
  # ...
  host_volume "docker-registry" {
    path      = "/opt/nomad/volumes/docker-registry"
    read_only = false
  }
}
```

Restart Nomad to read the new configuration.

```shell-session
$ systemctl restart nomad
```

### Add your registry to your daemon.json file

If you would like to use your registry with Nomad and do not want to configure SSL, you can add the following to the `daemon.json` file on each of your Nomad clients and restart Docker.

```json
{
  "insecure-registries" : ["registry.service.consul:5000"]
}
```

You will need to do this on any machine that you would like to push to or pull from your registry.

================================================
FILE: applications/docker_registry_v2/htpasswd
================================================
user:$2y$05$kyEyguS/Sisz7SMjqKQZ1eQDCM7pSFiItkL9yiVIDOVyQfj8XTCAS

================================================
FILE: applications/docker_registry_v2/make_password.sh
================================================
#!/bin/bash
docker run --rm -it -v $(pwd):/out --entrypoint="htpasswd" xmartlabs/htpasswd -Bbc /out/$1 $2 $3

================================================
FILE: applications/docker_registry_v2/registry.nomad
================================================
job "registry" {
  datacenters = ["dc1"]
  priority    = 80
  group "docker" {
    network {
      port "registry" {
        to     = 5000
        static = 5000
      }
    }
    service {
      name = "registry"
      port = "registry"
      check {
        type     = "tcp"
        port     = "registry"
        interval = "10s"
        timeout  = "2s"
      }
    }
    volume "docker-registry" {
      type      = "host"
      source    = "docker-registry"
      read_only = false
    }
    task "container" {
      driver = "docker"
      template {
        destination = "secrets/htpasswd"
        data = <&2 exit 1 fi echo 'Notice: htpasswd is not installed. Using docker to run it.'
>&2 fetchedDocker=true cmd="docker run --rm -it -v $(pwd):/out --entrypoint="htpasswd" xmartlabs/htpasswd -Bbn $1 $2" fi user=$1 password=$(eval $cmd | tr -d "\n"| tr ":" " " | awk '{print $2}') varPath="nomad/jobs/registry/docker/container" nomad var get $varPath | nomad var put - "$user"="$password" ================================================ FILE: applications/docker_registry_v3/registry.nomad ================================================ job "registry" { datacenters = ["dc1"] priority = 80 group "docker" { network { port "registry" { to = 5000 static = 5000 } } service { name = "registry" port = "registry" check { type = "tcp" port = "registry" interval = "10s" timeout = "2s" } } volume "docker-registry" { type = "host" source = "docker-registry" read_only = false } task "container" { driver = "docker" template { destination = "secrets/htpasswd" data = < http://localhost:2000/bank/37050198 service-proxy.sh ``` #!/bin/bash homeSet() { echo "MEMBRANE_HOME variable is now set" CLASSPATH="$MEMBRANE_HOME/conf" CLASSPATH="$CLASSPATH:$MEMBRANE_HOME/starter.jar" export CLASSPATH echo Membrane Router running... java -classpath "$CLASSPATH" com.predic8.membrane.core.Starter -c proxies.xml } terminate() { echo "Starting of Membrane Router failed." echo "Please execute this script from the appropriate subfolder of MEMBRANE_HOME/examples/" } homeNotSet() { echo "MEMBRANE_HOME variable is not set" if [ -f "`pwd`/../../starter.jar" ] then export MEMBRANE_HOME="`pwd`/../.." homeSet else terminate fi } if [ "$MEMBRANE_HOME" ] then homeSet else homeNotSet fi ``` ================================================ FILE: applications/membrane-soa/soap-proxy-v1-linux.nomad ================================================ job "soap-proxy" { datacenters = ["dc1"] group "membrane" { network { port "admin" { static = 9000 } port "proxy" { static = 2000 } } task "membrane" { artifact { source = "https://github.com/membrane/service-proxy/releases/download/v4.7.3/membrane-service-proxy-4.7.3.zip" destination = "local" } template { destination = "local/proxy-conf/proxies.xml" data =< EOD } template { destination = "local/proxy-conf/get2soap.xsl" data =< EOD } template { destination = "local/proxy-conf/strip-env.xsl" data =< EOD } env { MEMBRANE_HOME = "/local/membrane-service-proxy-4.7.3" } driver = "java" config { class = "com.predic8.membrane.core.Starter" class_path = "/local/membrane-service-proxy-4.7.3/conf:/local/membrane-service-proxy-4.7.3/starter.jar" args = ["-c","/local/proxy-conf/proxies.xml"] } resources { cpu = 500 memory = 256 } } } } ================================================ FILE: applications/membrane-soa/soap-proxy-v1-windows.nomad ================================================ job "soap-proxy" { datacenters = ["dc1"] group "membrane" { network { port "admin" { static = 9000 } port "proxy" { static = 2000 } } task "membrane" { artifact { source = "https://github.com/membrane/service-proxy/releases/download/v4.7.3/membrane-service-proxy-4.7.3.zip" destination = "local" } template { destination = "local/proxy-conf/proxies.xml" data =< EOD } template { destination = "local/proxy-conf/get2soap.xsl" data =< EOD } template { destination = "local/proxy-conf/strip-env.xsl" data =< EOD } env { MEMBRANE_HOME = "local/membrane-service-proxy-4.7.3" } driver = "java" config { class = "com.predic8.membrane.core.Starter" class_path = "local/membrane-service-proxy-4.7.3/conf;/local/membrane-service-proxy-4.7.3/starter.jar" args = ["-c","local/proxy-conf/proxies.xml"] } resources { cpu = 500 memory 
= 256 } } } }

================================================
FILE: applications/membrane-soa/soap-proxy.nomad
================================================
locals {
  membrane_home = "/local/membrane-service-proxy-4.7.3"
  class_path    = "${local.membrane_home}/conf:${local.membrane_home}/starter.jar"
}

job "soap-proxy" {
  datacenters = ["dc1"]
  group "membrane" {
    network {
      mode = "bridge"
      dns {
        servers = ["8.8.8.8", "8.8.4.4"]
      }
      port "admin" {
        to = 9000
      }
      port "proxy" {
        to = 2000
      }
    }
    task "membrane" {
      artifact {
        source      = "https://github.com/membrane/service-proxy/releases/download/v4.7.3/membrane-service-proxy-4.7.3.zip"
        destination = "local"
      }
      template {
        destination = "local/proxy-conf/proxies.xml"
        data =< EOD
      }
      template {
        destination = "local/proxy-conf/get2soap.xsl"
        data =< EOD
      }
      template {
        destination = "local/proxy-conf/strip-env.xsl"
        data =< EOD
      }
      env {
        MEMBRANE_HOME = "/local/membrane-service-proxy-4.7.3"
      }
      driver = "java"
      config {
        class      = "com.predic8.membrane.core.Starter"
        class_path = "/local/membrane-service-proxy-4.7.3/conf:/local/membrane-service-proxy-4.7.3/starter.jar"
        args       = ["-c", "/local/proxy-conf/proxies.xml"]
      }
      # driver = "exec"
      # config {
      #   command = "/bin/bash"
      #   args    = ["-c","while true; do sleep 500; done"]
      # }
      resources {
        cpu    = 500
        memory = 256
      }
    }
  }
}

================================================
FILE: applications/minio/README.md
================================================
# Minio S3-compatible Storage

This job uses Nomad Host Volumes to provide an internal S3-compatible storage environment which can be used to host private artifacts for a Nomad cluster.

## Prerequisites

- **Consul** - This job leverages Consul service registrations for locating the MinIO instance.

## Necessary configuration

### Create the host volume in the configuration

Create a folder on one of your Nomad clients to host your MinIO data files. This example uses `/opt/volumes/minio-data`

```shell-session
$ mkdir -p /opt/volumes/minio-data
```

Add the host_volume information to the client stanza in the Nomad configuration.

```hcl
client {
  # ...
  host_volume "minio-data" {
    path      = "/opt/volumes/minio-data"
    read_only = false
  }
}
```

Restart Nomad to read the new configuration.
```shell-session $ systemctl restart nomad ``` ================================================ FILE: applications/minio/minio.nomad ================================================ job "minio" { datacenters = ["dc1"] priority = 80 group "storage" { network { port "api" { to = 9000 static = 9000 } } service { name = "minio" port = "api" check { type = "tcp" port = "api" interval = "10s" timeout = "2s" } } volume "minio-data" { type = "host" source = "minio-data" read_only = false } task "minio" { driver = "docker" env { MINIO_ROOT_USER = "AKIAIOSFODNN7EXAMPLE" MINIO_ROOT_PASSWORD = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" } volume_mount { volume = "minio-data" destination = "/data" } config { image = "minio/minio" args = ["server", "/data"] ports = ["api"] } resources { cpu = 500 memory = 256 } } } } # docker run -p 9000:9000 \ # --name minio1 \ # -v /mnt/data:/data \ # -e "MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE" \ # -e "MINIO_ROOT_PASSWORD=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \ # minio/minio server /data ================================================ FILE: applications/minio/secure-variables/README.md ================================================ # Minio S3-compatible Storage This job uses Nomad Host Volumes to provide an internal s3 compatible storage environment which can be used to host private artifacts for a Nomad clusters. ## Prerequisites - **Nomad 1.4** - This job leverages Nomad service registrations for locating the MinIO instance and used Nomad Variables. ## Necessary configuration ### Create the host volume in the configuration Create a folder on one of your Nomad clients to host your registry files. This example uses `/opt/volumes/minio-data` ```shell-session $ mkdir -p /opt/volumes/minio-data ``` Add the host_volume information to the client stanza in the Nomad configuration. ```hcl client { # ... host_volume "minio-data" { path = "/opt/volumes/minio-data" read_only = false } } ``` Restart Nomad to read the new configuration. ```shell-session $ systemctl restart nomad ``` ================================================ FILE: applications/minio/secure-variables/minio-data/.gitkeep ================================================ ================================================ FILE: applications/minio/secure-variables/minio.nomad ================================================ # minio is an AWS S3-compatible storage engine job "minio" { datacenters = ["dc1"] priority = 80 group "storage" { network { port "api" { to = 9000 static = 9000 } } service { name = "minio" port = "api" provider = "nomad" check { type = "tcp" port = "api" interval = "10s" timeout = "2s" } } volume "minio-data" { type = "host" source = "minio-data" read_only = false } task "minio" { driver = "docker" template { destination = "${NOMAD_SECRETS_DIR}/env.vars" env = true change_mode = "restart" data =< .volume_patch.hcl nohup nomad agent -dev -config=.volume_patch.hcl -acl-enabled >nomad.log 2>&1 & echo -n $! 
> .nomad.pid
echo "Nomad PID is $(cat .nomad.pid)"
disown

# wait for leadership
sleep 3
echo '{"BootstrapSecret": "2b778dd9-f5f1-6f29-b4b4-9a5fa948757a"}' | nomad operator api /v1/acl/bootstrap
echo ''
export NOMAD_TOKEN=2b778dd9-f5f1-6f29-b4b4-9a5fa948757a
echo -n ${NOMAD_TOKEN} > .nomad.token

nomad var put nomad/jobs/minio/storage/minio \
  root_user="AKIAIOSFODNN7EXAMPLE" \
  root_password="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"

nomad job run -detach minio.nomad

echo 'export NOMAD_TOKEN=2b778dd9-f5f1-6f29-b4b4-9a5fa948757a'

================================================
FILE: applications/minio/secure-variables/stop.sh
================================================
#! /usr/bin/env bash
PID=$(cat .nomad.pid)
echo "Stopping Nomad (pid: ${PID})"
rm -rf .nomad.pid
rm -rf .nomad.token
rm -rf .volume_patch.hcl
rm -rf nomad.log
rm -rf minio_data
echo "Done."

================================================
FILE: applications/minio/secure-variables/volume.hcl
================================================
# The host volume configuration for the minio task. The start.sh
# script will make a derived copy of this file with the place-
# holder--«/absolute/path/to»--replaced with the output of `pwd`
client {
  host_volume "minio-data" {
    path      = "«/absolute/path/to»/minio-data"
    read_only = false
  }
}

================================================
FILE: applications/postgres/README.md
================================================
# Stateful example of Postgres with Host Volumes

## Configure a supportive host volume

This job uses a volume named `pg-data`. On one of your Nomad clients, either create an additional configuration file (if your `config` points to a directory) or add a `host_volume` stanza to your existing client configuration similar to the following.

```hcl
client {
  host_volume "pg-data" {
    path      = "/opt/nomad/volumes/pg-data"
    read_only = false
  }
}
```

Create the directory to support the volume.

```shell-session
$ mkdir -p /opt/nomad/volumes/pg-data
```

Restart Nomad to enable the new host volume.

```shell-session
$ systemctl restart nomad
```

Verify that the host volume is available.

```shell-session
$ nomad node status -self -verbose
```

Once the client finishes starting, you should see the `pg-data` host volume listed in the **Host Volumes** section of the output.

```
Host Volumes
Name     ReadOnly  Source
pg-data  false     /opt/nomad/volumes/pg-data
```

Run the job.

```shell-session
$ nomad job run postgres.nomad
```

Once the job starts, check the allocation status to determine what IP and port you need to connect to. Connect to the instance using a Postgres client at the scheduled IP address and port. Use user `postgres` and password `mysecretpassword`.
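For example, if the allocation landed at 10.0.1.12 with dynamic port 28412 (placeholder values; read the real ones from `nomad alloc status`), a connection with the standard `psql` client might look like this:

```shell-session
$ psql -h 10.0.1.12 -p 28412 -U postgres postgres
```

`psql` will prompt for the password interactively.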
================================================ FILE: applications/postgres/postgres.nomad ================================================ job "postgres.nomad" { datacenters = ["dc1"] group "database" { network { port "db" { to = 5432 } } service { name = "db" port = "db" check { type = "tcp" port = "db" interval = "10s" timeout = "2s" } } volume "pg-data" { type = "host" source = "pg-data" read_only = false } task "postgres" { driver = "docker" env { POSTGRES_PASSWORD="mysecretpassword" # POSTGRES_USER="" # POSTGRES_DB="" PGDATA="/var/lib/postgresql/data/pgdata" } volume_mount { volume = "pg-data" destination = "/var/lib/postgresql/data" } config { image = "postgres" ports = ["db"] } resources { cpu = 500 memory = 256 } } } } ================================================ FILE: applications/prometheus/README.md ================================================ # Prometheus On the client, you will need a rule to allow the docker containers to talk to the local consul agents. ``` firewall-cmd --permanent --zone=public --add-rich-rule='rule family=ipv4 source address=172.17.0.0/16 accept' && firewall-cmd --reload ``` ## Connecting to the instances ================================================ FILE: applications/prometheus/fabio-service.nomad ================================================ # For ACL-enabled Consul Clusters, you need to specify a Consul ACL token down # in the `fabio-linux-amd64` task's env stanza. Uncomment the example and # replace the token with a valid Consul ACL token. job "fabio" { datacenters = ["dc1"] type = "system" update { stagger = "5s" max_parallel = 1 } group "fabio-linux-amd64" { network { port "http" { static = "9999" } port "ui" { static = "9998" } } task "fabio-linux-amd64" { constraint { attribute = "${attr.cpu.arch}" operator = "=" value = "amd64" } constraint { attribute = "${attr.kernel.name}" operator = "=" value = "linux" } artifact { source = "https://github.com/fabiolb/fabio/releases/download/v1.5.15/fabio-1.5.15-go1.15.5-linux_amd64" options { checksum = "sha256:14c7a02ca95fb00a4f3010eab4e3c0e354a3f4953d2a793cb800332012f42066" } } driver = "exec" config { command = "fabio-1.5.15-go1.15.5-linux_amd64" } env { # FABIO_REGISTRY_CONSUL_TOKEN = "c62d8564-c0c5-8dfe-3e75-005debbd0e40" } resources { cpu = 200 memory = 32 } } } } ================================================ FILE: applications/prometheus/grafana/README.md ================================================ Thanks to [Nextty](https://grafana.com/orgs/derekamz) for two great grafana dashboards to start with: * Nomad Jobs - https://grafana.com/dashboards/6281 * Nomad Cluster - ================================================ FILE: applications/prometheus/grafana/nomad_jobs.json ================================================ { "__inputs": [ { "name": "DS_PROMETHEUS", "label": "prometheus", "description": "", "type": "datasource", "pluginId": "prometheus", "pluginName": "Prometheus" } ], "__requires": [ { "type": "grafana", "id": "grafana", "name": "Grafana", "version": "5.1.2" }, { "type": "panel", "id": "graph", "name": "Graph", "version": "5.0.0" }, { "type": "datasource", "id": "prometheus", "name": "Prometheus", "version": "5.0.0" } ], "annotations": { "list": [ { "builtIn": 1, "datasource": "-- Grafana --", "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", "type": "dashboard" } ] }, "editable": true, "gnetId": 6281, "graphTooltip": 0, "id": null, "iteration": 1527401878265, "links": [], "panels": [ { "aliasColors": {}, "bars": false, 
"dashLength": 10, "dashes": false, "datasource": "${DS_PROMETHEUS}", "fill": 1, "gridPos": { "h": 6, "w": 12, "x": 0, "y": 0 }, "id": 2, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "repeat": "host", "repeatDirection": "v", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "avg(nomad_client_allocs_cpu_total_percent{host=~\"$host\"}) by(exported_job, task)", "format": "time_series", "interval": "", "intervalFactor": 1, "legendFormat": "{{task}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeShift": null, "title": "CPU Usage Percent - $host", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "decimals": 3, "format": "percentunit", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "${DS_PROMETHEUS}", "fill": 1, "gridPos": { "h": 6, "w": 12, "x": 12, "y": 0 }, "id": 3, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "repeat": "host", "repeatDirection": "v", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "avg(nomad_client_allocs_cpu_total_ticks{host=~\"$host\"}) by(exported_job, task)", "format": "time_series", "interval": "", "intervalFactor": 1, "legendFormat": "{{task}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeShift": null, "title": "CPU Total Ticks - $host", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "decimals": 3, "format": "timeticks", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "${DS_PROMETHEUS}", "fill": 1, "gridPos": { "h": 6, "w": 12, "x": 0, "y": 6 }, "id": 6, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "repeat": "host", "repeatDirection": "v", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "avg(nomad_client_allocs_memory_rss{host=~\"$host\"}) by(exported_job, task)", "format": "time_series", "interval": "", "intervalFactor": 1, "legendFormat": "{{task}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeShift": null, "title": "RSS - $host", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, 
"mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "decimals": 3, "format": "decbytes", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "${DS_PROMETHEUS}", "fill": 1, "gridPos": { "h": 6, "w": 12, "x": 12, "y": 6 }, "id": 7, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "repeat": "host", "repeatDirection": "v", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "avg(nomad_client_allocs_memory_cache{host=~\"$host\"}) by(exported_job, task)", "format": "time_series", "interval": "", "intervalFactor": 1, "legendFormat": "{{task}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeShift": null, "title": "Memory Cache - $host", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "decimals": 3, "format": "decbytes", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } } ], "schemaVersion": 16, "style": "dark", "tags": [], "templating": { "list": [ { "allValue": null, "current": {}, "datasource": "${DS_PROMETHEUS}", "hide": 0, "includeAll": false, "label": "DC", "multi": false, "name": "datacenter", "options": [], "query": "label_values(nomad_client_uptime, datacenter)", "refresh": 1, "regex": "", "sort": 0, "tagValuesQuery": "", "tags": [], "tagsQuery": "", "type": "query", "useTags": false }, { "allValue": null, "current": {}, "datasource": "${DS_PROMETHEUS}", "hide": 0, "includeAll": true, "label": "Host", "multi": true, "name": "host", "options": [], "query": "label_values(nomad_client_uptime{datacenter=~\"$datacenter\"}, host)", "refresh": 2, "regex": "", "sort": 0, "tagValuesQuery": "", "tags": [], "tagsQuery": "", "type": "query", "useTags": false } ] }, "time": { "from": "now-6h", "to": "now" }, "timepicker": { "refresh_intervals": [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], "time_options": [ "5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d" ] }, "timezone": "", "title": "Nomad Jobs", "uid": "TvqbbhViz", "version": 12, "description": "Nomad Jobs metrics" } ================================================ FILE: applications/prometheus/node-exporter.nomad ================================================ # The Prometheus Node Exporter needs access to the proc filesystem which is not # mounted into the exec jail, so it requires the raw_exec driver to run. 
job "prometheus-node-exporter" { datacenters = ["dc1"] type = "system" group "system" { network { port "exporter" { static = 9100 } } service { name = "node-exporter" tags = [] port = "exporter" check { name = "alive" type = "tcp" interval = "10s" timeout = "2s" } } task "node-exporter" { driver = "raw_exec" config { command = "local/node_exporter-0.18.1.linux-amd64/node_exporter" args = [ "--web.listen-address=:${NOMAD_PORT_exporter}" ] } artifact { source = "https://github.com/prometheus/node_exporter/releases/download/v0.18.1/node_exporter-0.18.1.linux-amd64.tar.gz" destination = "local" options { checksum = "sha256:b2503fd932f85f4e5baf161268854bf5d22001869b84f00fd2d1f57b51b72424" } } resources { cpu = 500 memory = 256 } } } } ================================================ FILE: applications/prometheus/prometheus.nomad ================================================ # For ACL-enabled Consul Clusters, you need to specify a Consul ACL token down # in the `prometheus` task's scrape config. job "prometheus" { datacenters = ["dc1"] type = "service" update { max_parallel = 1 min_healthy_time = "10s" healthy_deadline = "3m" auto_revert = false canary = 0 } group "monitoring" { count = 1 restart { attempts = 10 interval = "5m" delay = "25s" mode = "delay" } network { port "prometheus_ui" { to = 9090 } port "grafana_ui" { to = 3000 } } service { name = "prometheus-ui" #tags = ["urlprefix-/prometheus"] tags = ["urlprefix-/prometheus strip=/prometheus"] port = "prometheus_ui" check { name = "prometheus_ui port alive" type = "tcp" interval = "10s" timeout = "2s" } } service { name = "grafana-ui" port = "grafana_ui" tags = ["urlprefix-/grafana strip=/grafana"] check { name = "grafana-ui port alive" type = "tcp" interval = "10s" timeout = "2s" } } ephemeral_disk { size = 1000 } task "grafana" { artifact { source="https://gist.githubusercontent.com/angrycub/046cee11bd3d8c4ab9a3819646c9660c/raw/c699095c2cb25b896e2c709da588b668ce82f8b5/prometheus_nomad.json" destination="local/provisioning/dashboards/dashs" } template { change_mode="noop" destination="local/provisioning/dashboards/file_provider.yml" data = <:/sql --link :mysql -it arey/mysql-client -h mysql -p -D -e "source /sql/" ================================================ FILE: applications/wordpress/distributed/nginx.nomad ================================================ job "nginx" { datacenters = ["dc1"] type = "system" group "nginx" { network { port "http" { static = 80 } } service { name = "wp" port = "http" } task "nginx" { driver = "docker" config { image = "nginx" ports = ["http"] volumes = [ "local:/etc/nginx/conf.d", ] } template { data = <&1 >/dev/null do echo -n '.' sleep 2 # There is a good opportunity to add a loop counter and a bail-out too, but # this script waits forever. done echo " Done." EOT } config { image = "alpine:latest" command = "local/await-db.sh" network_mode = "host" } resources { cpu = 200 memory = 128 } lifecycle { hook = "prestart" sidecar = false } } task "wordpress" { driver = "docker" template { data = <&1 >/dev/null; do echo '.'; sleep 2; done"] network_mode = "host" } resources { cpu = 200 memory = 128 } lifecycle { hook = "prestart" sidecar = false } } task "wordpress" { driver = "docker" template { data = < /alloc/data/time.txt else echo "$(date) -- Found time.txt file in /alloc/data -- $(cat /alloc/data/time.txt)" fi while true do echo "$(date) -- Alive... going back to sleep for ${SLEEP_SECS}. 
${extras_part}" sleep ${SLEEP_SECS} done ================================================ FILE: artifact_sleepyecho/artifact_sleepyecho.nomad ================================================ job "repro" { datacenters = ["dc1"] type = "service" group "group" { count = 1 # constraint { # attribute = "${attr.kernel.name}" # value = "darwin" # } task "echo-task" { driver = "exec" config { command = "local/bin/SleepyEcho.sh" args = ["2"] } artifact { source = "https://angrycub-hc.s3.amazonaws.com/public/SleepyEcho.sh" destination = "local/bin" } } } } ================================================ FILE: artifact_sleepyecho/vault_sleepyecho.nomad ================================================ job "repro" { datacenters = ["dc1"] type = "service" group "group" { count = 1 task "echo-task" { driver = "exec" env { EXTRAS = "${VAULT_TOKEN}" } config { command = "local/bin/SleepyEcho.sh" args = ["2"] } vault { policies = ["nomad-client"] change_mode = "signal" change_signal = "SIGUSR1" } artifact { source = "https://angrycub-hc.s3.amazonaws.com/public/SleepyEcho.sh" destination = "local/bin" } } } } ================================================ FILE: batch/batch_gc/example.nomad ================================================ variable "body" { type = string default = "Template Rendered" } job "example" { datacenters = ["dc1"] type = "batch" group "group" { task "output" { driver = "docker" config { image = "busybox" auth_soft_fail = true command = "cat" args = ["/local/template.out"] } template { destination = "${NOMAD_TASK_DIR}/template.out" data = var.body } } } } ================================================ FILE: batch/dispatch/sleepy.nomad ================================================ job sleepy { datacenters = ["dc1"] group "group" { task "sleepy.sh" { driver = "exec" config { command = "${NOMAD_TASK_DIR}/sleepy.sh" } template { destination = "local/sleepy.sh" data = < /tmp/payload.txt"] } } } } ================================================ FILE: batch/spread_batch/example2.nomad ================================================ job "example" { datacenters = ["dc1"] type = "batch" meta { "version" = "2" } group "nodes" { count = 6 constraint { distinct_hosts = true } task "payload" { driver = "exec" config { command = "/bin/bash" args = ["-c", "echo $VAULT_ADDR > test.txt"] } } } } ================================================ FILE: batch_overload/example.nomad ================================================ job "example" { datacenters = ["dc1"] type = "batch" group "sleepers" { count = 2000 task "wait" { driver = "raw_exec" config { command = "bash" args = [ "-c", "echo Starting; sleep=`shuf -i5-10 -n1`; echo Sleeping $sleep seconds.; sleep $sleep; echo Done; exit 0" ] } resources { # This will cause us to have to create blocking allocs. memory = 200 } } } } ================================================ FILE: batch_overload/periodic.nomad ================================================ job "example" { datacenters = ["dc1"] type = "batch" periodic { cron = "*/15 * * * * *" prohibit_overlap = true } group "sleepers" { count = 5 task "wait" { driver = "raw_exec" config { command = "bash" args = [ "-c", "echo Starting; sleep=`shuf -i5-10 -n1`; echo Sleeping $sleep seconds.; sleep $sleep; echo Done; exit 0" ] } resources { # This will cause us to have to create blocking allocs. 
memory = 200 } } } } ================================================ FILE: blocked_eval/README.md ================================================ # Blocked jobs This job can be used to experiment with job behaviors when a job is waiting for a client that is able to serve the request. This is simulated using a constraint on a client metadata item. It will block until a client comes up with `meta.waituntil = "charlie"`. ================================================ FILE: blocked_eval/example.nomad ================================================ job "example" { datacenters = ["dc1"] constraint { attribute = "${meta.waituntil}" operator = "=" value = "charlie" } group "cache" { network { port "db" { to = 6379 } } task "redis" { driver = "docker" config { image = "redis:7" ports = ["db"] auth_soft_fail = true } resources { cpu = 500 memory = 256 } } } } ================================================ FILE: check.sh ================================================ #!/bin/bash printError () { echo -n "- Checking ${CUR_FILE} ... " icon="🔴" if [ ${NO_ICON:-unset} != "unset" ]; then icon="[ERROR]" fi echo ${icon} if [ "${DEBUG:-unset}" != "unset" ]; then echo "Command output:" echo "" echo "${1}" | awk '/^$/{next} {print $0}' echo "" fi output "${CUR_FILE}" "${icon}" "$(echo "${1}" | awk '/^$/{next} {print $0}')" continue } printWarning () { echo -n "- Checking ${CUR_FILE} ... " icon="🟡" if [ ${NO_ICON:-unset} != "unset" ]; then icon="[WARN]" fi echo ${icon} if [ "${DEBUG:-unset}" != "unset" ]; then echo "Job Warning output:" echo "" echo "${1}" | awk '/Job Warnings:/{flag=1} /Job Modify Index:/{flag=0} /^$/{next} flag' echo "" fi output "${CUR_FILE}" "${icon}" "$(echo "${1}" | awk '/Job Warnings:/{flag=1} /Job Modify Index:/{flag=0} /^$/{next} flag')" continue } printSuccess () { if [ ${NO_SUCCESS:-unset} != "unset" ]; then continue fi echo -n "- Checking ${CUR_FILE} ... " icon="✅" if [ ${NO_ICON:-unset} != "unset" ]; then icon="[SUCCESS]" fi echo ${icon} output "${CUR_FILE}" "${icon}" "" continue } output() { file="${1}" status="${2}" output="${3}" asHTML "${file}" "${status}" "${output}" } setupOutput() { startHTML } finishOutput() { endHTML } startHTML() { cat < output.html Nomad Job Tester Output HERE } asHTML() { file="${1}" status="${2}" output="${3}" maybeOut="" if [ "${output}" != "" ]; then maybeOut="
<details><summary>Show Output</summary><pre>
${output}
</pre></details>"
  fi
  echo "<tr><td>${status}</td><td>${file}</td><td>${maybeOut}</td></tr>" >> output.html
}

endHTML() {
  cat <<HERE >> output.html
</table>
</body>
</html>
HERE } ## Main begins here setupOutput files=$(find -s ${1:-.} -name "*.nomad") for file in ${files}; do CUR_FILE=${file} out=$(nomad plan ${CUR_FILE} 2>&1) ec=$? if [ "${ec}" == "255" ]; then printError "${out}" fi if [ "${ec}" == "1" ]; then dep=$(echo "${out}" | grep -c "Job Warnings:") if [ "$dep" != 0 ]; then printWarning "${out}" fi fi printSuccess done finishOutput ================================================ FILE: cni/README.md ================================================ # Nomad CNI examples This folder contains Nomad job specifications and configuration files that show how Nomad can use [Container Network Interface (CNI)](https://cni.dev) plugins and network configurations for running workloads. ## Examples - [`diy_bridge`](diy_bridge) - Create your own bridge network similar to the one Nomad makes for `network_mode = "bridge"` jobs. ================================================ FILE: cni/diy_brige/README.md ================================================ # DIY CNI bridge network ## About This example uses a CNI configuration based on Nomad's internal CNI template used to implement the `network_mode = "bridge"` behavior. ## Requirements This demonstration requires a Linux Nomad client. ## Running ### Validate CNI plugins are installed Generally you will install the CNI plugins as part of setting up a Nomad client, so this step may already be complete. However, for development clients that aren't using Nomad's `bridge` network mode, these might not have been installed. Nomad clients look for CNI plugins in the path given in the client's [`cni_path`], `/opt/cni/bin` by default. Check your client configuration to see if this value has been overridden. Check these folders for the CNI plugins. Verify that you have all the following binaries somewhere in the folders listed in your `cni_path`. - `bridge` - `firewall` - `host-local` - `loopback` ================================================ FILE: cni/diy_brige/diybridge.conflist ================================================ { "cniVersion": "0.4.0", "name": "diybridge", "plugins": [ { "type": "loopback" }, { "type": "bridge", "bridge": "diybridge", "ipMasq": true, "isGateway": true, "forceAddress": true, "hairpinMode": true, "ipam": { "type": "host-local", "ranges": [ [ { "subnet": "192.168.1.0/24" } ] ], "routes": [ { "dst": "0.0.0.0/0" } ] } }, { "type": "firewall", "backend": "iptables", "iptablesAdminChainName": "DIY-BRIDGE" }, { "type": "portmap", "capabilities": {"portMappings": true}, "snat": true } ] } ================================================ FILE: cni/diy_brige/example.nomad ================================================ variable "dcs" { description = "Datacenters to run job in." type = list(string) default = ["dc1"] } job "example" { datacenters = ["dc1"] group "test" { network { mode = "cni/diybridge" } task "alpine" { driver = "docker" config { image = "busybox:latest" command = "sleep" args = ["infinity"] } } } } ================================================ FILE: cni/diy_brige/repro.nomad ================================================ variable "dcs" { type = list(string) default = ["dc1"] description = "Nomad datacenters in which to run the job." 
} job "example" { datacenters = ["dc1"] group "g1" { network { mode = "bridge" port "foo" { to = 1337 } } task "nc-alpine" { driver = "docker" config { image = "alpine" args = ["nc", "-lk", "-p", "${NOMAD_PORT_foo}", "-e", "cat"] } resources { cpu = 100 memory = 64 } } } } ================================================ FILE: cni/example.nomad ================================================ job "example" { datacenters = ["dc1"] group "test" { network { mode = "cni/mynet3" } task "alpine" { driver = "docker" config { image = "alpine:latest" config { command = "sh" args = ["-c", "while true; do sleep 300; done "] } } } } } ================================================ FILE: complex_meta/template_env.nomad ================================================ job "template" { datacenters = ["dc1"] type = "batch" group "group" { task "meta-output" { driver = "raw_exec" config { command = "bash" args=["-c", "echo $RULES | jq ."] } template { destination = "secrets/rules.env" env = true data = < Monitoring evaluation "ba76383e" Evaluation triggered by job "template" ==> Monitoring evaluation "ba76383e" Allocation "e4d4bcf1" created: node "f7bc1f2d", group "group" Evaluation status changed: "pending" -> "complete" ==> Evaluation "ba76383e" finished with status "complete" ``` Fetch the output template file using the `nomad alloc fs` command. ``` nomad alloc fs e4d4bcf1 command/local/template.out ``` Observe that the template is built with the `config1` paths. ``` Name: config1.service.consul IP: 10.0.1.100:7777 ``` Update the KV value to `config2`. ``` consul kv put template/current "config2" ``` Consul should indcate success. ``` Success! Data written to: template/current ``` Check the status of the allocation. ``` nomad alloc status e4d4bcf1 ``` Observe that your change caused Nomad to restart it. ``` ID = e4d4bcf1-f300-b7e7-2f8a-c252eae04822 Eval ID = ba76383e Name = template.group[0] Node ID = f7bc1f2d Node Name = nomad-client-1.node.consul Job ID = template Job Version = 0 Client Status = running Client Description = Tasks are running Desired Status = run Desired Description = Created = 1m23s ago Modified = 39s ago Task "command" is "running" Task Resources CPU Memory Disk Addresses 0/100 MHz 112 KiB/300 MiB 300 MiB Task Events: Started At = 2021-06-07T17:32:22Z Finished At = N/A Total Restarts = 1 Last Restart = 2021-06-07T13:32:22-04:00 Recent Events: Time Type Description 2021-06-07T13:32:22-04:00 Started Task started by client 2021-06-07T13:32:22-04:00 Driver Downloading image 2021-06-07T13:32:22-04:00 Restarting Task restarting in 0s 2021-06-07T13:32:22-04:00 Terminated Exit Code: 137, Exit Message: "Docker container exited with non-zero exit code: 137" 2021-06-07T13:32:16-04:00 Restart Signaled Template with change_mode restart re-rendered 2021-06-07T13:31:40-04:00 Started Task started by client 2021-06-07T13:31:39-04:00 Driver Downloading image 2021-06-07T13:31:39-04:00 Task Setup Building Task Directory 2021-06-07T13:31:39-04:00 Received Task received by client ``` Now, refetch the rendered file with `nomad alloc fs`. ``` nomad alloc fs e4d4bcf1 command/local/template.out ``` Observe that the content now shows the values for the config2 paths. ``` Name: config2.service.consul IP: 10.0.2.200:8888 ``` ## Clean up Remove the running sample job. ``` nomad job stop -purge template ``` Remove the Consul keys. 
``` consul kv delete template/current consul kv delete template/config1/name consul kv delete template/config1/ip consul kv delete template/config1/port consul kv delete template/config2/name consul kv delete template/config2/ip consul kv delete template/config2/port ``` ================================================ FILE: consul/use_consul_for_kv_path/template.nomad ================================================ job "template" { datacenters = ["dc1"] group "group" { count = 1 task "command" { template { data = < volume.hcl nomad volume register volume.hcl echo "querying volume $UUID..." nomad volume status $UUID ``` ================================================ FILE: csi/hostpath/block/csi-hostpath-driver.nomad ================================================ job "csi-hostpath" { datacenters = ["dc1"] type = "system" group "nodes" { task "plugin" { driver = "docker" config { image = "k8s.gcr.io/sig-storage/hostpathplugin:v1.9.0" args = [ "--v=5", "--drivername=csi-hostpath", "--endpoint=unix://csi/csi.sock", "--nodeid=${attr.unique.hostname}", ] privileged = true } csi_plugin { id = "csi_hostpath" type = "monolith" mount_dir = "/csi" health_timeout = "30s" } resources { cpu = 250 memory = 128 } } } } ================================================ FILE: csi/hostpath/block/job.nomad ================================================ job "alpine" { datacenters = ["dc1"] group "alloc" { restart { attempts = 10 interval = "5m" delay = "25s" mode = "delay" } volume "jobVolume" { type = "csi" read_only = false source = "test-volume0" } task "docker" { driver = "docker" volume_mount { volume = "jobVolume" destination = "/srv" read_only = false } config { image = "alpine" command = "sleep" args = ["infinity"] } } } } ================================================ FILE: csi/hostpath/block/test.sh ================================================ #!/bin/bash # create the volume in the "external provider" PLUGIN_ID=$1 VOLUME_NAME=$2 # non-dev mode # CSI_ENDPOINT="/var/nomad/client/csi/monolith/$PLUGIN_ID/csi.sock" # dev mode path is going to be in a tempdir PLUGIN_DOCKER_ID=$(docker ps | grep hostpath | awk -F' +' '{print $1}') CSI_ENDPOINT=$(docker inspect $PLUGIN_DOCKER_ID | jq -r '.[0].Mounts[] | select(.Destination == "/csi") | .Source')/csi.sock echo "creating volume..." UUID=$(sudo csc --endpoint $CSI_ENDPOINT controller create-volume $VOLUME_NAME --cap 1,2,ext4 | grep -o '".*"' | tr -d '"') echo "registering volume $UUID..." echo $(printf 'id = "%s" name = "%s" type = "csi" external_id = "%s" plugin_id = "%s" access_mode = "single-node-writer" attachment_mode = "file-system"' $VOLUME_NAME $VOLUME_NAME $UUID $PLUGIN_ID) > volume.hcl nomad volume register volume.hcl echo "querying volume $UUID..." 
nomad volume status $UUID ================================================ FILE: csi/hostpath/file/README.md ================================================ ### Nomad CSI Demo using the CSI hostvolume plugin Prerequisites - https://github.com/rexray/gocsi/tree/master/csc - https://quay.io/repository/k8scsi/hostpathplugin?tag=v1.2.0 - Nomad 0.11 This script will create a volume.hcl file ``` #!/bin/bash # create the volume in the "external provider" PLUGIN_ID=hostpath-plugin0 VOLUME_NAME=test-volume0 # non-dev mode # CSI_ENDPOINT="/var/nomad/client/csi/monolith/$PLUGIN_ID/csi.sock" # dev mode path is going to be in a tempdir PLUGIN_DOCKER_ID=$(docker ps | grep hostpath | awk -F' +' '{print $1}') CSI_ENDPOINT=$(docker inspect $PLUGIN_DOCKER_ID | jq -r '.[0].Mounts[] | select(.Destination == "/csi") | .Source')/csi.sock echo "creating volume..." UUID=$(sudo csc --endpoint $CSI_ENDPOINT controller create-volume $VOLUME_NAME --cap 1,2,ext4 | grep -o '".*"' | tr -d '"') echo "registering volume $UUID..." echo $(printf 'id = "%s" name = "%s" type = "csi" external_id = "%s" plugin_id = "%s" access_mode = "single-node-writer" attachment_mode = "file-system"' $VOLUME_NAME $VOLUME_NAME $UUID $PLUGIN_ID) > volume.hcl nomad volume register volume.hcl echo "querying volume $UUID..." nomad volume status $UUID ``` ================================================ FILE: csi/hostpath/file/csi-hostpath-driver.nomad ================================================ job "csi-hostpath-driver" { datacenters = ["dc1"] group "csi" { task "driver" { driver = "docker" config { image = "quay.io/k8scsi/hostpathplugin:v1.2.0" args = [ "--drivername=csi-hostpath", "--v=5", "--endpoint=unix://csi/csi.sock", "--nodeid=foo", ] // all known CSI plugins will require privileged=true // because they need add mountpoints. in the ACLs // design we may make csi_plugin implicitly add the // appropriate privileges. privileged = true } csi_plugin { id = "csi-hostpath" type = "monolith" mount_dir = "/csi" } } } } ================================================ FILE: csi/hostpath/file/job.nomad ================================================ job "alpine" { datacenters = ["dc1"] group "alloc" { restart { attempts = 10 interval = "5m" delay = "25s" mode = "delay" } volume "jobVolume" { type = "csi" read_only = false source = "test-volume0" } task "docker" { driver = "docker" volume_mount { volume = "jobVolume" destination = "/srv" read_only = false } config { image = "alpine" command = "sh" args = ["-c","while true; do sleep 10; done"] } } } } ================================================ FILE: csi/hostpath/file/test.sh ================================================ #!/bin/bash # create the volume in the "external provider" PLUGIN_ID=$1 VOLUME_NAME=$2 # non-dev mode # CSI_ENDPOINT="/var/nomad/client/csi/monolith/$PLUGIN_ID/csi.sock" # dev mode path is going to be in a tempdir PLUGIN_DOCKER_ID=$(docker ps | grep hostpath | awk -F' +' '{print $1}') CSI_ENDPOINT=$(docker inspect $PLUGIN_DOCKER_ID | jq -r '.[0].Mounts[] | select(.Destination == "/csi") | .Source')/csi.sock echo "creating volume..." UUID=$(sudo csc --endpoint $CSI_ENDPOINT controller create-volume $VOLUME_NAME --cap 1,2,ext4 | grep -o '".*"' | tr -d '"') echo "registering volume $UUID..." echo $(printf 'id = "%s" name = "%s" type = "csi" external_id = "%s" plugin_id = "%s" access_mode = "single-node-writer" attachment_mode = "file-system"' $VOLUME_NAME $VOLUME_NAME $UUID $PLUGIN_ID) > volume.hcl nomad volume register volume.hcl echo "querying volume $UUID..." 
nomad volume status $UUID

================================================
FILE: csi/hostpath/volume.hcl
================================================
id        = "ebs_prod_db1"
namespace = "default"
name      = "database"
type      = "csi"
plugin_id = "plugin_id"

# For 'nomad volume register', provide the external ID from the storage
# provider. This field should be omitted when creating a volume with
# 'nomad volume create'
external_id = "vol-23452345"

# For 'nomad volume create', specify a snapshot ID or volume to clone. You can
# specify only one of these two fields.
snapshot_id = "snap-12345"
# clone_id = "vol-abcdef"

# Optional: for 'nomad volume create', specify a maximum and minimum capacity.
# Registering an existing volume will record but ignore these fields.
capacity_min = "10GiB"
capacity_max = "20G"

# Required (at least one): for 'nomad volume create', specify one or more
# capabilities to validate. Registering an existing volume will record but
# ignore these fields.
capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

capability {
  access_mode     = "single-node-reader"
  attachment_mode = "block-device"
}

# Optional: for 'nomad volume create', specify mount options to validate for
# 'attachment_mode = "file-system"'. Registering an existing volume will
# record but ignore these fields.
mount_options {
  fs_type     = "ext4"
  mount_flags = ["ro"]
}

# Optional: specify one or more locations where the volume must be accessible
# from. Refer to the plugin documentation for what segment values are supported.
topology_request {
  preferred {
    topology { segments { rack = "R1" } }
  }
  required {
    topology { segments { rack = "R1" } }
    topology { segments { rack = "R2", zone = "us-east-1a" } }
  }
}

# Optional: provide any secrets specified by the plugin.
secrets {
  example_secret = "xyzzy"
}

# Optional: provide a map of keys to string values expected by the plugin.
parameters {
  skuname = "Premium_LRS"
}

# Optional: for 'nomad volume register', provide a map of keys to string
# values expected by the plugin. This field will be populated automatically by
# 'nomad volume create'.
context {
  endpoint = "http://192.168.1.101:9425"
}

================================================
FILE: deployments/failing_deployment/example.nomad
================================================
job "example" {
  datacenters = ["dc1"]

  group "cache" {
    network {
      port "db" {
        to = 6379
      }
    }

    service {
      name = "redis-cache"
      tags = ["global", "cache"]
      port = "db"

      check {
        name     = "alive"
        type     = "tcp"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "redis" {
      driver = "docker"

      config {
        image = "redis:7"
        ports = ["db"]
      }

      resources {
        cpu    = 500
        memory = 256
      }
    }
  }
}

================================================
FILE: docker/auth_from_template/README.md
================================================
# Auth from Template Example

This job specification demonstrates using the `template` stanza to create environment variables suitable for Nomad to use in variable interpolation. This example uses Consul KV, since there is less configuration necessary to run the sample; however, this exists to demonstrate that a Vault-based solution (once configured with your cluster) would be trivial to switch to.

This job pairs with the docker_registry_v2 job from the applications folder, which has basic authentication enabled. Once you have started it, you will need to pull the redis:latest image from DockerHub and push it into your local repo.
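A sketch of that seeding step, assuming the registry is reachable at `registry.service.consul:5000` (the address used elsewhere in this repository) and that the v2 registry's basic-auth credentials are `user`/`securepassword`:

```shell-session
$ docker login registry.service.consul:5000 -u user -p securepassword
$ docker pull redis:latest
$ docker tag redis:latest registry.service.consul:5000/redis:latest
$ docker push registry.service.consul:5000/redis:latest
```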
### Add the values for the job to Consul

```shell-session
$ consul kv put kv/docker/config/user user
$ consul kv put kv/docker/config/pass securepassword
```

Running the job will start as expected. Stop the job.

### Change the password value in Consul

```shell-session
$ consul kv put kv/docker/config/pass securepasswordLOL
```

Running the job now will fail since the credential is invalid.

================================================
FILE: docker/auth_from_template/auth.nomad
================================================
job "auth" {
  type        = "service"
  datacenters = ["dc1"]

  group "docker" {
    task "redis" {
      driver = "docker"

      template {
        destination = "secrets/secret.env"
        env         = true
        change_mode = "noop"
        data        = < group "cache" > task "redis" > config` block, add the following:

```hcl
hostname = "${attr.unique.hostname}"
```

Set the count on the `group "cache"` to 3.

```hcl
group "cache" {
  count = 3
  ...
```

### Run the job

Run the job in your Nomad cluster and wait for the instances to become healthy. You will be returned to a shell prompt.

```shell
nomad job run example.nomad
```

### Validate the allocations' hostnames

Once you have been returned to a shell prompt, running `view.sh` shows output like the following. The Allocation IDs, Node Names, and Host Names will vary from the output here, but you should be able to note that the Docker host name matches the Nomad Client's Node Name.

```shell
$ ./view.sh
Allocation ID                         Node Name (Nomad)           Host Name (Docker)
0053d552-f461-519e-2b26-13f5e8b67524  nomad-client-3.node.consul  nomad-client-3.node.consul
5767a2a6-38a4-2330-d692-9badc5840edb  nomad-client-1.node.consul  nomad-client-1.node.consul
59dc75cd-5acf-e21d-7d5f-befed3dfa336  nomad-client-1.node.consul  nomad-client-1.node.consul
```

================================================
FILE: docker/docker_dynamic_hostname/finished.nomad
================================================
job "example" {
  datacenters = ["dc1"]

  group "cache" {
    count = 3

    network {
      port "db" {
        to = 6379
      }
    }

    task "redis" {
      driver = "docker"

      config {
        image          = "redis:7"
        ports          = ["db"]
        auth_soft_fail = true
        hostname       = "${attr.unique.hostname}"
      }

      resources {
        cpu    = 500
        memory = 256
      }
    }
  }
}

================================================
FILE: docker/docker_dynamic_hostname/res_file
================================================
Allocation ID\tNode Name (Nomad)\tHostname (Docker)
nomad-client-3.node.consul\tnomad-client-3.node.consul\t
nomad-client-1.node.consul\tnomad-client-1.node.consul\t
nomad-client-1.node.consul\tnomad-client-1.node.consul\t
nomad-client-3.node.consul\tnomad-client-3.node.consul\t

================================================
FILE: docker/docker_dynamic_hostname/view.sh
================================================
#!/usr/bin/env bash

function getJobAllocIds {
  nomad alloc status -t '{{range $A := .
}}{{if eq "example" .JobID}}{{printf "%s%s%s\n" .ID "|" .NodeName }}{{end}}{{end}}' } res_file=$(mktemp) printf "Allocation ID\tNode Name (Nomad)\tHostname (Docker)\n" > "$res_file" for ALLOC_INFO in $(getJobAllocIds example) do NODENAME=${ALLOC_INFO##*|} ALLOC_ID=${ALLOC_INFO%%|*} DOCKERNAME=$(nomad alloc exec ${ALLOC_ID} cat /etc/hostname) printf "%s\t%s\t%s\n" $ALLOC_ID $NODENAME $DOCKERNAME >> "$res_file" done column -t -s"$(printf "\t")" $res_file rm -rf "$res_file" ================================================ FILE: docker/docker_entrypoint/Dockerfile ================================================ FROM alpine ENTRYPOINT ["ping"] CMD ["www.google.com"] ================================================ FILE: docker/docker_entrypoint/example.nomad ================================================ job "example" { datacenters = ["dc1"] update { max_parallel = 1 min_healthy_time = "10s" healthy_deadline = "3m" auto_revert = false canary = 0 } migrate { max_parallel = 1 health_check = "checks" min_healthy_time = "10s" healthy_deadline = "5m" } group "cache" { ephemeral_disk { size = 300 } network { port "db" { to = 6379 } } service { name = "redis-cache" tags = ["global", "cache"] port = "db" check { name = "alive" type = "tcp" interval = "10s" timeout = "2s" } } task "redis" { driver = "docker" config { image = "redis:7" ports = ["db"] auth_soft_fail = true } resources { cpu = 500 memory = 256 } } } } ================================================ FILE: docker/docker_image_not_found/README.md ================================================ # Docker Image Not Found This folder containse examples that demonstrate what happens when a requested Docker image can not be found. * **restart.nomad** - contains a restart stanza that will cause this to restart infinitely on the same client * **reschedule.nomad** - will utilize the defaults and reschedule onto other nodes in nomad 0.8+ ================================================ FILE: docker/docker_image_not_found/reschedule.nomad ================================================ job "example" { datacenters = ["dc1"] group "group" { task "broken" { driver = "docker" config { image = "this_is_not_an_image:latest" } } } } ================================================ FILE: docker/docker_image_not_found/restart.nomad ================================================ job "restart" { datacenters = ["dc1"] meta { "serial_num" = "2" } group "group" { restart { attempts = 2 delay = "30s" interval = "1m" mode = "delay" } task "broken" { driver = "docker" config { image = "this_is_not_an_image:latest" } } } } ================================================ FILE: docker/docker_interpolated_image_name/README.md ================================================ # Using interpolated Docker image versions Prerequisites: - Nomad - Docker - Consul Rough Notes: - The docker image path is interpolated - The Nomad `template` block can be used to create environment variables and has access to Consul values - You can use the `keyOrDefault` template function to fetch a value from Consul KV - You can set and update the value using the `consul kv put` command. - Depending on template `change_mode`, this might restart the job. 
- Image caching is at play, so immutable tags help this scenario ```shell-session consul kv put service/redis/version 3.2 ``` ================================================ FILE: docker/docker_interpolated_image_name/example.nomad ================================================ job "example" { datacenters = ["dc1"] group "cache" { network { port "db" { to = 6379 } } service { tags = ["redis", "cache"] port = "db" check { name = "alive" type = "tcp" interval = "10s" timeout = "2s" } } task "redis" { template { data = < 2022-11-22T17:57:01-05:00: Monitoring evaluation "86382659" 2022-11-22T17:57:01-05:00: Evaluation triggered by job "example" 2022-11-22T17:57:02-05:00: Allocation "4691273a" created: node "d18649d1", group "g1" 2022-11-22T17:57:02-05:00: Evaluation status changed: "pending" -> "complete" ==> 2022-11-22T17:57:02-05:00: Evaluation "86382659" finished with status "complete" ``` Note from the output that the created allocation's ID starts with 469. Your allocation ID will vary. Use that with the `nomad alloc logs` command to get the output from the latest run. ```text $ nomad alloc logs 469 VAR1=foo VAR2=bar ``` The `test.nomad` file shows overriding the command with an alternative command inside the container and passing environment variables that are set in the ENTRYPOINT. The job sets both values to `$VAR2` to show that they are still being read from the environment. ```text $ nomad job run test.nomad ==> 2022-11-22T17:57:19-05:00: Monitoring evaluation "c0a0a83f" 2022-11-22T17:57:19-05:00: Evaluation triggered by job "example" 2022-11-22T17:57:20-05:00: Allocation "63800968" created: node "d18649d1", group "g1" 2022-11-22T17:57:20-05:00: Evaluation status changed: "pending" -> "complete" ==> 2022-11-22T17:57:20-05:00: Evaluation "c0a0a83f" finished with status "complete" ``` Note from the output that the created allocation's ID starts with 638. Your allocation ID will vary. Use that with the `nomad alloc logs` command to get the output from the latest run. ```text $ nomad alloc logs 638 It's the alternate version! 🎉 VAR1=bar VAR2=bar ``` ================================================ FILE: docker/env_var_args/cmd.sh ================================================ #!/bin/sh # This is the original workload for the container # it's going to echo out the values set in the # entrypoint echo VAR1=$1 echo VAR2=$2 ================================================ FILE: docker/env_var_args/cmd_alt.sh ================================================ #!/bin/sh # This is the original workload for the container # it's going to echo out the values set in the # entrypoint echo "It's the alternate version! 
🎉" echo VAR1=$1 echo VAR2=$2 ================================================ FILE: docker/env_var_args/entrypoint.sh ================================================ #!/bin/sh # The entrypoint is used to set some values that the # command will use export VAR1="foo" export VAR2="bar" eval $@ ================================================ FILE: docker/env_var_args/start.nomad ================================================ job "example" { datacenters = ["dc1"] type = "batch" meta { run_uuid = "${uuidv4()}" } group "g1" { task "docker" { driver = "docker" config { image = "registry.service.consul:5000/envfun:latest" } } } } ================================================ FILE: docker/env_var_args/test.nomad ================================================ job "example" { datacenters = ["dc1"] type = "batch" meta { run_uuid = "${uuidv4()}" } group "g1" { task "docker" { driver = "docker" config { image = "registry.service.consul:5000/envfun:latest" command = "/scripts/cmd_alt.sh" args = ["$VAR2", "$VAR2"] } } } } ================================================ FILE: docker/get_fact_from_consul/README.md ================================================ ## get_fact_from_consul These demonstration jobs use Consul templates to fetch values for substitution in Docker jobs. These values can be used as interpolated values at workload runtime and are seen as concrete values in `docker inspect`. However, they are also available to the workload itself. - **image.nomad** - uses an enviroment variable that is made concrete during container startup. However, they are available to the workload as well. - **args.nomad** - uses the `template` stanza to build environment variables and provides them to the job via the `args` list. These are handled by the starting workload. ## image.nomad requires a consul key named `test/redis/docker-tag` ```shell-session $ consul kv put test/redis/docker-tag "4.0" ``` - Run the job. Find the client node that it's running on. SSH there. - Run `docker ps` to find the workload; note that it's running the version from the label. ## args.nomad requires a consul key named `test/echo/content` ```shell-session $ consul kv put test/echo/content "hello world!" ``` - Run the job. Find the client node that it's running on. SSH there. - Run `docker ps` to find the workload - Run `docker inspect` on the running container. - Look for `"Cmd"` and note that the environment variables have been expanded to their concrete values. ================================================ FILE: docker/get_fact_from_consul/args.nomad ================================================ job "args.nomad" { datacenters = ["dc1"] group "g1" { network { port "http" {} } task "echo" { template { destination = "secrets/local.env" env = true data =<

Welcome to my application.


You are on ${NOMAD_IP_http} and will be redirected to your profile.", ] } artifact { source = "https://github.com/hashicorp/http-echo/releases/download/v0.2.3/http-echo_0.2.3_linux_amd64.tar.gz" options { checksum = "sha256:e30b29b72ad5ec1f6dfc8dee0c2fcd162f47127f2251b99e47b9ae8af1d7b917" } } resources { memory = 10 } } } } ================================================ FILE: echo_stack/profile-service.nomad ================================================ job "profile-service" { datacenters = ["dc1"] group "application" { count = 3 network { port "http" {} } service { name = "profile-service" tags = ["urlprefix-/profile"] port = "http" check { type = "http" name = "health-check" interval = "15s" timeout = "5s" path = "/" } } task "server" { driver = "exec" config { command = "http-echo" args = [ "-listen", ":${NOMAD_PORT_http}", "-text", "

User Profile


This might be a profile page in a while
You are on instance ${NOMAD_ALLOC_INDEX} on ${NOMAD_IP_http}.", ] } artifact { source = "https://github.com/hashicorp/http-echo/releases/download/v0.2.3/http-echo_0.2.3_linux_amd64.tar.gz" options { checksum = "sha256:e30b29b72ad5ec1f6dfc8dee0c2fcd162f47127f2251b99e47b9ae8af1d7b917" } } resources { memory = 10 } } } } ================================================ FILE: env/escaped_env_vars/Dockerfile ================================================ FROM busybox COPY ./entrypoint.sh /bin/entrypoint.sh RUN chmod +x /bin/entrypoint.sh ENTRYPOINT ["entrypoint.sh"] ================================================ FILE: env/escaped_env_vars/README.md ================================================ # Escaped Environment Variables Suppose you have a Docker job that sets environment variables in the entrypoint and you would like to refer to them as arguments in the subsequent command's arguments. This sample will use an exec job to demonstrate how this would be accomplished in a Nomad job ================================================ FILE: env/escaped_env_vars/entrypoint.sh ================================================ #!/bin/sh export entryVar="Entrypoint Variable" echo "${1}" ================================================ FILE: env/escaped_env_vars/example.nomad ================================================ variable "dcs" { type = list(string) description = "Datecenters to run job in." default = ["dc1"] } job "example" { datacenters = var.dcs type = "batch" group "group" { task "escaped" { driver = "exec" config { command = "run.sh" args = [ "\\$$var1" ] } env = { var1 = "Some value" } template { destination = "run.sh" data = <Fabio is an HTTP and TCP reverse proxy that configures itself with data from Consul. > >Traditional load balancers and reverse proxies need to be configured with a config file. The configuration contains the hostnames and paths the proxy is forwarding to upstream services. This process can be automated with tools like consul-template that generate config files and trigger a reload. > >Fabio works differently since it updates its routing table directly from the data stored in Consul as soon as there is a change and without restart or reloading. More information about Fabio can be found at the project's website: ## The job specifications - `fabio-docker.nomad` - A Nomad system job that uses the Docker task driver to run the `latest` tag of the container. This configuration simplifies locating a fabio instance from an external loadbalancer like an ELB. Simplest way to get started with Fabio. - `fabio-system.nomad` - A Nomad system job that uses the exec task driver to run instances of the Fabio 1.5.15 linux/amd64 binary on all the linux/amd64 clients in your cluster. This configuration simplifies locating a fabio instance from an external loadbalancer like an ELB. - `fabio-service.nomad` - A Nomad service job that uses the exec task driver to run three instances of the Fabio 1.5.15 linux/amd64 binary. This configuration requires a load balancer capable of inspecting Consul or testing the Fabio ports over all of the clients to identify where the Fabio instances landed. 
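Fabio discovers routes from Consul tags, so exposing a service through any of these Fabio deployments only requires tagging its service registration. A minimal sketch, with a made-up service name and path prefix:

```hcl
service {
  name = "my-webapp"
  port = "http"

  # fabio watches Consul and creates a route for /myapp
  tags = ["urlprefix-/myapp"]

  check {
    type     = "http"
    path     = "/"
    interval = "10s"
    timeout  = "2s"
  }
}
```

Fabio only routes to instances with a passing health check, so the `check` block matters as much as the tag.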
================================================
FILE: fabio/fabio-docker.nomad
================================================
job "fabio" {
  datacenters = ["dc1"]
  type        = "system"

  update {
    stagger      = "5s"
    max_parallel = 1
  }

  group "linux-amd64" {
    network {
      port "proxy" { static = 9999 }
      port "ui"    { static = 9998 }
    }

    service {
      tags = ["fabio", "lb"]
      port = "ui"
      check {
        name     = "fabio ui port is alive"
        type     = "tcp"
        interval = "10s"
        timeout  = "2s"
      }
      check {
        name     = "fabio health check"
        type     = "http"
        path     = "/health"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "fabio" {
      constraint {
        attribute = "${attr.cpu.arch}"
        operator  = "="
        value     = "amd64"
      }

      constraint {
        attribute = "${attr.kernel.name}"
        operator  = "="
        value     = "linux"
      }

      env {
        ## Add if your consul agent is not listening on 127.0.0.1:8500
        # registry_consul_addr = "${attr.unique.network.ip-address}:8500"

        ## Add if your Consul cluster is ACL-enabled.
        # registry_consul_token = "«add if you have a consul enabled cluster»"
      }

      driver = "docker"

      config {
        image        = "fabiolb/fabio:latest"
        network_mode = "host"
        ports        = ["proxy", "ui"]
      }

      resources {
        cpu    = 200
        memory = 150
      }
    }
  }
}

================================================
FILE: fabio/fabio-service.nomad
================================================
job "fabio" {
  datacenters = ["dc1"]
  type        = "service"

  update {
    stagger      = "5s"
    max_parallel = 1
  }

  group "linux-amd64" {
    count = 3

    network {
      port "http" { static = 9999 }
      port "ui"   { static = 9998 }
    }

    service {
      tags = ["fabio", "lb"]
      port = "ui"
      check {
        name     = "fabio ui port is alive"
        type     = "tcp"
        interval = "10s"
        timeout  = "2s"
      }
      check {
        name     = "fabio health check"
        type     = "http"
        path     = "/health"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "fabio" {
      constraint {
        attribute = "${attr.cpu.arch}"
        operator  = "="
        value     = "amd64"
      }

      constraint {
        attribute = "${attr.kernel.name}"
        operator  = "="
        value     = "linux"
      }

      env {
        registry_consul_addr = "${attr.unique.network.ip-address}:8500"
        # registry_consul_token = "«add if you have a consul enabled cluster»"
      }

      driver = "exec"

      config {
        command = "fabio-1.5.15-go1.15.5-linux_amd64"
      }

      artifact {
        source = "https://github.com/fabiolb/fabio/releases/download/v1.5.15/fabio-1.5.15-go1.15.5-linux_amd64"
        options {
          checksum = "sha256:14c7a02ca95fb00a4f3010eab4e3c0e354a3f4953d2a793cb800332012f42066"
        }
      }

      resources {
        cpu    = 200
        memory = 150
      }
    }
  }
}

================================================
FILE: fabio/fabio-system.nomad
================================================
job "fabio" {
  datacenters = ["dc1"]
  type        = "system"

  update {
    stagger      = "5s"
    max_parallel = 1
  }

  group "linux-amd64" {
    network {
      port "http" { static = 9999 }
      port "ui"   { static = 9998 }
    }

    service {
      tags = ["fabio", "lb"]
      port = "ui"
      check {
        name     = "fabio ui port is alive"
        type     = "tcp"
        interval = "10s"
        timeout  = "2s"
      }
      check {
        name     = "fabio health check"
        type     = "http"
        path     = "/health"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "fabio" {
      constraint {
        attribute = "${attr.cpu.arch}"
        operator  = "="
        value     = "amd64"
      }

      constraint {
        attribute = "${attr.kernel.name}"
        operator  = "="
        value     = "linux"
      }

      env {
        registry_consul_addr = "${attr.unique.network.ip-address}:8500"
        # registry_consul_token = "«add if you have a consul enabled cluster»"
      }

      driver = "exec"

      config {
        command = "fabio-1.5.15-go1.15.5-linux_amd64"
      }

      artifact {
        source = "https://github.com/fabiolb/fabio/releases/download/v1.5.15/fabio-1.5.15-go1.15.5-linux_amd64"
        options {
          checksum = "sha256:14c7a02ca95fb00a4f3010eab4e3c0e354a3f4953d2a793cb800332012f42066"
        }
      }

      resources {
        cpu    = 200
        memory = 150
      }
    }
  }
}
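To try one of these, submit the job and then inspect the routing table through the UI port on any client (9998 in these specs). A sketch, assuming a routable client address; Fabio serves its route table as JSON from the UI port:

```shell-session
$ nomad job run fabio-system.nomad
$ curl http://«client-address»:9998/api/routes
```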
================================================
FILE: fabio-ssl/fabio-ssl.nomad
================================================
job "fabio-stg" {
  datacenters = ["dc1"]
  type        = "system"

  group "fabio" {
    network {
      port "http"  { static = 80 }
      port "https" { static = 443 }
      port "ui"    { static = 9998 }
      port "lb"    { static = 9999 }
    }

    service {
      name = "fabio-lb"
      tags = ["fabio"]
      port = "http"
      check {
        type     = "tcp"
        port     = "http"
        interval = "10s"
        timeout  = "2s"
      }
    }

    service {
      name = "fabio-lb-tls"
      tags = ["fabio"]
      port = "https"
      check {
        type     = "tcp"
        port     = "https"
        interval = "10s"
        timeout  = "2s"
      }
    }

    service {
      name = "fabio-ui"
      tags = ["fabio"]
      port = "ui"
      check {
        type     = "tcp"
        port     = "ui"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "fabio" {
      driver = "docker"

      config {
        image   = "fabiolb/fabio"
        volumes = ["/etc/fabio:/etc/fabio"]
        ports   = ["http", "https", "ui", "lb"]
      }

      resources {
        cpu    = 1000
        memory = 70
      }
    }
  }
}

================================================
FILE: failing_jobs/README.md
================================================
# Failing Jobs

This directory contains jobs that fail by design. They are useful for viewing
the log events and behaviors seen when these failures are encountered in the
wild.

================================================
FILE: failing_jobs/failing_sidecar/README.md
================================================
# Failing Sidecar

This job is designed to demonstrate the behavior of a task group when a task
within it fails to start.

================================================
FILE: failing_jobs/failing_sidecar/example.nomad
================================================
job "example" {
  datacenters = ["dc1"]

  group "cache" {
    task "redis" {
      driver = "docker"

      config {
        image = "redis:7"
        port_map {
          db = 6379
        }
      }

      resources {
        network {
          port "db" {}
        }
      }

      service {
        name = "redis"
        tags = ["cache"]
        port = "db"
        check {
          name     = "alive"
          type     = "tcp"
          interval = "10s"
          timeout  = "2s"
        }
      }
    }

    task "faily-mcfailface" {
      driver = "exec"
      config {
        command = "/bin/bash"
        args    = ["-c", "echo \"I don't feel so good....\"; sleep 5; echo \"see... I told you I was sick...\"; exit 1"]
      }
    }
  }
}

================================================
FILE: failing_jobs/impossible_constratint/README.md
================================================
# Impossible Constraint

This job demonstrates what happens when someone sets conflicting node-name
constraints within a single Nomad group (the smallest unit of placement).

================================================
FILE: failing_jobs/impossible_constratint/example.nomad
================================================
job "example" {
  datacenters = ["dc1"]
  type        = "service"

  group "cache" {
    count = 1

    task "redis1" {
      constraint {
        attribute = "${attr.unique.hostname}"
        value     = "nomad-client-1.example.com"
      }

      driver = "docker"
      config {
        image = "redis:7"
      }
    }

    task "redis2" {
      constraint {
        attribute = "${attr.unique.hostname}"
        value     = "nomad-client-2.example.com"
      }

      driver = "docker"
      config {
        image = "redis:7"
      }
    }
  }
}

================================================
FILE: giant/example.nomad
================================================
job "giant" {
  datacenters = ["dc1"]

  group "mysql" {
    volume "mysql" {
      type   = "host"
      source = "mysql"
    }

    ephemeral_disk {
      migrate = false
      size    = 2000
      sticky  = true
    }

    task "ls" {
      driver = "exec"

      volume_mount {
        volume      = "mysql"
        destination = "/var/lib/mysql"
      }

      config {
        command = "bash"
        args    = ["-c", "while true; do ls /var/lib/mysql; sleep 60; done"]
      }

      resources {
        cpu    = 100
        memory = 128
      }
    }
  }
}

================================================
FILE: guide/TUTORIAL_TEMPLATE.mdx
================================================
---
name:
products_used: <products used in this tutorial; list primary product first and do not capitalize>
  - vault
  - terraform
  - consul
  - nomad
description: |-
  Short description about what the reader will do/learn.
  Limit 250 characters; include keyword for SEO.
redirects: <former URL(s) to be redirected, e.g., tutorials/terraform/intro-gcp>
default_collection_context: <slug of primary collection, e.g., consul/datacenter-deploy>
katacoda_scenario_id: <katacoda_scenario_id> <-- if there's no scenario to embed, remove this entry
video_id: <video_id> <-- if there's no video, remove this entry
video_host: `youtube` or `wistia` <-- if there's no video, remove this entry
---

Introduction goes here... (e.g. what you'll learn in this tutorial)

## Challenge

> **OPTIONAL:** If this is covered in the introduction, you don't need to have this
> explicit header.

State the common business challenge. Oftentimes, you can get this information
in the **Background** section of the RFC written by the PM. If not, reach out
to the PM and ask for an example (customer story).

## Solution

> **OPTIONAL:** If this is covered in the introduction, you don't need to have this
> explicit header.

How the product solves this challenge. This is where you explain why you should
read this tutorial.

## Personas

_If applicable_

> **OPTIONAL:** If this is covered in the introduction, you don't need to have this
> explicit section.

If the guided steps involve multiple roles, describe it here.
Example: The end-to-end scenario described in this tutorial involves two personas:

- `admin` with privileged permissions to write secrets
- `apps` reads the secrets from Vault (client)

## Prerequisites

Example:

- Vault **version 1.2.0** or later
- [Kubernetes command-line interface (CLI)](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
- [Minikube](https://minikube.sigs.k8s.io)

> If there is a corresponding Katacoda scenario, be sure to add
> [`<KatacodaToggleButton />`](https://github.com/hashicorp/learn/blob/master/components/katacoda-embed/README.md)
> to show the "Show Terminal" button.

**NOTE:** An interactive tutorial is also available if you do not have an
environment to perform the steps described in this tutorial. Click the
**Show Terminal** button to start.

<KatacodaToggleButton />

## Action title 1

Short description of this step. If applicable, demonstrate the steps using CLI,
API, and/or UI. Leverage the [tabs component](https://github.com/hashicorp/learn/blob/master/components/tabs/README.md)
to organize the content.

<Tabs>
<Tab heading="CLI command">

Step by step instruction here.

</Tab>
<Tab heading="API call using cURL">

Step by step instruction here.

</Tab>
<Tab heading="Web UI">

Step by step instruction here.

</Tab>
</Tabs>

## Action title 2

Short description of this step. If applicable, demonstrate the steps using CLI,
API, and/or UI. Leverage the [tabs component](https://github.com/hashicorp/learn/blob/master/components/tabs/README.md)
to organize the content.

<Tabs>
<Tab heading="CLI command">

Step by step instruction here.

</Tab>
<Tab heading="API call using cURL">

Step by step instruction here.

</Tab>
<Tab heading="Web UI">

Step by step instruction here.

</Tab>
</Tabs>

## Action title 3

Short description of this step. If applicable, demonstrate the steps using CLI,
API, and/or UI. Leverage the [tabs component](https://github.com/hashicorp/learn/blob/master/components/tabs/README.md)
to organize the content.

<Tabs>
<Tab heading="CLI command">

Step by step instruction here.

</Tab>
<Tab heading="API call using cURL">

Step by step instruction here.

</Tab>
<Tab heading="Web UI">

Step by step instruction here.

</Tab>
</Tabs>

...

## Additional discussion

_Optional_

Oftentimes, support or TAMs ask you to add extra discussion to explain a little
more about cloud-provider-specific pitfalls, etc. You can add that here if it
does not fit anywhere else.

## Next steps

In this section, start with a brief **_summary_** of what you have learned in
this tutorial, re-emphasizing the business value. Then provide some guidance on
the next steps to extend the user's knowledge.

Briefly describe what the user will do in the next tutorial if the current
collection is sequential. Add cross-referencing links to get more information
about the feature (e.g. product doc page, webinar links, blog post, etc.).

================================================
FILE: host_volume/README.md
================================================
## Host Volume Examples

These sample job files will exercise a simple host volume configuration.
They assume that the following volumes are configured somewhere in your cluster: ```hcl host_volume "certs" { path = "/data/certs" read_only = "true" } host_volume "mysql" { path = "/data/mysql" read_only = "false" } host_volume "prometheus" { path = "/data/prometheus" read_only = "false" } host_volume "templates" { path = "/data/templates" read_only = "true" } ``` ================================================ FILE: host_volume/mariadb/mariadb.nomad ================================================ job "mariadb" { datacenters = ["dc1"] group "database" { volume "mysql" { type="host" source = "mysql" } task "maria" { driver = "docker" volume_mount { volume="mysql" destination="/var/lib/mysql" } env { "MYSQL_ROOT_PASSWORD" ="mypass" } config { image = "mariadb/server:10.3" port_map { db=3306 } } resources { cpu=500 memory=256 network { port "db" {} } } service { name = "mariadb" tags = ["persist"] port = "db" check { name="alive" type="tcp" interval="10s" timeout="2s" } } } } } ================================================ FILE: host_volume/prometheus/README.md ================================================ # Prometheus On the client, you will need a rule to allow the docker containers to talk to the local consul agents. ``` firewall-cmd --permanent --zone=public --add-rich-rule='rule family=ipv4 source address=172.17.0.0/16 accept' && firewall-cmd --reload ``` ## Connecting to the instances ================================================ FILE: host_volume/prometheus/grafana/README.md ================================================ Thanks to [Nextty](https://grafana.com/orgs/derekamz) for two great grafana dashboards to start with: * Nomad Jobs - https://grafana.com/dashboards/6281 * Nomad Cluster - ================================================ FILE: host_volume/prometheus/grafana/nomad_jobs.json ================================================ { "__inputs": [ { "name": "DS_PROMETHEUS", "label": "prometheus", "description": "", "type": "datasource", "pluginId": "prometheus", "pluginName": "Prometheus" } ], "__requires": [ { "type": "grafana", "id": "grafana", "name": "Grafana", "version": "5.1.2" }, { "type": "panel", "id": "graph", "name": "Graph", "version": "5.0.0" }, { "type": "datasource", "id": "prometheus", "name": "Prometheus", "version": "5.0.0" } ], "annotations": { "list": [ { "builtIn": 1, "datasource": "-- Grafana --", "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", "type": "dashboard" } ] }, "editable": true, "gnetId": 6281, "graphTooltip": 0, "id": null, "iteration": 1527401878265, "links": [], "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "${DS_PROMETHEUS}", "fill": 1, "gridPos": { "h": 6, "w": 12, "x": 0, "y": 0 }, "id": 2, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "repeat": "host", "repeatDirection": "v", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "avg(nomad_client_allocs_cpu_total_percent{host=~\"$host\"}) by(exported_job, task)", "format": "time_series", "interval": "", "intervalFactor": 1, "legendFormat": "{{task}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeShift": null, "title": "CPU Usage Percent - $host", "tooltip": { "shared": true, "sort": 0, "value_type": 
"individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "decimals": 3, "format": "percentunit", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "${DS_PROMETHEUS}", "fill": 1, "gridPos": { "h": 6, "w": 12, "x": 12, "y": 0 }, "id": 3, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "repeat": "host", "repeatDirection": "v", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "avg(nomad_client_allocs_cpu_total_ticks{host=~\"$host\"}) by(exported_job, task)", "format": "time_series", "interval": "", "intervalFactor": 1, "legendFormat": "{{task}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeShift": null, "title": "CPU Total Ticks - $host", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "decimals": 3, "format": "timeticks", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "${DS_PROMETHEUS}", "fill": 1, "gridPos": { "h": 6, "w": 12, "x": 0, "y": 6 }, "id": 6, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "repeat": "host", "repeatDirection": "v", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "avg(nomad_client_allocs_memory_rss{host=~\"$host\"}) by(exported_job, task)", "format": "time_series", "interval": "", "intervalFactor": 1, "legendFormat": "{{task}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeShift": null, "title": "RSS - $host", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "decimals": 3, "format": "decbytes", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "${DS_PROMETHEUS}", "fill": 1, "gridPos": { "h": 6, "w": 12, "x": 12, "y": 6 }, "id": 7, "legend": { "avg": false, "current": false, "max": false, "min": false, "show": true, "total": false, "values": false }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null", "percentage": false, "pointradius": 5, "points": false, "renderer": "flot", "repeat": "host", "repeatDirection": "v", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, 
"targets": [ { "expr": "avg(nomad_client_allocs_memory_cache{host=~\"$host\"}) by(exported_job, task)", "format": "time_series", "interval": "", "intervalFactor": 1, "legendFormat": "{{task}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeShift": null, "title": "Memory Cache - $host", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "decimals": 3, "format": "decbytes", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } } ], "schemaVersion": 16, "style": "dark", "tags": [], "templating": { "list": [ { "allValue": null, "current": {}, "datasource": "${DS_PROMETHEUS}", "hide": 0, "includeAll": false, "label": "DC", "multi": false, "name": "datacenter", "options": [], "query": "label_values(nomad_client_uptime, datacenter)", "refresh": 1, "regex": "", "sort": 0, "tagValuesQuery": "", "tags": [], "tagsQuery": "", "type": "query", "useTags": false }, { "allValue": null, "current": {}, "datasource": "${DS_PROMETHEUS}", "hide": 0, "includeAll": true, "label": "Host", "multi": true, "name": "host", "options": [], "query": "label_values(nomad_client_uptime{datacenter=~\"$datacenter\"}, host)", "refresh": 2, "regex": "", "sort": 0, "tagValuesQuery": "", "tags": [], "tagsQuery": "", "type": "query", "useTags": false } ] }, "time": { "from": "now-6h", "to": "now" }, "timepicker": { "refresh_intervals": [ "5s", "10s", "30s", "1m", "5m", "15m", "30m", "1h", "2h", "1d" ], "time_options": [ "5m", "15m", "1h", "6h", "12h", "24h", "2d", "7d", "30d" ] }, "timezone": "", "title": "Nomad Jobs", "uid": "TvqbbhViz", "version": 12, "description": "Nomad Jobs metrics" } ================================================ FILE: host_volume/prometheus/prometheus.nomad ================================================ job "prometheus" { datacenters = ["dc1"] type = "service" update { max_parallel = 1 min_healthy_time = "10s" healthy_deadline = "3m" auto_revert = false canary = 0 } group "monitoring" { volume "prometheus" { type="host" config { source="prometheus" } } count = 1 restart { attempts = 10 interval = "5m" delay = "25s" mode = "delay" } ephemeral_disk { size = 1000 } task "grafana" { volume_mount { volume="prometheus" destination="/mnt/prometheus" } artifact { source="https://gist.githubusercontent.com/angrycub/046cee11bd3d8c4ab9a3819646c9660c/raw/c699095c2cb25b896e2c709da588b668ce82f8b5/prometheus_nomad.json" destination="local/provisioning/dashboards/dashs" } template { change_mode="noop" destination="local/provisioning/dashboards/file_provider.yml" data = <<EOH apiVersion: 1 providers: - name: 'default' orgId: 1 folder: '' type: file disableDeletion: false updateIntervalSeconds: 10 #how often Grafana will scan for changed dashboards options: path: {{ env "NOMAD_TASK_DIR" }}/provisioning/dashboards/dashs EOH } template { change_mode="noop" destination="local/provisioning/datasources/prometheus_datasource.yml" data = <<EOH apiVersion: 1 datasources: - name: Prometheus type: prometheus access: proxy url: http://{{ env "NOMAD_ADDR_prometheus_prometheus_ui" }} EOH } env { "GF_SERVER_ROOT_URL"="http://127.0.0.1:9999/grafana/" "GF_PATHS_PROVISIONING"="/${NOMAD_TASK_DIR}/provisioning" } driver = "docker" config { image = "grafana/grafana:6.1.4" port_map { grafana_ui = 3000 } } resources { network { port 
"grafana_ui" {} } } service { name = "grafana-ui" port = "grafana_ui" tags = ["urlprefix-/grafana strip=/grafana"] check { name = "grafana-ui port alive" type = "tcp" interval = "10s" timeout = "2s" } } } task "prometheus" { volume_mount { volume="prometheus" destination="/prometheus/data" } template { change_mode = "noop" destination="local/prometheus.yml" data = <<EOH --- global: scrape_interval: 15s scrape_configs: - job_name: 'prometheus' scrape_interval: 5s static_configs: - targets: ['localhost:9090'] - job_name: 'nomad' scrape_interval: 10s metrics_path: /v1/metrics params: format: ['prometheus'] consul_sd_configs: - server: '{{ env "NOMAD_IP_prometheus_ui" }}:8500' token: "3ef34421-1b20-e543-65d4-54067560d377" services: - "nomad" - "nomad-client" relabel_configs: - source_labels: ['__meta_consul_tags'] regex: .*,http,.* action: keep EOH } driver = "docker" config { image = "prom/prometheus:v2.9.1" args = [ "--web.external-url=http://127.0.0.1:9999/prometheus", "--web.route-prefix=/", "--config.file=/local/prometheus.yml" ] port_map { prometheus_ui = 9090 } } resources { cpu = 500 memory = 256 network { port "prometheus_ui" {} } } service { name = "prometheus-ui" #tags = ["urlprefix-/prometheus"] tags = ["urlprefix-/prometheus strip=/prometheus"] port = "prometheus_ui" check { name = "prometheus_ui port alive" type = "tcp" interval = "10s" timeout = "2s" } } } } } ================================================ FILE: host_volume/read_only/read_only.nomad ================================================ job "example" { datacenters = ["dc1"] group "database" { volume "mysql" { type="host" config { source="mysql" } } volume "certs" { type="host" read_only=true config { source="certs" } } task "maria" { driver = "docker" volume_mount { volume="mysql" destination="/var/lib/mysql" } volume_mount { volume="certs" destination="/certs" } env { "MYSQL_ROOT_PASSWORD" ="mypass" } config { image = "mariadb/server:10.3" port_map { db=3306 } } resources { cpu=500 memory=256 network { port "db" {} } } service { name = "mariadb" tags = ["persist"] port = "db" check { name="alive" type="tcp" interval="10s" timeout="2s" } } } } } ================================================ FILE: http_echo/arm-service.nomad ================================================ job "bar-service" { datacenters = ["dc1"] group "example" { network { port "http" {} } service { name = "bar-service" tags = ["urlprefix-/bar"] port = "http" check { type = "http" name = "health-check" interval = "15s" timeout = "5s" path = "/" } } task "server" { driver = "exec" config { command = "http-echo" args = [ "-listen", ":${NOMAD_PORT_http}", "-text", "<html><body><h1>Welcome to the Bar Service.</h1><hr />You are on ${NOMAD_IP_http}.</body></html>", ] } artifact { source = "https://github.com/hashicorp/http-echo/releases/download/v0.2.3/http-echo_0.2.3_linux_amd64.tar.gz" options { checksum = "sha256:e30b29b72ad5ec1f6dfc8dee0c2fcd162f47127f2251b99e47b9ae8af1d7b917" } } } } } ================================================ FILE: http_echo/bar-service.nomad ================================================ job "bar-service" { datacenters = ["dc1"] group "example" { count = 6 network { port "http" {} } service { name = "bar-service" tags = ["urlprefix-/bar"] port = "http" check { type = "http" name = "health-check" interval = "15s" timeout = "5s" path = "/" } } task "server" { driver = "exec" config { command = "http-echo" args = [ "-listen", ":${NOMAD_PORT_http}", "-text", "<html><body><h1>Welcome to the Bar Service.</h1><hr />You are on 
${NOMAD_IP_http}.</body></html>", ] } artifact { source = "https://github.com/hashicorp/http-echo/releases/download/v0.2.3/http-echo_0.2.3_linux_amd64.tar.gz" options { checksum = "sha256:e30b29b72ad5ec1f6dfc8dee0c2fcd162f47127f2251b99e47b9ae8af1d7b917" } } } } } ================================================ FILE: http_echo/car-service-broken-check.nomad ================================================ job "car-service" { datacenters = ["dc1"] update { max_parallel = 1 health_check = "checks" min_healthy_time = "10s" healthy_deadline = "30s" progress_deadline = "2m" auto_revert = false stagger = "30s" } group "example" { count = 3 network { port "http" {} port "supernotreal" {} } service { name = "car-service" tags = ["urlprefix-/car"] port = "supernotreal" check { type = "http" name = "health-check" interval = "15s" timeout = "5s" path = "/" } } task "server" { driver = "exec" config { command = "http-echo" args = [ "-listen", ":${NOMAD_PORT_http}", "-text", "<html><body><h1>Welcome to the Car Service.</h1><hr />You are on ${NOMAD_IP_http}.</body></html>", ] } resources { memory = 10 } artifact { source = "https://github.com/hashicorp/http-echo/releases/download/v0.2.3/http-echo_0.2.3_linux_amd64.tar.gz" options { checksum = "sha256:e30b29b72ad5ec1f6dfc8dee0c2fcd162f47127f2251b99e47b9ae8af1d7b917" } } } } } ================================================ FILE: http_echo/foo-service.deployment.nomad ================================================ job "foo-service" { datacenters = ["dc1"] meta { foo-service = "true" } group "example" { count = 3 meta { "foo"="bar" } update { max_parallel = 1 min_healthy_time = "10s" healthy_deadline = "3m" auto_revert = false canary = 1 } network { port "http" {} } service { name = "foo-service" tags = ["urlprefix-/foo"] port = "http" check { type = "http" name = "health-check" interval = "15s" timeout = "5s" path = "/" } } service { name = "foo-service-2" tags = ["urlprefix-/foo2"] port = "http" check { type = "http" name = "health-check" interval = "15s" timeout = "5s" path = "/" } } task "server" { driver = "exec" config { command = "http-echo" args = [ "-listen", ":${NOMAD_PORT_http}", "-text", "<html><body><h1>Welcome to the Foo Service.</h1><hr />You are on ${NOMAD_IP_http}.</body></html>", ] } artifact { source = "https://github.com/hashicorp/http-echo/releases/download/v0.2.3/http-echo_0.2.3_linux_amd64.tar.gz" options { checksum = "sha256:e30b29b72ad5ec1f6dfc8dee0c2fcd162f47127f2251b99e47b9ae8af1d7b917" } } } } } ================================================ FILE: http_echo/foo-service.nomad ================================================ job "foo-service" { datacenters = ["dc1"] meta { foo-service = "true" } group "example" { count = 3 network { port "http" {} } service { name = "foo-service" tags = ["urlprefix-/foo"] port = "http" check { type = "http" name = "health-check" interval = "15s" timeout = "5s" path = "/" } } service { name = "foo-service-2" tags = ["urlprefix-/foo2"] port = "http" check { type = "http" name = "health-check" interval = "15s" timeout = "5s" path = "/" } } task "server" { driver = "exec" config { command = "http-echo" args = [ "-listen", ":${NOMAD_PORT_http}", "-text", "<html><body><h1>Welcome to the Foo Service.</h1><hr />You are on ${NOMAD_IP_http}.</body></html>", ] } artifact { source = "https://github.com/hashicorp/http-echo/releases/download/v0.2.3/http-echo_0.2.3_linux_amd64.tar.gz" options { checksum = "sha256:e30b29b72ad5ec1f6dfc8dee0c2fcd162f47127f2251b99e47b9ae8af1d7b917" } } } } } 
================================================ FILE: http_echo/foo-test.nomad ================================================ job "foo-service" { datacenters = ["dc1"] meta { foo-service = "true" } group "example" { count = 3 network { port "http" {} } service { name = "foo-service" tags = ["urlprefix-/foo"] port = "http" check { type = "http" name = "health-check" interval = "15s" timeout = "5s" path = "/" } } service { name = "foo-service-2" tags = ["urlprefix-/foo2"] port = "http" check { type = "http" name = "health-check" interval = "15s" timeout = "5s" path = "/" } } task "server" { driver = "exec" config { command = "usr/sbin/http-echo" args = [ "-listen", ":${NOMAD_PORT_http}", "-text", "<html><body><h1>Welcome to the Foo Service.</h1><hr />You are on ${NOMAD_IP_http}.</body></html>", ] } } } } ================================================ FILE: http_echo/template/echo_template.nomad ================================================ job "http-echo" { datacenters = ["dc1"] update { max_parallel = 1 } group "web" { constraint { distinct_hosts = true } restart { attempts = 10 interval = "5m" delay = "25s" mode = "delay" } network { port "http" { static = 8080 to = 8080 } } service { name = "http-echo" port = "http" check { name = "alive" type = "http" interval = "10s" timeout = "2s" path = "/" } } task "http-echo" { driver = "docker" config { image = "hashicorp/http-echo" args = ["-text", "$content", "-listen",":8080"] ports = ["http"] } template { destination = "local/template.out" env = true data = <<EOH content = " node.unique.id: {{ env "node.unique.id" }} node.datacenter: {{ env "node.datacenter" }} node.unique.name: {{ env "node.unique.name" }} node.class: {{ env "node.class" }} attr.cpu.arch: {{ env "attr.cpu.arch" }} attr.cpu.numcores: {{ env "attr.cpu.numcores" }} attr.cpu.totalcompute: {{ env "attr.cpu.totalcompute" }} attr.consul.datacenter: {{ env "attr.consul.datacenter" }} attr.unique.hostname: {{ env "attr.unique.hostname" }} attr.unique.network.ip-address: {{ env "attr.unique.network.ip-address" }} attr.kernel.name: {{ env "attr.kernel.name" }} attr.kernel.version: {{ env "attr.kernel.version" }} attr.platform.aws.ami-id: {{ env "attr.platform.aws.ami-id" }} attr.platform.aws.instance-type: {{ env "attr.platform.aws.instance-type" }} attr.os.name: {{ env "attr.os.name" }} attr.os.version: {{ env "attr.os.version" }} NOMAD_ALLOC_DIR: {{env "NOMAD_ALLOC_DIR"}} NOMAD_TASK_DIR: {{env "NOMAD_TASK_DIR"}} NOMAD_SECRETS_DIR: {{env "NOMAD_SECRETS_DIR"}} NOMAD_MEMORY_LIMIT: {{env "NOMAD_MEMORY_LIMIT"}} NOMAD_CPU_LIMIT: {{env "NOMAD_CPU_LIMIT"}} NOMAD_ALLOC_ID: {{env "NOMAD_ALLOC_ID"}} NOMAD_ALLOC_NAME: {{env "NOMAD_ALLOC_NAME"}} NOMAD_ALLOC_INDEX: {{env "NOMAD_ALLOC_INDEX"}} NOMAD_TASK_NAME: {{env "NOMAD_TASK_NAME"}} NOMAD_GROUP_NAME: {{env "NOMAD_GROUP_NAME"}} NOMAD_JOB_NAME: {{env "NOMAD_JOB_NAME"}} NOMAD_DC: {{env "NOMAD_DC"}} NOMAD_REGION: {{env "NOMAD_REGION"}} VAULT_TOKEN: {{env "VAULT_TOKEN"}} GOMAXPROCS: {{env "GOMAXPROCS"}} HOME: {{env "HOME"}} LANG: {{env "LANG"}} LOGNAME: {{env "LOGNAME"}} NOMAD_ADDR_export: {{env "NOMAD_ADDR_export"}} NOMAD_ADDR_exstat: {{env "NOMAD_ADDR_exstat"}} NOMAD_ALLOC_DIR: {{env "NOMAD_ALLOC_DIR"}} NOMAD_ALLOC_ID: {{env "NOMAD_ALLOC_ID"}} NOMAD_ALLOC_INDEX: {{env "NOMAD_ALLOC_INDEX"}} NOMAD_ALLOC_NAME: {{env "NOMAD_ALLOC_NAME"}} NOMAD_CPU_LIMIT: {{env "NOMAD_CPU_LIMIT"}} NOMAD_DC: {{env "NOMAD_DC"}} NOMAD_GROUP_NAME: {{env "NOMAD_GROUP_NAME"}} NOMAD_HOST_PORT_export: {{env "NOMAD_HOST_PORT_export"}} NOMAD_HOST_PORT_exstat: {{env 
"NOMAD_HOST_PORT_exstat"}} NOMAD_IP_export: {{env "NOMAD_IP_export"}} NOMAD_IP_exstat: {{env "NOMAD_IP_exstat"}} NOMAD_JOB_NAME: {{env "NOMAD_JOB_NAME"}} NOMAD_MEMORY_LIMIT: {{env "NOMAD_MEMORY_LIMIT"}} NOMAD_PORT_export: {{env "NOMAD_PORT_export"}} NOMAD_PORT_exstat: {{env "NOMAD_PORT_exstat"}} NOMAD_REGION: {{env "NOMAD_REGION"}} NOMAD_SECRETS_DIR: {{env "NOMAD_SECRETS_DIR"}} NOMAD_TASK_DIR: {{env "NOMAD_TASK_DIR"}} NOMAD_TASK_NAME: {{env "NOMAD_TASK_NAME"}} PATH: {{env "PATH"}} PWD: {{env "PWD"}} SHELL: {{env "SHELL"}} SHLVL: {{env "SHLVL"}} USER: {{env "USER"}} VAULT_TOKEN: {{env "VAULT_TOKEN"}} concat key: service/fabio/{{ env "NOMAD_JOB_NAME" }}/listeners key: {{ keyOrDefault ( printf "service/fabio/%s/listeners" ( env "NOMAD_JOB_NAME" ) ) ":9999" }} {{ define "custom" }}service/fabio/{{env "NOMAD_JOB_NAME" }}/listeners{{ end }} key: {{ keyOrDefault (executeTemplate "custom") ":9999" }} math - alloc_id + 1: {{env "NOMAD_ALLOC_INDEX" | parseInt | add 1}} " EOH } } } } ================================================ FILE: http_echo/template/ets.nomad ================================================ job "http-echo" { datacenters = ["dc1"] type = "service" update { max_parallel = 1 } group "web" { constraint { distinct_hosts = true } network { port "http" { static = 8080 to = 8080 } } restart { attempts = 10 interval = "5m" delay = "25s" mode = "delay" } task "http-echo" { driver = "docker" config { image = "hashicorp/http-echo" args = ["-text", "${content}", "-listen", ":8080"] ports = ["http"] } template { destination = "local/template.out" env = true data = <<EOH content='<table><tr><td>node.unique.id</td><td>{{ env "node.unique.id" }}</td></tr><tr><td>node.datacenter</td><td>{{ env "node.datacenter" }}</td></tr><tr><td>node.unique.name</td><td>{{ env "node.unique.name" }}</td></tr><tr><td>node.class</td><td>{{ env "node.class" }}</td></tr><tr><td>attr.cpu.arch</td><td>{{ env "attr.cpu.arch" }}</td></tr><tr><td>attr.cpu.numcores</td><td>{{ env "attr.cpu.numcores" }}</td></tr><tr><td>attr.cpu.totalcompute</td><td>{{ env "attr.cpu.totalcompute" }}</td></tr><tr><td>attr.consul.datacenter</td><td>{{ env "attr.consul.datacenter" }}</td></tr><tr><td>attr.unique.hostname</td><td>{{ env "attr.unique.hostname" }}</td></tr><tr><td>attr.unique.network.ip-address</td><td>{{ env "attr.unique.network.ip-address" }}</td></tr><tr><td>attr.kernel.name</td><td>{{ env "attr.kernel.name" }}</td></tr><tr><td>attr.kernel.version</td><td>{{ env "attr.kernel.version" }}</td></tr><tr><td>attr.platform.aws.ami-id</td><td>{{ env "attr.platform.aws.ami-id" }}</td></tr><tr><td>attr.platform.aws.instance-type</td><td>{{ env "attr.platform.aws.instance-type" }}</td></tr><tr><td>attr.os.name</td><td>{{ env "attr.os.name" }}</td></tr><tr><td>attr.os.version</td><td>{{ env "attr.os.version" }}</td></tr><tr><td>NOMAD_ALLOC_DIR</td><td>{{env "NOMAD_ALLOC_DIR"}}</td></tr><tr><td>NOMAD_TASK_DIR</td><td>{{env "NOMAD_TASK_DIR"}}</td></tr><tr><td>NOMAD_SECRETS_DIR</td><td>{{env "NOMAD_SECRETS_DIR"}}</td></tr><tr><td>NOMAD_MEMORY_LIMIT</td><td>{{env "NOMAD_MEMORY_LIMIT"}}</td></tr><tr><td>NOMAD_CPU_LIMIT</td><td>{{env "NOMAD_CPU_LIMIT"}}</td></tr><tr><td>NOMAD_ALLOC_ID</td><td>{{env "NOMAD_ALLOC_ID"}}</td></tr><tr><td>NOMAD_ALLOC_NAME</td><td>{{env "NOMAD_ALLOC_NAME"}}</td></tr><tr><td>NOMAD_ALLOC_INDEX</td><td>{{env "NOMAD_ALLOC_INDEX"}}</td></tr><tr><td>NOMAD_TASK_NAME</td><td>{{env "NOMAD_TASK_NAME"}}</td></tr><tr><td>NOMAD_GROUP_NAME</td><td>{{env "NOMAD_GROUP_NAME"}}</td></tr><tr><td>NOMAD_JOB_NAME</td><td>{{env 
"NOMAD_JOB_NAME"}}</td></tr><tr><td>NOMAD_DC</td><td>{{env "NOMAD_DC"}}</td></tr><tr><td>NOMAD_REGION</td><td>{{env "NOMAD_REGION"}}</td></tr><tr><td>VAULT_TOKEN</td><td>{{env "VAULT_TOKEN"}}</td></tr><tr><td>GOMAXPROCS</td><td>{{env "GOMAXPROCS"}}</td></tr><tr><td>HOME</td><td>{{env "HOME"}}</td></tr><tr><td>LANG</td><td>{{env "LANG"}}</td></tr><tr><td>LOGNAME</td><td>{{env "LOGNAME"}}</td></tr><tr><td>NOMAD_ADDR_export</td><td>{{env "NOMAD_ADDR_export"}}</td></tr><tr><td>NOMAD_ADDR_exstat</td><td>{{env "NOMAD_ADDR_exstat"}}</td></tr><tr><td>NOMAD_ALLOC_DIR</td><td>{{env "NOMAD_ALLOC_DIR"}}</td></tr><tr><td>NOMAD_ALLOC_ID</td><td>{{env "NOMAD_ALLOC_ID"}}</td></tr><tr><td>NOMAD_ALLOC_INDEX</td><td>{{env "NOMAD_ALLOC_INDEX"}}</td></tr><tr><td>NOMAD_ALLOC_NAME</td><td>{{env "NOMAD_ALLOC_NAME"}}</td></tr><tr><td>NOMAD_CPU_LIMIT</td><td>{{env "NOMAD_CPU_LIMIT"}}</td></tr><tr><td>NOMAD_DC</td><td>{{env "NOMAD_DC"}}</td></tr><tr><td>NOMAD_GROUP_NAME</td><td>{{env "NOMAD_GROUP_NAME"}}</td></tr><tr><td>NOMAD_HOST_PORT_export</td><td>{{env "NOMAD_HOST_PORT_export"}}</td></tr><tr><td>NOMAD_HOST_PORT_exstat</td><td>{{env "NOMAD_HOST_PORT_exstat"}}</td></tr><tr><td>NOMAD_IP_export</td><td>{{env "NOMAD_IP_export"}}</td></tr><tr><td>NOMAD_IP_exstat</td><td>{{env "NOMAD_IP_exstat"}}</td></tr><tr><td>NOMAD_JOB_NAME</td><td>{{env "NOMAD_JOB_NAME"}}</td></tr><tr><td>NOMAD_MEMORY_LIMIT</td><td>{{env "NOMAD_MEMORY_LIMIT"}}</td></tr><tr><td>NOMAD_PORT_export</td><td>{{env "NOMAD_PORT_export"}}</td></tr><tr><td>NOMAD_PORT_exstat</td><td>{{env "NOMAD_PORT_exstat"}}</td></tr><tr><td>NOMAD_REGION</td><td>{{env "NOMAD_REGION"}}</td></tr><tr><td>NOMAD_SECRETS_DIR</td><td>{{env "NOMAD_SECRETS_DIR"}}</td></tr><tr><td>NOMAD_TASK_DIR</td><td>{{env "NOMAD_TASK_DIR"}}</td></tr><tr><td>NOMAD_TASK_NAME</td><td>{{env "NOMAD_TASK_NAME"}}</td></tr><tr><td>PATH</td><td>{{env "PATH"}}</td></tr><tr><td>PWD</td><td>{{env "PWD"}}</td></tr><tr><td>SHELL</td><td>{{env "SHELL"}}</td></tr><tr><td>SHLVL</td><td>{{env "SHLVL"}}</td></tr><tr><td>USER</td><td>{{env "USER"}}</td></tr><tr><td>VAULT_TOKEN</td><td>{{env "VAULT_TOKEN"}}</td></tr></table>' EOH } service { name = "http-echo" port = "http" check { name = "alive" type = "http" interval = "10s" timeout = "2s" path = "/" } } } } } ================================================ FILE: http_echo/template/ets2.nomad ================================================ job "http-echo" { datacenters = ["dc1"] update { max_parallel = 1 } group "web" { constraint { distinct_hosts = true } network { port "http" {} } restart { attempts = 10 interval = "5m" delay = "25s" mode = "delay" } task "server" { driver = "exec" config { command = "/bin/bash" args = [ "-c", "local/http-echo -listen :${NOMAD_PORT_http} -text \"`cat local/template.out`\"" ] } artifact { source = "https://github.com/hashicorp/http-echo/releases/download/v0.2.3/http-echo_0.2.3_linux_amd64.tar.gz" options { checksum = "sha256:e30b29b72ad5ec1f6dfc8dee0c2fcd162f47127f2251b99e47b9ae8af1d7b917" } } template { data = <<EOH <html> <head> <title>Interpolation Demo

Interpolation Demo

                 node.unique.id: {{ env "node.unique.id" }}
                node.datacenter: {{ env "node.datacenter" }}
               node.unique.name: {{ env "node.unique.name" }}
                     node.class: {{ env "node.class" }}
                  attr.cpu.arch: {{ env "attr.cpu.arch" }}
              attr.cpu.numcores: {{ env "attr.cpu.numcores" }}
          attr.cpu.totalcompute: {{ env "attr.cpu.totalcompute" }}
         attr.consul.datacenter: {{ env "attr.consul.datacenter" }}
           attr.unique.hostname: {{ env "attr.unique.hostname" }}
 attr.unique.network.ip-address: {{ env "attr.unique.network.ip-address" }}
               attr.kernel.name: {{ env "attr.kernel.name" }}
            attr.kernel.version: {{ env "attr.kernel.version" }}
       attr.platform.aws.ami-id: {{ env "attr.platform.aws.ami-id" }}
attr.platform.aws.instance-type: {{ env "attr.platform.aws.instance-type" }}
                   attr.os.name: {{ env "attr.os.name" }}
                attr.os.version: {{ env "attr.os.version" }}

                NOMAD_ALLOC_DIR: {{env "NOMAD_ALLOC_DIR"}}
                 NOMAD_TASK_DIR: {{env "NOMAD_TASK_DIR"}}
              NOMAD_SECRETS_DIR: {{env "NOMAD_SECRETS_DIR"}}
             NOMAD_MEMORY_LIMIT: {{env "NOMAD_MEMORY_LIMIT"}}
                NOMAD_CPU_LIMIT: {{env "NOMAD_CPU_LIMIT"}}
                 NOMAD_ALLOC_ID: {{env "NOMAD_ALLOC_ID"}}
               NOMAD_ALLOC_NAME: {{env "NOMAD_ALLOC_NAME"}}
              NOMAD_ALLOC_INDEX: {{env "NOMAD_ALLOC_INDEX"}}
                NOMAD_TASK_NAME: {{env "NOMAD_TASK_NAME"}}
               NOMAD_GROUP_NAME: {{env "NOMAD_GROUP_NAME"}}
                 NOMAD_JOB_NAME: {{env "NOMAD_JOB_NAME"}}
                       NOMAD_DC: {{env "NOMAD_DC"}}
                   NOMAD_REGION: {{env "NOMAD_REGION"}}
                    VAULT_TOKEN: {{env "VAULT_TOKEN"}}

                     GOMAXPROCS: {{env "GOMAXPROCS"}}
                           HOME: {{env "HOME"}}
                           LANG: {{env "LANG"}}
                        LOGNAME: {{env "LOGNAME"}}
              NOMAD_ADDR_export: {{env "NOMAD_ADDR_export"}}
              NOMAD_ADDR_exstat: {{env "NOMAD_ADDR_exstat"}}
                NOMAD_ALLOC_DIR: {{env "NOMAD_ALLOC_DIR"}}
                 NOMAD_ALLOC_ID: {{env "NOMAD_ALLOC_ID"}}
              NOMAD_ALLOC_INDEX: {{env "NOMAD_ALLOC_INDEX"}}
               NOMAD_ALLOC_NAME: {{env "NOMAD_ALLOC_NAME"}}
                NOMAD_CPU_LIMIT: {{env "NOMAD_CPU_LIMIT"}}
                       NOMAD_DC: {{env "NOMAD_DC"}}
               NOMAD_GROUP_NAME: {{env "NOMAD_GROUP_NAME"}}
         NOMAD_HOST_PORT_export: {{env "NOMAD_HOST_PORT_export"}}
         NOMAD_HOST_PORT_exstat: {{env "NOMAD_HOST_PORT_exstat"}}
                NOMAD_IP_export: {{env "NOMAD_IP_export"}}
                NOMAD_IP_exstat: {{env "NOMAD_IP_exstat"}}
                 NOMAD_JOB_NAME: {{env "NOMAD_JOB_NAME"}}
             NOMAD_MEMORY_LIMIT: {{env "NOMAD_MEMORY_LIMIT"}}
              NOMAD_PORT_export: {{env "NOMAD_PORT_export"}}
              NOMAD_PORT_exstat: {{env "NOMAD_PORT_exstat"}}
                   NOMAD_REGION: {{env "NOMAD_REGION"}}
              NOMAD_SECRETS_DIR: {{env "NOMAD_SECRETS_DIR"}}
                 NOMAD_TASK_DIR: {{env "NOMAD_TASK_DIR"}}
                NOMAD_TASK_NAME: {{env "NOMAD_TASK_NAME"}}
                           PATH: {{env "PATH"}}
                            PWD: {{env "PWD"}}
                          SHELL: {{env "SHELL"}}
                          SHLVL: {{env "SHLVL"}}
                           USER: {{env "USER"}}
                    VAULT_TOKEN: {{env "VAULT_TOKEN"}}

   concat key:  service/fabio/{{ env "NOMAD_JOB_NAME" }}/listeners
    key:         {{ keyOrDefault ( printf "service/fabio/%s/listeners" ( env "NOMAD_JOB_NAME" ) ) ":9999" }}

{{ define "custom" }}service/fabio/{{env "NOMAD_JOB_NAME" }}/listeners{{ end }}
    key:         {{ keyOrDefault (executeTemplate "custom") ":9999" }}

   math - alloc_id + 1: {{env "NOMAD_ALLOC_INDEX" | parseInt | add 1}}
EOH
        destination = "local/template.out"
      }
    }
  }
}

================================================
FILE: http_echo/template/ets3.nomad
================================================
job "http-echo" {
  datacenters = ["dc1"]
  type        = "service"

  update {
    max_parallel = 1
  }

  group "web" {
    restart {
      attempts = 10
      interval = "5m"
      delay    = "25s"
      mode     = "delay"
    }

    network {
      port "http" {}
    }

    task "server" {
      driver = "exec"

      config {
        command = "/bin/bash"
        args = [
          "-c",
          "local/http-echo -listen :${NOMAD_PORT_http} -text \"`env`\""
        ]
      }

      artifact {
        source = "https://github.com/hashicorp/http-echo/releases/download/v0.2.3/http-echo_0.2.3_linux_amd64.tar.gz"
        options {
          checksum = "sha256:e30b29b72ad5ec1f6dfc8dee0c2fcd162f47127f2251b99e47b9ae8af1d7b917"
        }
      }

      template {
        destination = "local/template.out"
        data = <<EOH
<html> <head> <title>Interpolation Demo

Interpolation Demo

                 node.unique.id: {{ env "node.unique.id" }}
                node.datacenter: {{ env "node.datacenter" }}
               node.unique.name: {{ env "node.unique.name" }}
                     node.class: {{ env "node.class" }}
                  attr.cpu.arch: {{ env "attr.cpu.arch" }}
              attr.cpu.numcores: {{ env "attr.cpu.numcores" }}
          attr.cpu.totalcompute: {{ env "attr.cpu.totalcompute" }}
         attr.consul.datacenter: {{ env "attr.consul.datacenter" }}
           attr.unique.hostname: {{ env "attr.unique.hostname" }}
 attr.unique.network.ip-address: {{ env "attr.unique.network.ip-address" }}
               attr.kernel.name: {{ env "attr.kernel.name" }}
            attr.kernel.version: {{ env "attr.kernel.version" }}
       attr.platform.aws.ami-id: {{ env "attr.platform.aws.ami-id" }}
attr.platform.aws.instance-type: {{ env "attr.platform.aws.instance-type" }}
                   attr.os.name: {{ env "attr.os.name" }}
                attr.os.version: {{ env "attr.os.version" }}

                NOMAD_ALLOC_DIR: {{env "NOMAD_ALLOC_DIR"}}
                 NOMAD_TASK_DIR: {{env "NOMAD_TASK_DIR"}}
              NOMAD_SECRETS_DIR: {{env "NOMAD_SECRETS_DIR"}}
             NOMAD_MEMORY_LIMIT: {{env "NOMAD_MEMORY_LIMIT"}}
                NOMAD_CPU_LIMIT: {{env "NOMAD_CPU_LIMIT"}}
                 NOMAD_ALLOC_ID: {{env "NOMAD_ALLOC_ID"}}
               NOMAD_ALLOC_NAME: {{env "NOMAD_ALLOC_NAME"}}
              NOMAD_ALLOC_INDEX: {{env "NOMAD_ALLOC_INDEX"}}
                NOMAD_TASK_NAME: {{env "NOMAD_TASK_NAME"}}
               NOMAD_GROUP_NAME: {{env "NOMAD_GROUP_NAME"}}
                 NOMAD_JOB_NAME: {{env "NOMAD_JOB_NAME"}}
                       NOMAD_DC: {{env "NOMAD_DC"}}
                   NOMAD_REGION: {{env "NOMAD_REGION"}}
                    VAULT_TOKEN: {{env "VAULT_TOKEN"}}

                     GOMAXPROCS: {{env "GOMAXPROCS"}}
                           HOME: {{env "HOME"}}
                           LANG: {{env "LANG"}}
                        LOGNAME: {{env "LOGNAME"}}
              NOMAD_ADDR_export: {{env "NOMAD_ADDR_export"}}
              NOMAD_ADDR_exstat: {{env "NOMAD_ADDR_exstat"}}
                NOMAD_ALLOC_DIR: {{env "NOMAD_ALLOC_DIR"}}
                 NOMAD_ALLOC_ID: {{env "NOMAD_ALLOC_ID"}}
              NOMAD_ALLOC_INDEX: {{env "NOMAD_ALLOC_INDEX"}}
               NOMAD_ALLOC_NAME: {{env "NOMAD_ALLOC_NAME"}}
                NOMAD_CPU_LIMIT: {{env "NOMAD_CPU_LIMIT"}}
                       NOMAD_DC: {{env "NOMAD_DC"}}
               NOMAD_GROUP_NAME: {{env "NOMAD_GROUP_NAME"}}
         NOMAD_HOST_PORT_export: {{env "NOMAD_HOST_PORT_export"}}
         NOMAD_HOST_PORT_exstat: {{env "NOMAD_HOST_PORT_exstat"}}
                NOMAD_IP_export: {{env "NOMAD_IP_export"}}
                NOMAD_IP_exstat: {{env "NOMAD_IP_exstat"}}
                 NOMAD_JOB_NAME: {{env "NOMAD_JOB_NAME"}}
             NOMAD_MEMORY_LIMIT: {{env "NOMAD_MEMORY_LIMIT"}}
              NOMAD_PORT_export: {{env "NOMAD_PORT_export"}}
              NOMAD_PORT_exstat: {{env "NOMAD_PORT_exstat"}}
                   NOMAD_REGION: {{env "NOMAD_REGION"}}
              NOMAD_SECRETS_DIR: {{env "NOMAD_SECRETS_DIR"}}
                 NOMAD_TASK_DIR: {{env "NOMAD_TASK_DIR"}}
                NOMAD_TASK_NAME: {{env "NOMAD_TASK_NAME"}}
                           PATH: {{env "PATH"}}
                            PWD: {{env "PWD"}}
                          SHELL: {{env "SHELL"}}
                          SHLVL: {{env "SHLVL"}}
                           USER: {{env "USER"}}
                    VAULT_TOKEN: {{env "VAULT_TOKEN"}}

   concat key:  service/fabio/{{ env "NOMAD_JOB_NAME" }}/listeners
    key:         {{ keyOrDefault ( printf "service/fabio/%s/listeners" ( env "NOMAD_JOB_NAME" ) ) ":9999" }}

{{ define "custom" }}service/fabio/{{env "NOMAD_JOB_NAME" }}/listeners{{ end }}
    key:         {{ keyOrDefault (executeTemplate "custom") ":9999" }}

   math - alloc_id + 1: {{env "NOMAD_ALLOC_INDEX" | parseInt | add 1}}
EOH
      }
    }
  }
}

================================================
FILE: httpd_site/README.md
================================================
# httpd site

This job downloads a website tarball into the allocation, spins up the Apache
webserver Docker image (2.4-alpine), and mounts the site content into the
container.

================================================
FILE: httpd_site/httpd.nomad
================================================
job "httpd_site" {
  datacenters = ["dc1"]
  type        = "service"

  update {
    stagger      = "5s"
    max_parallel = 1
  }

  group "httpd" {
    count = 1

    network {
      port "http" { to = 80 }
    }

    task "httpd-docker" {
      artifact {
        source      = "https://raw.githubusercontent.com/angrycub/nomad_example_jobs/master/httpd_site/site-content.tgz"
        destination = "tarball"
      }

      driver = "docker"

      config {
        image   = "httpd:2.4-alpine"
        volumes = ["tarball:/usr/local/apache2/htdocs"]
        ports   = ["http"]
      }

      resources {
        cpu    = 200
        memory = 32
      }
    }
  }
}

================================================
FILE: httpd_site/make_site.sh
================================================
#!/bin/sh
echo "📦 Creating site tarball..."
cd site-content && tar -zcvf ../site-content.tgz * && cd ..

================================================
FILE: httpd_site/site-content/about.html
================================================
About the job

About the job

The repository that contains this job can be found on GitHub at angrycub/nomad_example_jobs/httpd_site. The specific site code is in the site-content folder.

Return to Home.

================================================
FILE: httpd_site/site-content/css/style.css
================================================
body {
  font-family: "Helvetica Neue", "Helvetica", "Arial", sans-serif;
}

h1 {
  color: white;
  text-shadow: 1px 1px 2px black, 0 0 25px blue, 0 0 5px darkblue;
  width: auto;
  border-bottom: 1px solid #333;
}

code {
  background: #EEE;
  border: 1px solid #CCC;
  border-radius: 5px;
  padding: 3px;
}

================================================
FILE: httpd_site/site-content/index.html
================================================
Welcome to the site

Howdy!

This is an example site that demonstrates fetching a resource as a tarball into a Nomad job and mounting it into a Docker container.

There's an About page too, for fun.

================================================
FILE: ipv6/SimpleHTTPServer/sample.nomad
================================================
# This job will create a SimpleHTTPServer that is IPV6 enabled. This will allow
# a user to browse around in an alloc dir. Not spectacularly useful, but is a
# reasonable facsimile of a real workload.
job "http6" {
  datacenters = ["dc1"]

  group "group" {
    count = 1

    task "server" {
      template {
        data = <

================================================
FILE: java/jar-test/README.md
================================================
```shell-session
$ nomad run jar-test.nomad
==> Monitoring evaluation "b2d818af"
    Evaluation triggered by job "jar-test.nomad"
==> Monitoring evaluation "b2d818af"
    Evaluation within deployment: "a2ba8e63"
    Allocation "6027314e" created: node "14ab9290", group "java"
    Evaluation status changed: "pending" -> "complete"
==> Evaluation "b2d818af" finished with status "complete"
```

```shell-session
$ nomad alloc logs 6027314e
Counted 1515 chars.
```

## Building the source

```shell-session
$ javac --source=7 --target=7 -d bin src/Count.java
$ jar cf jar/Count.jar -C bin .
```

Upload the jar file where you like and update the source in the artifact stanza.

================================================
FILE: java/jar-test/jar-test.nomad
================================================
job "jar-test.nomad" {
  datacenters = ["dc1"]
  type        = "batch"

  group "java" {
    task "sample" {
      artifact {
        source      = "https://github.com/angrycub/nomad_example_jobs/raw/main/java/jar-test/jar/Count.jar"
        destination = "local/artifact/"
        # mode = "any"
        # options {
        #   archive = false
        # }
      }

      template {
        destination = "${NOMAD_TASK_DIR}/textfile.text"
        data = <

================================================
FILE: java/jar-test/src/Count.java
================================================
    if (args.length >= 1) countChars(new FileInputStream(args[0]));
    else System.err.println("Usage: Count filename");
  }
}

================================================
FILE: job_examples/base-batch.nomad
================================================
job "example" {
  datacenters = ["dc1"]

  // because the sample payload terminates, running it as a
  // `batch` job allows for that without having to sleep loop
  type = "batch"

  group "group" {
    task "task" {
      driver = "exec"
      config {
        command = "env"
      }
    }
  }
}

================================================
FILE: job_examples/meta/README.md
================================================
## The `meta` Stanza

The meta stanza can be used to provide unstructured key-value data to a Nomad
job as automatically-exported environment variables. These variables can be
used as provided, or in more complex expressions via the Nomad `template`
stanza.

Documentation for the meta stanza can be found [here](https://www.nomadproject.io/docs/job-specification/meta) in the official Nomad documentation.
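Nomad exports each `meta` key to the task's environment with a `NOMAD_META_` prefix (in both the original-case and upper-cased forms of the key), so the job below can read `meta_key_1` as `NOMAD_META_meta_key_1`. A minimal sketch of the template route — this stanza is illustrative, not a file from this repo:

```hcl
template {
  destination = "local/meta.txt"
  data        = <<EOH
meta_key_1 is: {{ env "NOMAD_META_meta_key_1" }}
EOH
}
```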
================================================ FILE: job_examples/meta/meta-batch.nomad ================================================ job "example" { datacenters = ["dc1"] // because the sample payload terminates, running it as a // `batch` job allows for that without having to sleep loop type = "batch" meta { "meta_key_1" = "meta_value_1" } group "group" { task "task" { driver = "exec" config { command = "env" } } } } ================================================ FILE: json-jobs/example.nomad ================================================ job "example" { datacenters = ["dc1"] group "cache" { network { port "db" { to = 6379 } } task "redis" { driver = "docker" config { image = "redis:7" ports = ["db"] } resources { cpu = 500 memory = 256 } } } } ================================================ FILE: json-jobs/job.json ================================================ { "Job": { "Region": null, "Namespace": null, "ID": "example", "Name": "example", "Type": null, "Priority": null, "AllAtOnce": null, "Datacenters": [ "dc1" ], "Constraints": null, "Affinities": null, "TaskGroups": [ { "Name": "cache", "Count": null, "Constraints": null, "Affinities": null, "Tasks": [ { "Name": "redis", "Driver": "docker", "User": "", "Lifecycle": null, "Config": { "image": "redis:7", "ports": [ "db" ] }, "Constraints": null, "Affinities": null, "Env": null, "Services": null, "Resources": { "CPU": 500, "MemoryMB": 256, "DiskMB": null, "Networks": null, "Devices": null, "IOPS": null }, "RestartPolicy": null, "Meta": null, "KillTimeout": null, "LogConfig": null, "Artifacts": null, "Vault": null, "Templates": null, "DispatchPayload": null, "VolumeMounts": null, "Leader": false, "ShutdownDelay": 0, "KillSignal": "", "Kind": "", "ScalingPolicies": null } ], "Spreads": null, "Volumes": null, "RestartPolicy": null, "ReschedulePolicy": null, "EphemeralDisk": null, "Update": null, "Migrate": null, "Networks": [ { "Mode": "", "Device": "", "CIDR": "", "IP": "", "DNS": null, "ReservedPorts": null, "DynamicPorts": [ { "Label": "db", "Value": 0, "To": 6379, "HostNetwork": "" } ], "MBits": null } ], "Meta": null, "Services": null, "ShutdownDelay": null, "StopAfterClientDisconnect": null, "Scaling": null } ], "Update": null, "Multiregion": null, "Spreads": null, "Periodic": null, "ParameterizedJob": null, "Reschedule": null, "Migrate": null, "Meta": null, "ConsulToken": null, "VaultToken": null, "Stop": null, "ParentID": null, "Dispatched": false, "Payload": null, "VaultNamespace": null, "NomadTokenID": null, "Status": null, "StatusDescription": null, "Stable": null, "Version": null, "SubmitTime": null, "CreateIndex": null, "ModifyIndex": null, "JobModifyIndex": null } } ================================================ FILE: load_balancers/traefik/README.md ================================================ ## Load Balancing with Traefik This material is from the HashiCorp [Learn tutorial][] [learn tutorial]: https://learn.hashicorp.com/nomad/load-balancing/traefik ================================================ FILE: load_balancers/traefik/traefik.nomad ================================================ job "traefik" { datacenters = ["dc1"] group "traefik" { network { port "http" { static = 8080 } port "api" { static = 8081 } } service { name = "traefik" check { name = "alive" type = "tcp" port = "http" interval = "10s" timeout = "2s" } } task "traefik" { driver = "docker" config { image = "traefik:v2.2" network_mode = "host" volumes = [ "local/traefik.toml:/etc/traefik/traefik.toml", ] } template { data = < 
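The template above renders the `traefik.toml` that the container mounts. For Traefik v2.2 backed by Consul Catalog, as in the Learn tutorial this job comes from, such a file generally looks like the sketch below; the entry-point ports mirror the job's static ports, but treat the details as illustrative rather than as the repo's exact file:

```toml
[entryPoints]
  [entryPoints.http]
    address = ":8080"
  [entryPoints.traefik]
    address = ":8081"

[api]
  dashboard = true
  insecure  = true

# Discover services registered in the local Consul agent; only services
# tagged with the "traefik" prefix are exposed.
[providers.consulCatalog]
  prefix           = "traefik"
  exposedByDefault = false

  [providers.consulCatalog.endpoint]
    address = "127.0.0.1:8500"
    scheme  = "http"
```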
/var/volume/eula.txt; cp local/server.jar /var/volume"] } artifact { source = "https://launcher.mojang.com/v1/objects/bb2b6b1aefcd70dfd1892149ac3a215f6c636b07/server.jar" destination = "local" } lifecycle { hook = "prestart" sidecar = false } } task "minecraft" { driver = "java" config { jar_path = "/var/volume/server.jar" args = ["--nogui"], jvm_options = ["-Xms1024M", "-Xmx2048M"] } resources { cpu = 500 memory = 500 } volume_mount { volume = "minecraft" destination = "/var/volume" } } } } ================================================ FILE: minecraft/minecraft_exec.nomad ================================================ job "minecraft" { datacenters = ["dc1"] type = "service" group "minecraft" { volume "minecraft" { type = "host" source = "minecraft" } task "eula" { driver = "exec" volume_mount { volume = "minecraft" destination = "/var/volume" } config { command = "/bin/sh" args = ["-c", "echo 'eula=true' > /var/volume/eula.txt"] } lifecycle { hook = "prestart" sidecar = false } } task "minecraft" { driver = "exec" config { command = "/bin/sh" args = ["-c", "cd /var/volume && exec java -Xms1024M -Xmx2048M -jar /local/server.jar --nogui; while true; do sleep 5; done"] } artifact { source = "https://launcher.mojang.com/v1/objects/bb2b6b1aefcd70dfd1892149ac3a215f6c636b07/server.jar" destination = "/var/volume" } resources { cpu = 500 memory = 500 } volume_mount { volume = "minecraft" destination = "/var/volume" } } } } ================================================ FILE: minecraft/plugin.nomad ================================================ job "csi-plugin" { datacenters = ["dc1"] group "csi" { task "plugin" { driver = "docker" config { image = "quay.io/k8scsi/hostpathplugin:v1.2.0" privileged = true args = [ "--drivername=csi-hostpath", "--v=5", "--endpoint=unix://csi/csi.sock", "--nodeid=foo", ] } csi_plugin { id = "hostpath-plugin0" type = "monolith" mount_dir = "/csi" } } } } ================================================ FILE: monitoring/sensu/fabio-docker.nomad ================================================ job "fabio" { datacenters = ["dc1"] type = "system" update { stagger = "5s" max_parallel = 1 } group "fabio" { network { port "proxy" { static = 9999 to = 9999 } port "ui" { static = 9998 to = 9998 } } task "fabio-docker" { driver = "docker" config { image = "fabiolb/fabio:latest" network_mode = "host" ports = ["proxy","ui"] } env { # FABIO_registry_consul_addr="${attr.unique.network.ip-address}:8500" } resources { cpu = 200 memory = 32 } } } } ================================================ FILE: monitoring/sensu/sensu.nomad ================================================ job "sensu" { datacenters = ["dc1"] type = "service" update { max_parallel = 1 min_healthy_time = "10s" healthy_deadline = "3m" progress_deadline = "10m" auto_revert = false canary = 0 } migrate { max_parallel = 1 health_check = "checks" min_healthy_time = "10s" healthy_deadline = "5m" } group "sensu-backend" { restart { attempts = 2 interval = "30m" delay = "15s" mode = "fail" } network { port "web_ui"{ to = 3000 } port "api" { to = 8080 } port "ws_api"{ to = 8081 } } service { name = "sensu" tags = ["ui", "urlprefix-/sensu strip=/sensu"] port = "web_ui" check { name = "alive" type = "tcp" interval = "10s" timeout = "2s" } } task "sensu-docker" { driver = "docker" config { image = "sensu/sensu:latest" command = "sensu-backend" args = [ "start", "--state-dir", "/var/lib/sensu/sensu-backend", "--log-level", "debug", ] ports = ["web_ui","api","ws_api"] } env { SENSU_BACKEND_CLUSTER_ADMIN_USERNAME = 
"sensu_admin" SENSU_BACKEND_CLUSTER_ADMIN_PASSWORD = "password" } } } } ================================================ FILE: nginx-fabio-clone/README.md ================================================ # Creating a nginx configuration from fabio-style tagging ### Files * foo-service.nomad - the foo job. Exercises the path stripping * bar-service.nomad - the bar job * tj.out = john's template rendered works, but fugly * tj.ct = john's template * e.ct example consul template trying to be fancy af * e.out rendered template ### Render template ``` consul-template --template="e.ct:e.out" --once ``` ================================================ FILE: nginx-fabio-clone/bar-service.nomad ================================================ job "bar-service" { datacenters = ["dc1"] group "example" { count = 3 network { port "http" {} } service { name = "bar-service" tags = ["urlprefix-/bar"] port = "http" check { type = "http" name = "health-check" interval = "15s" timeout = "5s" path = "/" } } task "server" { driver = "exec" config { command = "http-echo" args = [ "-listen", ":${NOMAD_PORT_http}", "-text", "

Welcome to the Bar Service.


You are on ${NOMAD_IP_http}.", ] } artifact { source = "https://github.com/hashicorp/http-echo/releases/download/v0.2.3/http-echo_0.2.3_linux_amd64.tar.gz" options { checksum = "sha256:e30b29b72ad5ec1f6dfc8dee0c2fcd162f47127f2251b99e47b9ae8af1d7b917" } } } } } ================================================ FILE: nginx-fabio-clone/e.ct ================================================ {{- range services -}} {{- $name := .Name -}} {{- $service := service .Name -}} {{- if ne $name "nginx-wdg-lb-aus" -}} {{- if ne $name "nginx-wdg-lb" }} upstream {{$name}} { {{- range $service }} server {{ .Address }}:{{ .Port }} max_fails=3 fail_timeout=60 weight=1; {{- end }} } {{- end -}} {{- end -}} {{- end }} {{- range $tag, $services := services | byTag -}} {{- if $tag | regexMatch "urlprefix-[^:]" -}} {{- $opts := ($tag | replaceAll "urlprefix-" "urlprefix=") -}} {{- range $opt := $opts | split " " -}} {{- $splitOpt := $opt | split "=" -}} {{- scratch.Set (index $splitOpt 0) (index $splitOpt 1) -}} {{- end -}} location {{ scratch.Get "urlprefix" }} {{- if scratch.Key "strip" -}} {{- $regex := ( scratch.Get "strip" | regexReplaceAll "(.*)" "^$1" ) -}} {{- scratch.Set "urlprefix" ( scratch.Get "urlprefix" | regexReplaceAll $regex "" ) -}} rewrite {{- end }} proxy-pass http://{{ scratch.Get "urlprefix" }} {{ end -}} {{- end -}} ================================================ FILE: nginx-fabio-clone/e.out ================================================ upstream bar-service { server 10.0.0.172:31815 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.108:24839 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.128:31970 max_fails=3 fail_timeout=60 weight=1; } upstream consul { server 10.0.0.52:8300 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.132:8300 max_fails=3 fail_timeout=60 weight=1; } upstream foo-service { server 10.0.0.172:24438 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.108:25861 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.128:24545 max_fails=3 fail_timeout=60 weight=1; } upstream foo-service-2 { server 10.0.0.172:24438 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.108:25861 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.128:24545 max_fails=3 fail_timeout=60 weight=1; } upstream nomad { server 10.0.0.89:4647 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.89:4648 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.89:4646 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.208:4646 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.208:4647 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.208:4648 max_fails=3 fail_timeout=60 weight=1; } upstream nomad-client { server 10.0.0.172:4646 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.108:4646 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.128:4646 max_fails=3 fail_timeout=60 weight=1; } ------------- upstream bar-service { server 10.0.0.172:31815 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.108:24839 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.128:31970 max_fails=3 fail_timeout=60 weight=1; } upstream consul { server 10.0.0.52:8300 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.132:8300 max_fails=3 fail_timeout=60 weight=1; } upstream foo-service { server 10.0.0.172:24438 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.108:25861 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.128:24545 max_fails=3 fail_timeout=60 weight=1; } upstream foo-service-2 { server 10.0.0.172:24438 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.108:25861 max_fails=3 fail_timeout=60 weight=1; 
server 10.0.0.128:24545 max_fails=3 fail_timeout=60 weight=1; } upstream nomad { server 10.0.0.89:4647 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.89:4648 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.89:4646 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.208:4646 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.208:4647 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.208:4648 max_fails=3 fail_timeout=60 weight=1; } upstream nomad-client { server 10.0.0.172:4646 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.108:4646 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.128:4646 max_fails=3 fail_timeout=60 weight=1; } ================================================ FILE: nginx-fabio-clone/example.nomad ================================================ job "nginx" { datacenters = ["dc1"] update { max_parallel = 1 min_healthy_time = "10s" healthy_deadline = "3m" auto_revert = false canary = 0 } group "group" { restart { attempts = 10 interval = "5m" delay = "25s" mode = "delay" } network { port "http" { to = 80 } } service { name = "nginx" tags = ["lb"] port = "http" check { name = "alive" type = "tcp" interval = "10s" timeout = "2s" } } task "nginx_docker" { driver = "docker" config { image = "nginx:1.13.11" volumes = ["local/nginx.conf:/etc/nginx/conf.d/default.conf"] ports = ["http"] } template { destination = "local/nginx.conf" change_mode = "signal" change_signal = "SIGHUP" data = <

Welcome to the Foo Service.


You are on ${NOMAD_IP_http}.", ] } artifact { source = "https://github.com/hashicorp/http-echo/releases/download/v0.2.3/http-echo_0.2.3_linux_amd64.tar.gz" options { checksum = "sha256:e30b29b72ad5ec1f6dfc8dee0c2fcd162f47127f2251b99e47b9ae8af1d7b917" } } } } } ================================================ FILE: nginx-fabio-clone/tj.ct ================================================ {{range services}} {{$name := .Name}} {{$service := service .Name}}{{if ne $name "nginx-wdg-lb-aus"}}{{if ne $name "nginx-wdg-lb"}} upstream {{$name}} { {{range $service}} server {{.Address}}:{{.Port}} max_fails=3 fail_timeout=60 weight=1; {{end}}} {{end}}{{end}}{{end}} server { listen 80; location / { root /usr/share/nginx/html/; index index.html; } location /status { stub_status; } {{range $services := services}} {{$name := .Name}}{{range $s_index, $service := service $name}}{{if eq $s_index 0}}{{range $tags := .Tags}}{{$portmap := . | regexMatch "urlprefix-:"}}{{if not $portmap}}{{if . | regexMatch "urlprefix-"}} location {{$tags | regexReplaceAll "urlprefix-" "" | regexReplaceAll "strip=.*$" ""}} { rewrite {{ $tags | regexReplaceAll "urlprefix-" "" | regexReplaceAll "\\s*strip\\s*=.*\\s*$" "" }}/(.*)$ /$1 break; proxy_pass http://{{$name}}; }{{end}}{{end}}{{end}}{{end}}{{end}}{{end}} } ================================================ FILE: nginx-fabio-clone/tj.out ================================================ upstream bar-service { server 10.0.0.172:31815 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.108:24839 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.128:31970 max_fails=3 fail_timeout=60 weight=1; } upstream consul { server 10.0.0.52:8300 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.132:8300 max_fails=3 fail_timeout=60 weight=1; } upstream foo-service { server 10.0.0.172:24438 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.108:25861 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.128:24545 max_fails=3 fail_timeout=60 weight=1; } upstream foo-service-2 { server 10.0.0.172:24438 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.108:25861 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.128:24545 max_fails=3 fail_timeout=60 weight=1; } upstream nomad { server 10.0.0.89:4647 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.89:4648 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.89:4646 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.208:4646 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.208:4647 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.208:4648 max_fails=3 fail_timeout=60 weight=1; } upstream nomad-client { server 10.0.0.172:4646 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.108:4646 max_fails=3 fail_timeout=60 weight=1; server 10.0.0.128:4646 max_fails=3 fail_timeout=60 weight=1; } server { listen 80; location / { root /usr/share/nginx/html/; index index.html; } location /status { stub_status; } location /bar { rewrite /bar/(.*)$ /$1 break; proxy_pass http://bar-service; } location /foo { rewrite /foo/(.*)$ /$1 break; proxy_pass http://foo-service; } location /foo/foo2 { rewrite /foo/foo2/(.*)$ /$1 break; proxy_pass http://foo-service-2; } } ================================================ FILE: oom/example.nomad ================================================ job "example" { datacenters = ["dc1"] group "cache" { network { port "db" { to = 6379 } } task "redis" { driver = "docker" config { image = "redis:7" ports = ["db"] auth_soft_fail = true } resources { memory = 10 } } } } ================================================ FILE: 
output.html ================================================ Nomad Job Tester Output
Filename | Output
🔴./HCL2/add_local_file/raw_file_b64.nomad
Error getting job struct: Error parsing job file from ./HCL2/add_local_file/raw_file_b64.nomad:
: Unset variable "input_file"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
🔴./HCL2/add_local_file/raw_file_delims.nomad
Error getting job struct: Error parsing job file from ./HCL2/add_local_file/raw_file_delims.nomad:
: Unset variable "input_file"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
🔴./HCL2/add_local_file/raw_file_json.nomad
Error getting job struct: Error parsing job file from ./HCL2/add_local_file/raw_file_json.nomad:
: Unset variable "input_file"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
🔴./HCL2/add_local_file/use_file.nomad
Error getting job struct: Error parsing job file from ./HCL2/add_local_file/use_file.nomad:
: Unset variable "input_file"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
./HCL2/always_change/before.nomad
./HCL2/always_change/uuid.nomad
🔴./HCL2/always_change/variable.nomad
Error getting job struct: Error parsing job file from ./HCL2/always_change/variable.nomad:
: Unset variable "run_index"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
./HCL2/dynamic/example.nomad
./HCL2/object_to_template/example.nomad
🔴./HCL2/variable_jobs/decode-external-file/job1.nomad
Error getting job struct: Error parsing job file from ./HCL2/variable_jobs/decode-external-file/job1.nomad:
: Unset variable "config_file"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
🔴./HCL2/variable_jobs/decode-external-file/job2.nomad
Error getting job struct: Error parsing job file from ./HCL2/variable_jobs/decode-external-file/job2.nomad:
: Unset variable "config_file"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
🔴./HCL2/variable_jobs/env-vars/job1.nomad
Error getting job struct: Error parsing job file from ./HCL2/variable_jobs/env-vars/job1.nomad:
: Unset variable "datacenters"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
: Unset variable "docker_image_job1"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
🔴./HCL2/variable_jobs/env-vars/job2.nomad
Error getting job struct: Error parsing job file from ./HCL2/variable_jobs/env-vars/job2.nomad:
: Unset variable "datacenters"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
: Unset variable "docker_image_job2"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
🔴./HCL2/variable_jobs/job.nomad
Error getting job struct: Error parsing job file from ./HCL2/variable_jobs/job.nomad:
: Unset variable "docker_image"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
: Unset variable "image_version"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
🔴./HCL2/variable_jobs/multiple-var-files/job1.nomad
Error getting job struct: Error parsing job file from ./HCL2/variable_jobs/multiple-var-files/job1.nomad:
: Unset variable "datacenters"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
: Unset variable "docker_image"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
: Unset variable "image_version_job1"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
🔴./HCL2/variable_jobs/multiple-var-files/job2.nomad
Error getting job struct: Error parsing job file from ./HCL2/variable_jobs/multiple-var-files/job2.nomad:
: Unset variable "datacenters"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
: Unset variable "docker_image"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
: Unset variable "image_version_job2"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
🔴./HCL2/variable_jobs/multiple-var-files/job3.nomad
Error getting job struct: Error parsing job file from ./HCL2/variable_jobs/multiple-var-files/job3.nomad:
: Unset variable "docker_image"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
: Unset variable "image_version_job3"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
: Unset variable "datacenters"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
./alloc_folder/mount_alloc.nomad
./alloc_folder/sidecar.nomad
./applications/artifactory_oss/registry.nomad
./applications/cluster-broccoli/example.nomad
./applications/docker_registry/registry.nomad
./applications/docker_registry_v2/registry.nomad
./applications/docker_registry_v3/registry.nomad
./applications/mariadb/mariadb.nomad
./applications/membrane-soa/soap-proxy-v1-linux.nomad
./applications/membrane-soa/soap-proxy-v1-windows.nomad
./applications/membrane-soa/soap-proxy.nomad
./applications/minio/minio.nomad
./applications/minio/secure-variables/minio.nomad
./applications/postgres/postgres.nomad
./applications/prometheus/fabio-service.nomad
./applications/prometheus/node-exporter.nomad
./applications/prometheus/prometheus.nomad
./applications/vms/freedos/freedos.nomad
./applications/vms/tinycore/tc_ssh.nomad
🔴./applications/wordpress/distributed/build-site.nomad
Error getting job struct: Error parsing job file from ./applications/wordpress/distributed/build-site.nomad:
build-site.nomad:22,5-6: Invalid character; This character is not used within the language., and 15 other diagnostic(s)
./applications/wordpress/distributed/nginx.nomad
./applications/wordpress/distributed/wordpress-db.nomad
🔴./applications/wordpress/distributed/wordpress.nomad
Error getting job struct: Error parsing job file from ./applications/wordpress/distributed/wordpress.nomad:
: Unset variable "site_name"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
./applications/wordpress/simple/wordpress.nomad
./artifact_sleepyecho/artifact_sleepyecho.nomad
./artifact_sleepyecho/vault_sleepyecho.nomad
./batch/batch_gc/example.nomad
./batch/dispatch/sleepy.nomad
./batch/dispatch/sleepy1.nomad
./batch/dispatch/sleepy10.nomad
./batch/dispatch/sleepy2.nomad
./batch/dispatch/sleepy3.nomad
./batch/dispatch/sleepy4.nomad
./batch/dispatch/sleepy5.nomad
./batch/dispatch/sleepy6.nomad
./batch/dispatch/sleepy7.nomad
./batch/dispatch/sleepy8.nomad
./batch/dispatch/sleepy9.nomad
./batch/dont_restart_fail/example.nomad
./batch/lost_batch/batch.nomad
./batch/lost_batch/periodic.nomad
./batch/periodic/prohibit-overlap.nomad
./batch/periodic/template.nomad
🔴./batch/spread_batch/example.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
example.nomad:6,5-6: Invalid argument name; Argument names must not be quoted.
🔴./batch/spread_batch/example2.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
example2.nomad:6,5-6: Invalid argument name; Argument names must not be quoted.
./batch_overload/example.nomad
./batch_overload/periodic.nomad
./blocked_eval/example.nomad
./cni/diy_brige/example.nomad
./cni/diy_brige/repro.nomad
./cni/example.nomad
./complex_meta/template_env.nomad
🔴./complex_meta/template_meta.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
template_meta.nomad:14,7-8: Invalid argument name; Argument names must not be quoted.
./connect/consul.nomad
🔴./connect/discuss/job.nomad
Error getting job struct: Error parsing job file from ./connect/discuss/job.nomad:
: Unset variable "config_data"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
./connect/dns-via-mesh/consul-dns.nomad
./connect/dns-via-mesh/consul-dns2.nomad
./connect/ingress_gateways/ingress_gateway.nomad
./connect/native/cn-demo.nomad
./connect/nginx_ingress/countdash.nomad
./connect/nginx_ingress/ingress.nomad
./connect/sidecar/countdash.nomad
./connect/sidecar/countdash2.nomad
🔴./consul/add_check/e1.nomad
Error getting job struct: Error parsing job file from ./consul/add_check/e1.nomad:
e1.nomad:21,1-1: Argument or block definition required; An argument or block definition is required here.
./consul/add_check/e2.nomad
./consul/add_check/e3.nomad
./consul/use_consul_for_kv_path/template.nomad
🔴./consul-template/coordination/sample.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
sample.nomad:19,24-25: Extra characters after interpolation expression; Expected a closing brace to end the interpolation expression, but found extra characters.
./consul-template/missing_vault_value/sample.nomad
./consul-template/my_first_kv/example.nomad
./countdash/connect/countdash.nomad
./countdash/simple/countdash.nomad
🔴./csi/aws/ebs/busybox.nomad
Error during plan: Unexpected response code: 500 (1 error occurred:
	* Task group mysql validation failed: 1 error occurred:
	* Task group volume validation for mysql failed: 2 errors occurred:
	* CSI volumes must have an attachment mode
	* CSI volumes must have an access mode)
🔴./csi/aws/ebs/mysql-server.nomad
Error during plan: Unexpected response code: 500 (1 error occurred:
	* Task group mysql-server validation failed: 1 error occurred:
	* Task group volume validation for mysql failed: 2 errors occurred:
	* CSI volumes must have an attachment mode
	* CSI volumes must have an access mode)
./csi/aws/ebs/plugin-ebs-controller.nomad
./csi/aws/ebs/plugin-ebs-nodes.nomad
🔴./csi/aws/efs/busybox.nomad
Error during plan: Unexpected response code: 500 (1 error occurred:
	* Task group group validation failed: 1 error occurred:
	* Task group volume validation for jobVolume failed: 2 errors occurred:
	* CSI volumes must have an attachment mode
	* CSI volumes must have an access mode)
./csi/aws/efs/node.nomad
🔴./csi/gcp/gce-pd/config.nomad
Error getting job struct: Error parsing job file from ./csi/gcp/gce-pd/config.nomad:
config.nomad:1,1-7: Unsupported block type; Blocks of type "plugin" are not expected here.
🔴./csi/gcp/gce-pd/controller.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
controller.nomad:13,12-13: Invalid argument name; Argument names must not be quoted.
🔴./csi/gcp/gce-pd/job.nomad
Error during plan: Unexpected response code: 500 (1 error occurred:
	* Task group alloc validation failed: 1 error occurred:
	* Task group volume validation for jobVolume failed: 2 errors occurred:
	* CSI volumes must have an attachment mode
	* CSI volumes must have an access mode)
🔴./csi/gcp/gce-pd/nodes.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
nodes.nomad:13,13-14: Argument or block definition required; An argument or block definition is required here.
🔴./csi/hetzner/volume/config.nomad
Error getting job struct: Error parsing job file from ./csi/hetzner/volume/config.nomad:
config.nomad:1,1-7: Unsupported block type; Blocks of type "plugin" are not expected here.
🔴./csi/hetzner/volume/job.nomad
Error during plan: Unexpected response code: 500 (1 error occurred:
	* Task group alloc validation failed: 1 error occurred:
	* Task group volume validation for jobVolume failed: 2 errors occurred:
	* CSI volumes must have an attachment mode
	* CSI volumes must have an access mode)
./csi/hetzner/volume/node.nomad
./csi/hostpath/block/csi-hostpath-driver.nomad
🔴./csi/hostpath/block/job.nomad
Error during plan: Unexpected response code: 500 (1 error occurred:
	* Task group alloc validation failed: 1 error occurred:
	* Task group volume validation for jobVolume failed: 2 errors occurred:
	* CSI volumes must have an attachment mode
	* CSI volumes must have an access mode)
./csi/hostpath/file/csi-hostpath-driver.nomad
🔴./csi/hostpath/file/job.nomad
Error during plan: Unexpected response code: 500 (1 error occurred:
	* Task group alloc validation failed: 1 error occurred:
	* Task group volume validation for jobVolume failed: 2 errors occurred:
	* CSI volumes must have an attachment mode
	* CSI volumes must have an access mode)
./deployments/failing_deployment/example.nomad
./docker/auth_from_template/auth.nomad
🔴./docker/datadog/container_network.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
container_network.nomad:8,51-52: Unexpected comma after argument; Argument definitions must be separated by newlines, not commas. An argument definition must end with a newline.
🔴./docker/datadog/ex3.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
ex3.nomad:8,51-52: Unexpected comma after argument; Argument definitions must be separated by newlines, not commas. An argument definition must end with a newline.
🔴./docker/datadog/example2.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
example2.nomad:8,51-52: Unexpected comma after argument; Argument definitions must be separated by newlines, not commas. An argument definition must end with a newline.
🔴./docker/docker+host_volume/task_deps.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
task_deps.nomad:25,26-32: Invalid single-argument block definition; A single-line block definition must end with a closing brace immediately after its single argument definition., and 1 other diagnostic(s)
🔴./docker/docker+host_volume/unsafe.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
unsafe.nomad:26,1-27,1: Invalid single-argument block definition; An argument definition on the same line as its containing block creates a single-line block definition, which must also be closed on the same line. Place the block's closing brace immediately after the argument definition., and 1 other diagnostic(s)
./docker/docker_dynamic_hostname/finished.nomad
./docker/docker_entrypoint/example.nomad
./docker/docker_image_not_found/reschedule.nomad
🔴./docker/docker_image_not_found/restart.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
restart.nomad:4,5-6: Invalid argument name; Argument names must not be quoted.
./docker/docker_interpolated_image_name/example.nomad
./docker/docker_interpolated_image_name/hostname.nomad
./docker/docker_logging/example.nomad
./docker/docker_mac_address/example.nomad
🔴./docker/docker_network/example1.nomad
Error during plan: Unexpected response code: 500 (1 error occurred:
	* Task group cache validation failed: 1 error occurred:
	* Task 2 redefines 'redis' from task 1)
./docker/docker_network/example2.nomad
🔴./docker/docker_nfs/example.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
example.nomad:15,28-29: Missing key/value separator; Expected an equals sign ("=") to mark the beginning of the attribute value., and 2 other diagnostic(s)
🔴./docker/docker_template/example.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
example.nomad:22,28-29: Missing key/value separator; Expected an equals sign ("=") to mark the beginning of the attribute value., and 1 other diagnostic(s)
🔴./docker/docker_twice_in_alloc/example.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
example.nomad:10,19-28: Argument definition required; A single-line block definition can contain only a single argument. If you meant to define argument "network", use an equals sign to assign it a value. To define a nested block, place it on a line of its own within its parent block., and 1 other diagnostic(s)
./docker/docker_windows_abs_mount/repro.nomad
./docker/env_var_args/start.nomad
./docker/env_var_args/test.nomad
./docker/get_fact_from_consul/args.nomad
./docker/get_fact_from_consul/image.nomad
🔴./docker/host-volumes-and-users/scratch.nomad
Error getting job struct: Error parsing job file from ./docker/host-volumes-and-users/scratch.nomad:
scratch.nomad:17,7-12: Unsupported argument; An argument named "group" is not expected here.
scratch.nomad:34,7-12: Unsupported argument; An argument named "group" is not expected here.
🔴./docker/labels/heredoc.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
heredoc.nomad:15,11-14: Argument or block definition required; An argument or block definition is required here. To set an argument, use the equals sign "=" to introduce the argument value.
🔴./docker/labels/interpolation.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
interpolation.nomad:32,11-14: Argument or block definition required; An argument or block definition is required here. To set an argument, use the equals sign "=" to introduce the argument value.
🔴./docker/labels/literal.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
literal.nomad:11,11-12: Invalid argument name; Argument names must not be quoted.
./docker/mount_alloc/example.nomad
./drain/example.nomad
./dummy/example.nomad
./echo_stack/fabio-system.nomad
./echo_stack/login-service.nomad
./echo_stack/profile-service.nomad
./env/escaped_env_vars/example.nomad
./environment/example.nomad
./exec/host-volumes-and-users/scratch.nomad
./exec-zip/example.nomad
./fabio/fabio-docker.nomad
./fabio/fabio-service.nomad
./fabio/fabio-system.nomad
./fabio-ssl/fabio-ssl.nomad
🔴./failing_jobs/failing_sidecar/example.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
example.nomad:11,19-28: Argument definition required; A single-line block definition can contain only a single argument. If you meant to define argument "network", use an equals sign to assign it a value. To define a nested block, place it on a line of its own within its parent block.
./failing_jobs/impossible_constratint/example.nomad
🔴./giant/example.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
example.nomad:5,35-41: Invalid single-argument block definition; A single-line block definition must end with a closing brace immediately after its single argument definition.
🔴./host_volume/mariadb/mariadb.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
mariadb.nomad:5,35-41: Invalid single-argument block definition; A single-line block definition must end with a closing brace immediately after its single argument definition.
🔴./host_volume/prometheus/prometheus.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
prometheus.nomad:12,39-45: Invalid single-argument block definition; A single-line block definition must end with a closing brace immediately after its single argument definition., and 2 other diagnostic(s)
🔴./host_volume/read_only/read_only.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
read_only.nomad:5,34-40: Invalid single-argument block definition; A single-line block definition must end with a closing brace immediately after its single argument definition.
./http_echo/arm-service.nomad
./http_echo/bar-service.nomad
./http_echo/car-service-broken-check.nomad
🔴./http_echo/foo-service.deployment.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
foo-service.deployment.nomad:11,7-8: Invalid argument name; Argument names must not be quoted.
./http_echo/foo-service.nomad
./http_echo/foo-test.nomad
./http_echo/template/echo_template.nomad
./http_echo/template/ets.nomad
./http_echo/template/ets2.nomad
./http_echo/template/ets3.nomad
./httpd_site/httpd.nomad
🔴./ipv6/SimpleHTTPServer/sample.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
sample.nomad:36,31-34: Invalid single-argument block definition; A single-line block definition must end with a closing brace immediately after its single argument definition.
./java/JavaDriverTest/java-driver-test.nomad
./java/JavaDriverTest/test2.nomad
🔴./java/apache_camel/java_files.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
java_files.nomad:2,24-25: Unexpected comma after argument; Argument definitions must be separated by newlines, not commas. An argument definition must end with a newline.
./java/jar-test/jar-test.nomad
./job_examples/base-batch.nomad
🔴./job_examples/meta/meta-batch.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
meta-batch.nomad:8,5-6: Invalid argument name; Argument names must not be quoted.
./json-jobs/example.nomad
./load_balancers/traefik/traefik.nomad
./load_balancers/traefik/webapp.nomad
🟡./load_balancers/traefik/webapp2.nomad
Job Warnings:
1 warning(s):
* Group "demo" has warnings: 1 error occurred:
	* 1 error occurred:
	* Task "server": task network resources have been deprecated as of Nomad 0.12.0. Please configure networking via group network block.
./meta/example.nomad
🔴./microservice/example.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
example.nomad:49,9-17: Argument or block definition required; An argument or block definition is required here. To set an argument, use the equals sign "=" to introduce the argument value.
🔴./minecraft/minecraft.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
minecraft.nomad:40,30-31: Unexpected comma after argument; Argument definitions must be separated by newlines, not commas. An argument definition must end with a newline.
./minecraft/minecraft_exec.nomad
./minecraft/plugin.nomad
./monitoring/sensu/fabio-docker.nomad
./monitoring/sensu/sensu.nomad
./nginx-fabio-clone/bar-service.nomad
./nginx-fabio-clone/example.nomad
./nginx-fabio-clone/foo-service.nomad
./oom/example.nomad
./parameterized/docker_hello_world/hello-world.nomad
./parameterized/template.nomad
./parameterized/to_specific_client/example.nomad
🔴./parameterized/to_specific_client/workaround/example.nomad
Error getting job struct: Error parsing job file from ./parameterized/to_specific_client/workaround/example.nomad:
: Unset variable "node_id"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
./ports/example.nomad
🔴./qemu/hass/hass.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
hass.nomad:24,26-27: Unexpected comma after argument; Argument definitions must be separated by newlines, not commas. An argument definition must end with a newline.
./qemu/tc_ssh.nomad
./qemu/tc_ssh2.nomad
./qemu/tc_ssh_arm.nomad
./raw_exec/env.nomad
./raw_exec/mkdir/mkdir-bash.nomad
./raw_exec/mkdir/mkdir.nomad
./raw_exec/ps.nomad
./raw_exec/quoted_args/quoted_args.nomad
./raw_exec/quoted_args/quoted_args_2.nomad
./raw_exec/user/example.nomad
./reproductions/cpu_rescheduling/repro.nomad
./reschedule/ex.nomad
🔴./restart/restart.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
restart.nomad:32,13-14: Missing key/value separator; Expected an equals sign ("=") to mark the beginning of the attribute value.
./rolling_upgrade/cv-new.nomad
./rolling_upgrade/cv.nomad
./rolling_upgrade/example-new.nomad
./rolling_upgrade/example.nomad
./sentinel/example.nomad
🔴./sentinel/exampleGroupMissingNodeClass.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
exampleGroupMissingNodeClass.nomad:24,46-51: Invalid single-argument block definition; A single-line block definition must end with a closing brace immediately after its single argument definition.
🔴./sentinel/exampleGroupNodeClass.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
exampleGroupNodeClass.nomad:7,47-52: Invalid single-argument block definition; A single-line block definition must end with a closing brace immediately after its single argument definition.
./sentinel/exampleJobNodeClass.nomad
./sentinel/exampleNoNodeClass.nomad
🔴./server-variables/build-site.nomad
Error getting job struct: Error parsing job file from ./server-variables/build-site.nomad:
build-site.nomad:22,5-6: Invalid character; This character is not used within the language., and 15 other diagnostic(s)
./server-variables/nginx.nomad
./server-variables/wordpress-db.nomad
🔴./server-variables/wordpress.nomad
Error getting job struct: Error parsing job file from ./server-variables/wordpress.nomad:
: Unset variable "site_name"; A used variable must be set or have a default value; see https://www.nomadproject.io/docs/job-specification/hcl2/variables for details.
./sleepy/sleepy_bash/sleepy.nomad
🟡./sleepy/sleepy_python/batch_sleepy_python.nomad
Job Warnings:
1 warning(s):
* Group "group" has warnings: 1 error occurred:
	* 1 error occurred:
	* Task "python": task network resources have been deprecated as of Nomad 0.12.0. Please configure networking via group network block.
./sleepy/sleepy_python/sleepy_python.nomad
./spread/example.nomad
🟡./stress/cpu_throttled_time/stress.nomad
Job Warnings:
1 warning(s):
* Group "cache" has warnings: 1 error occurred:
	* 1 error occurred:
	* Task "redis": task network resources have been deprecated as of Nomad 0.12.0. Please configure networking via group network block.
./super_big/super_big.nomad
🟡./super_big/super_big2.nomad
Job Warnings:
1 warning(s):
* Group "cache" has warnings: 1 error occurred:
	* 1 error occurred:
	* Task "redis": task network resources have been deprecated as of Nomad 0.12.0. Please configure networking via group network block.
🔴./system_jobs/sleepy/sleepy_bash/sleepy.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
sleepy.nomad:18,24-25: Extra characters after interpolation expression; Expected a closing brace to end the interpolation expression, but found extra characters.
🟡./system_jobs/sleepy/sleepy_python/batch_sleepy_python.nomad
Job Warnings:
1 warning(s):
* Group "group" has warnings: 1 error occurred:
	* 1 error occurred:
	* Task "python": task network resources have been deprecated as of Nomad 0.12.0. Please configure networking via group network block.
./system_jobs/sleepy/sleepy_python/sleepy_python.nomad
./system_jobs/system_deployment/deploy_jdk.nomad
🟡./system_jobs/system_deployment/fabio-system.nomad
Job Warnings:
1 warning(s):
* Group "linux-amd64" has warnings: 1 error occurred:
	* 1 error occurred:
	* Task "fabio": task network resources have been deprecated as of Nomad 0.12.0. Please configure networking via group network block.
🟡./system_jobs/system_deployment/foo-system.nomad
Job Warnings:
1 warning(s):
* Group "example" has warnings: 1 error occurred:
	* 1 error occurred:
	* Task "server": task network resources have been deprecated as of Nomad 0.12.0. Please configure networking via group network block.
./system_jobs/system_filter/filtered.nomad
./system_jobs/system_filter/host_vol.nomad
./task_deps/consul-lock/myapp.nomad
./task_deps/disk_check/disk.nomad
🔴./task_deps/init_artifact/batch-init-artifact.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
batch-init-artifact.nomad:38,24-25: Extra characters after interpolation expression; Expected a closing brace to end the interpolation expression, but found extra characters.
🔴./task_deps/init_artifact/service-init-artifact.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
service-init-artifact.nomad:38,24-25: Extra characters after interpolation expression; Expected a closing brace to end the interpolation expression, but found extra characters.
./task_deps/interjob/myapp.nomad
./task_deps/interjob/myservice.nomad
./task_deps/k8sdoc/init.nomad
./task_deps/k8sdoc/k8sdoc1.nomad
./task_deps/k8sdoc/myapp.nomad
./task_deps/k8sdoc/myservice.nomad
🔴./task_deps/sidecar/example.nomad
Error getting job struct: Error parsing job file from ./task_deps/sidecar/example.nomad:
example.nomad:56,1-2: Argument or block definition required; An argument or block definition is required here.
./template/batch/context.nomad
./template/batch/parameter.nomad
🔴./template/batch/services.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
services.nomad:1,40-2,1: Invalid single-argument block definition; An argument definition on the same line as its containing block creates a single-line block definition, which must also be closed on the same line. Place the block's closing brace immediately after the argument definition.
./template/batch/template.nomad
./template/from_consul/artifact.nomad
🔴./template/from_consul/init.nomad
Error getting job struct: Error parsing job file from ./template/from_consul/init.nomad:
: Missing job block; A job block is required
./template/from_consul/issue.nomad
./template/rerender/example.nomad
./template/secure_variables/example.nomad
./template/secure_variables/multiregion/template.nomad
./template/secure_variables/template-playground.nomad
🔴./template/secure_variables/variable_view.nomad
Error getting job struct: Error parsing job file from ./template/secure_variables/variable_view.nomad:
variable_view.nomad:44,16-21: Error in function call; Call to function "file" failed: no file exists at template.tmpl.
variable_view.nomad:44,16-21: Unsuitable value type; Unsuitable value: value must be known
🔴./template/services/byTag.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
byTag.nomad:7,19-28: Argument definition required; A single-line block definition can contain only a single argument. If you meant to define argument "network", use an equals sign to assign it a value. To define a nested block, place it on a line of its own within its parent block.
🔴./template/template-system/composed_keys.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
composed_keys.nomad:7,19-28: Argument definition required; A single-line block definition can contain only a single argument. If you meant to define argument "network", use an equals sign to assign it a value. To define a nested block, place it on a line of its own within its parent block.
🔴./template/template-system/services-on-nomad-client.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
services-on-nomad-client.nomad:6,30-33: Invalid single-argument block definition; A single-line block definition must end with a closing brace immediately after its single argument definition.
🔴./template/template-system/template.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
template.nomad:6,30-33: Invalid single-argument block definition; A single-line block definition must end with a closing brace immediately after its single argument definition.
./template/template_handoff/handoff.nomad
./template/template_handoff/handoff_restart.nomad
🔴./template/template_into_docker/example.nomad
Error getting job struct: Failed to parse using HCL 2. Use the HCL 1 parser with `nomad run -hcl1`, or address the following issues:
example.nomad:23,26-27: Missing key/value separator; Expected an equals sign ("=") to mark the beginning of the attribute value.
./template/template_playground/composed_keys.nomad
./template/template_playground/template-exec.nomad
./template/template_playground/template-hcl2.nomad
./template/template_playground/template.nomad
./template/use_whitespace/byTag.nomad
./vault/deleted_policy/temp1.nomad
./vault/deleted_policy/workload.nomad
./vault/pki/sleepy_bash_pki.nomad
./vault/pki/test.nomad
./vault/sleepy_vault_bash/sleepy_bash.nomad
./vault/sleepy_vault_bash/test.nomad
./vault_reload_triggered_by_consul/sample.nomad
./victoriametrics/vm.nomad
./win_rawexec_restart/artifact_sleepyecho.nomad
./windows_docker/docker-iis.nomad
./windows_docker/windows-test.nomad
================================================ FILE: parameterized/README.md ================================================
---
name: Parameterized Jobs on Nomad
products_used:
  - nomad
description: |-
  Short description about what the reader will do/learn. Limit 250
  characters; include keyword for SEO.
---

# Parameterized Jobs on Nomad

Parameterized Nomad jobs encapsulate a set of work that can be carried out on various input values. Jobs with the parameterized stanza register themselves with the cluster, but they do not run immediately. You must "dispatch" the job with the necessary values to run it.

You dispatch a parameterized job using the `nomad job dispatch` command or the Nomad Job Dispatch API. While dispatching the job, you can supply an opaque payload and metadata variables to customize the dispatched instance of the job.

## The `parameterized` stanza

```hcl
parameterized {
  payload       = "required"
  meta_required = ["dispatcher_email"]
  meta_optional = ["pager_email"]
}
```

## Challenge

In this tutorial, you will take a simple Nomad template job, enhance it with parameters, and dispatch it to your cluster. These basic practices can be used to create more complex batch workloads over time.

## Prerequisites

- A Nomad dev agent or a Nomad cluster
- You need either to have the `raw_exec` task driver enabled or to convert the job to use the `exec` driver.

## Build a basic batch job

Create a file named `template.nomad`. Open it in a text editor and add the following minimal job specification.

```hcl
job "«job_name»" {
  datacenters = ["«datacenter»"]

  group "«group_name»" {
    task "«task_name»" {
      driver = "«driver_type»"
    }
  }
}
```

### Populate the template placeholders

For this tutorial, replace the placeholders in the minimal job template with these values.

- **«job_name»** - `template`
- **«group_name»** - `renderer`
- **«task_name»** - `output`
- **«driver_type»** - `raw_exec`

### Set `datacenters`

Set **«datacenter»** to the datacenter where your client nodes run; for a local dev agent, use `dc1`.

### Set job type to batch

The default job type of a Nomad job is **service**. For a batch job, you need to explicitly add the type attribute to the **job** stanza.

```hcl
type = "batch"
```

### Configure the task

Inside of the **task** stanza, add the following `config` stanza. This configuration uses the **cat** command to print the contents of a file named **out.txt** that this job creates.

```hcl
config {
  command = "cat"
  args    = ["local/out.txt"]
}
```

### Add a `template`

Next, add a **template** stanza inside of the **task** stanza. This template will write the words `This is my template` to the **local/out.txt** file.

```hcl
template {
  destination = "local/out.txt"
  data        = <<EOF
This is my template.
EOF
}
```

The complete job now looks like this.

```hcl
job "template" {
  datacenters = ["dc1"]
  type        = "batch"

  group "renderer" {
    task "output" {
      driver = "raw_exec"

      config {
        command = "cat"
        args    = ["local/out.txt"]
      }

      template {
        destination = "local/out.txt"
        data        = <<EOF
This is my template.
EOF
      }
    }
  }
}
```

Run the job with the `nomad job run` command.

```shell-session
$ nomad job run template.nomad
==> Monitoring evaluation "fe273062"
    Evaluation triggered by job "template_render"
    Allocation "bbae901c" created: node "3e34dbcd", group "renderer"
==> Monitoring evaluation "fe273062"
    Allocation "bbae901c" status changed: "pending" -> "complete" (All tasks have completed)
    Evaluation status changed: "pending" -> "complete"
==> Evaluation "fe273062" finished with status "complete"
```

View the output from the job by running the `nomad alloc logs` command on the allocation that Nomad created. In the above output, the allocation ID is "**bbae901c**."

```shell-session
$ nomad alloc logs bbae901c
This is my template.
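# (Illustrative) The rendered template is an ordinary file in the task's
# working directory, so you can also read it directly with `nomad alloc fs`;
# the path starts with the task name ("output").
$ nomad alloc fs bbae901c output/local/out.txt
This is my template.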
```

## Parameterize the job

Add a `parameterized` stanza to the **job** stanza. This stanza instructs Nomad to store the job and wait for you to dispatch instances of it.

```hcl
parameterized {
}
```

An empty `parameterized` stanza creates a parameterized job that can't be customized at dispatch time, but it still allows you to dispatch the job whenever you would like to run it.

Before making the job parameterized, you will need to purge the original batch version. Run `nomad job stop` with the `-purge` flag on the `template` job.

```shell-session
$ nomad job stop -purge template
```

Run the parameterized version of the job.

```shell-session
$ nomad job run template.nomad
Job registration successful
```

Notice that the output doesn't show any scheduling activity: no evaluation or allocation information appears. You should expect this, since parameterized jobs are not run until they are dispatched.

### If you get an error

If you receive the following error, it indicates that you missed purging the non-parameterized version of the template job. Run `nomad job stop -purge template` to resolve it.

```shell-session
$ nomad job run template.nomad
Error submitting job: Unexpected response code: 500 (cannot update non-parameterized job to being parameterized)
```

Run the `nomad job status` command to verify your parameterized job is available for dispatch.

```shell-session
$ nomad job status
ID        Type                 Priority  Status   Submit Date
template  batch/parameterized  50        running  2021-04-11T22:01:45-04:00
```

## Dispatch the job

Run the `nomad job dispatch` command to dispatch an instance of the parameterized job.

```shell-session
$ nomad job dispatch template
Dispatched Job ID = template/dispatch-1618193196-1044eb97
Evaluation ID     = 00084465

==> Monitoring evaluation "00084465"
    Evaluation triggered by job "template/dispatch-1618193196-1044eb97"
    Allocation "c842df26" created: node "9e9342f5", group "renderer"
    Evaluation status changed: "pending" -> "complete"
==> Evaluation "00084465" finished with status "complete"
```

Examine the output of the command. There are some key differences from the typical output of the `nomad job run` command. Notice that Nomad generates a **Dispatched Job ID**. This ID is used to refer to this specific instance of the parameterized job, and it will show in the output of `nomad job status` as well. The output also provides scheduling information.

Collect the allocation ID from your output. In the above output it is "**c842df26**." As before, run the `nomad alloc logs` command for your allocation ID.

```shell-session
$ nomad alloc logs c842df26
This is my template.
```

Even without variables, a parameterized job provides a way to run a batch workload on demand without having to resubmit the job specification.

## Add a dispatch variable

Parameterized jobs also provide the ability to send variables as part of dispatching the job. These variables can be optional or required. For example, the following parameterized stanza adds a required variable named `dispatcher_email` and an optional variable named `pager_email`.

```hcl
parameterized {
  meta_required = ["dispatcher_email"]
  meta_optional = ["pager_email"]
}
```

Add two variables to the template job's parameterized stanza, a required variable named `my_name` and an optional variable named `my_title`, by adding the following attributes inside of the parameterized stanza.
```hcl
meta_required = ["my_name"]
meta_optional = ["my_title"]
```

### Add the variables to the template

Dispatched metadata values are exported to the task as `NOMAD_META_<key>` environment variables. Update the template content inside the heredoc markers (`<<EOF`...`EOF`) so that it greets the dispatcher, for example by adding `Hello {{ env "NOMAD_META_my_title" }} {{ env "NOMAD_META_my_name" }}.` as a second line. Then dispatch the job, supplying the required `my_name` variable.

```shell-session
$ nomad job dispatch -meta my_name=Learner template
Dispatched Job ID = template/dispatch-1618195132-3d59eda3
Evaluation ID     = 0803be44

==> Monitoring evaluation "0803be44"
    Evaluation triggered by job "template/dispatch-1618195132-3d59eda3"
    Allocation "0f1c6c7a" created: node "9e9342f5", group "renderer"
==> Monitoring evaluation "0803be44"
    Allocation "0f1c6c7a" status changed: "pending" -> "complete" (All tasks have completed)
    Evaluation status changed: "pending" -> "complete"
==> Evaluation "0803be44" finished with status "complete"
```

```shell-session
$ nomad alloc logs 0f1c6c7a
This is my template.
Hello Learner.
```

### Test the requirement of `my_name`

Because you put the **my_name** variable in the meta_required attribute's value list, the job will not run unless you provide it when dispatching. If you do not, you will receive an error. Try it now.

```shell-session
$ nomad job dispatch template
Failed to dispatch job: Unexpected response code: 500 (Dispatch did not provide required meta keys: [my_name])
```

### Use the optional variable

Dispatch the job again, this time also supplying the optional `my_title` variable.

```shell-session
$ nomad job dispatch -meta my_name=Learner -meta my_title=awesome template
Dispatched Job ID = template/dispatch-1618195957-6256077e
Evaluation ID     = fdfb6827

==> Monitoring evaluation "fdfb6827"
    Evaluation triggered by job "template/dispatch-1618195957-6256077e"
    Allocation "2b2ebdc1" created: node "9e9342f5", group "renderer"
==> Monitoring evaluation "fdfb6827"
    Allocation "2b2ebdc1" status changed: "pending" -> "complete" (All tasks have completed)
    Evaluation status changed: "pending" -> "complete"
==> Evaluation "fdfb6827" finished with status "complete"
```

```shell-session
$ nomad alloc logs 2b2ebdc1
This is my template.
Hello awesome Learner.
```

### Set default values for optional variables

You set default values for optional variables by adding the `meta` stanza inside the **job** stanza. Create a default of "diligent" for `my_title` by adding the following meta stanza.

```hcl
meta {
  my_title = "diligent"
}
```

Dispatch the job without `my_title` to see the default value used.

```shell-session
$ nomad job dispatch -meta my_name=Learner template
Dispatched Job ID = template/dispatch-1618196625-aa9ba981
Evaluation ID     = 999e5266

==> Monitoring evaluation "999e5266"
    Evaluation triggered by job "template/dispatch-1618196625-aa9ba981"
    Allocation "ea32501e" created: node "9e9342f5", group "renderer"
==> Monitoring evaluation "999e5266"
    Allocation "ea32501e" status changed: "pending" -> "complete" (All tasks have completed)
    Evaluation status changed: "pending" -> "complete"
==> Evaluation "999e5266" finished with status "complete"
```

```shell-session
$ nomad alloc logs ea32501e
This is my template.
Hello diligent Learner.
```

Then dispatch again with a `my_title` value.

```shell-session
$ nomad job dispatch -meta my_name=Learner -meta my_title=fantastic template
Dispatched Job ID = template/dispatch-1618196752-eb39d032
Evaluation ID     = c9c455b3

==> Monitoring evaluation "c9c455b3"
    Evaluation triggered by job "template/dispatch-1618196752-eb39d032"
    Allocation "8c04f35c" created: node "9e9342f5", group "renderer"
==> Monitoring evaluation "c9c455b3"
    Allocation "8c04f35c" status changed: "pending" -> "complete" (All tasks have completed)
    Evaluation status changed: "pending" -> "complete"
==> Evaluation "c9c455b3" finished with status "complete"
```

```shell-session
$ nomad alloc logs 8c04f35c
This is my template.
Hello fantastic Learner.
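# Note: a -meta value supplied at dispatch time overrides the job-level
# meta default, which is why "fantastic" replaced the default "diligent".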
## Use dispatch payloads

In addition to metadata, a dispatched instance can receive an opaque payload. The `dispatch_payload` stanza in a task makes that payload available to the task by writing it to the named file in the task's local directory; you supply the payload itself as an input source on the `nomad job dispatch` command line.

```hcl
dispatch_payload {
  file = "config.json"
}
```

## Additional discussion

_Optional_ Oftentimes, support or TAMs ask you to add extra discussion to explain a little more about cloud provider specific pitfalls, etc. You can add them here if they do not fit anywhere else.

## Next steps

In this section, start with a brief **_summary_** of what you have learned in this tutorial, re-emphasizing the business value. Then provide some guidance on the next steps to extend the user's knowledge. Briefly describe what the user will do in the next tutorial if the current collection is sequential. Add cross-referencing links to get more information about the feature (e.g. product doc page, webinar links, blog post, etc.).

================================================
FILE: parameterized/docker_hello_world/hello-world.nomad
================================================
job "hello-world.nomad" {
  datacenters = ["dc1"]
  type        = "batch"

  parameterized {
  }

  group "containers" {
    task "hello" {
      driver = "docker"

      config {
        image = "hello-world:latest"
      }
    }
  }
}

================================================
FILE: parameterized/template.nomad
================================================
job "«job_name»" {
  datacenters = ["«datacenter»"]

  group "«group_name»" {
    task "«job_name»" {
      driver = "«driver_type»"
    }
  }
}

================================================
FILE: parameterized/to_specific_client/example.nomad
================================================
job "example.nomad" {
  datacenters = ["dc1"]
  type        = "batch"

  parameterized {
    meta_required = ["input_node_id"]
    meta_optional = []
    payload       = "forbidden"
  }

  group "cache" {
    constraint {
      attribute = "${node.unique.id}"
      value     = "${NOMAD_META_INPUT_NODE_ID}"
    }

    task "task" {
      driver = "docker"

      config {
        image   = "alpine"
        command = "sh"
        args = [
          "-c",
          "env; while true; do sleep 300; done"
        ]
      }
    }
  }
}

================================================
FILE: parameterized/to_specific_client/workaround/README.md
================================================
# A gross workaround

This is a very gross workaround to synthesize some things I have learned recently. It leverages:

- ugly shell script
- python
- Nomad HCL2

```bash
RunOutput=`nomad job run -var node_id=f7bc1f2d-34b1-eaf8-b7d3-253f2e7de4d6 example.nomad`
AllocId=$(echo "$RunOutput" | awk '/Allocation/{ print $2}' | tr -d "\"")
if [ -z "$AllocId" ]
then
  echo "No allocation found"
  exit 1
fi
FullAllocId=$(nomad alloc status -verbose $AllocId | grep -e '^ID' | awk '{print $3}')
```
================================================
FILE: parameterized/to_specific_client/workaround/example.nomad
================================================
variable "node_id" {
  type        = string
  description = "The destination's Nomad node ID. Must be the full ID from `nomad node status -verbose`"
}

job "example.nomad" {
  datacenters = ["dc1"]
  type        = "batch"

  group "cache" {
    constraint {
      attribute = "${node.unique.id}"
      value     = var.node_id
    }

    task "task" {
      driver = "docker"

      config {
        image   = "alpine"
        command = "sh"
        args = [
          "-c",
          "env; sleep 5;"
        ]
      }
    }
  }
}

================================================
FILE: parameterized/to_specific_client/workaround/rolling_run.sh
================================================
#!/bin/bash

ClientNodeIds=$(nomad node status -t '{{ range .}}{{printf "%s\n" .ID}}{{end}}')

RunOutput=$(nomad job run -var node_id=f7bc1f2d-34b1-eaf8-b7d3-253f2e7de4d6 example.nomad)
AllocId=$(echo "$RunOutput" | awk '/Allocation/{ print $2}' | tr -d "\" \t")
if [ "$AllocId" == "" ]
then
  echo "No allocation found"
  exit 1
fi

FullAllocId=$(nomad alloc status -verbose $AllocId | grep -e '^ID' | awk '{print $3}')

# Run the watcher and capture its exit code.
./watch.py $FullAllocId
ExitCode=$?
if [ $ExitCode -ne 0 ]
then
  echo "Bailing out because of an error..."
  exit 2
fi

================================================
FILE: parameterized/to_specific_client/workaround/watch.py
================================================
#!/usr/local/bin/python3
import json
import os
import requests
import sys


def build_url(alloc_id):
    # Check for NOMAD_ADDR; if found, set the base of the URL to it.
    if os.environ.get('NOMAD_ADDR'):
        nomad_addr = os.environ.get('NOMAD_ADDR')
        # ... well, unless it's HTTPS.
        if nomad_addr.startswith("https"):
            raise ValueError("HTTPS is not implemented")
        url_base = os.environ.get('NOMAD_ADDR')
    else:
        url_base = "http://127.0.0.1:4646"

    URL_API_PATH = "/v1/event/stream"
    URL_QUERY_STRING = "?topic=Allocation:" + alloc_id
    return url_base + URL_API_PATH + URL_QUERY_STRING


def eprint(string):
    sys.stderr.write(string)
    sys.stderr.flush()


def is_final(event):
    if event["Payload"]["Allocation"]["ClientStatus"] == "complete":
        eprint("Allocation complete\n")
        sys.exit(0)
    if event["Payload"]["Allocation"]["ClientStatus"] == "failed":
        eprint("Allocation failed\n")
        sys.exit(1)


def print_tasks(event):
    tasks = event["Payload"]["Allocation"]["TaskStates"]
    if tasks:
        for task_name, task in tasks.items():
            print("--- " + task_name + "\t" + task["State"] + "\t" + str(task["Failed"]))


def handle_event(event):
    allocation = event["Payload"]["Allocation"]
    print(str(event["Index"]) + "\t" +
          event["Type"] + "\t" + allocation["DesiredStatus"] + "\t" +
          allocation["ClientStatus"])
    # print_tasks(event)
    is_final(event)


def handle_data(response):
    '''Handle a single line of data from the HTTP stream.'''
    for line in response.iter_lines():
        if line:  # filter out keep-alive new lines
            object = json.loads(line.decode('utf-8'))
            if len(object) > 1:  # has Events
                for event in object["Events"]:
                    handle_event(event)


def connect(url):
    try:
        eprint("Connecting to '" + url + "'\n")
        response = requests.get(url, stream=True)
        response.raise_for_status()
        handle_data(response)
    except requests.exceptions.RequestException as e:
        raise SystemExit(e)


def start():
    try:
        connect(build_url(check_args()))
    except KeyboardInterrupt:
        eprint("Received keyboard interrupt. Stopping.\n")
        sys.exit(0)


def check_args():
    # Look for 2 items, because argv[0] is always the script's name.
    if len(sys.argv) != 2:
        raise ValueError("Must supply a full Nomad alloc id.")
    alloc_id = sys.argv[1]
    return alloc_id


start()
================================================
FILE: ports/README.md
================================================
# Mapping ports into Nomad

This example shows a job that uses both static and dynamic ports.

================================================
FILE: ports/example.nomad
================================================
job "example" {
  datacenters = ["dc1"]

  group "cache" {
    network {
      # The label of a `port` block is used to refer to that port in the
      # rest of the job: interpolation, docker port maps, etc.
      port "dynamic" {
        to = 6379
      }

      port "_443" {
        static = 443
        to     = 6379
      }

      port "444" {
        static = 444
        to     = 6379
      }
    }

    service {
      name = "redis-cache"
      tags = ["global", "cache"]
      port = "dynamic"

      check {
        name     = "alive"
        type     = "tcp"
        interval = "10s"
        timeout  = "2s"
      }
    }

    task "redis" {
      driver = "docker"

      config {
        image = "redis:7"
        ports = ["dynamic", "_443", "444"]
      }
    }
  }
}

================================================
FILE: preserve_state/bar-service.jsonjob
================================================
{ "Job": { "AllAtOnce": false, "Constraints": null, "CreateIndex": 11412, "Datacenters": [ "dc1" ], "ID": "bar-service", "JobModifyIndex": 11412, "Meta": null, "ModifyIndex": 11415, "Name": "bar-service", "Namespace": "default", "ParameterizedJob": null, "ParentID": "", "Payload": null, "Periodic": null, "Priority": 50, "Region": "global", "Stable": false, "Status": "running", "StatusDescription": "", "Stop": false, "SubmitTime": 1522707675977824527, "TaskGroups": [ { "Constraints": null, "Count": 6, "EphemeralDisk": { "Migrate": false, "SizeMB": 300, "Sticky": false }, "Meta": null, "Name": "example", "RestartPolicy": { "Attempts": 2, "Delay": 15000000000, "Interval": 60000000000, "Mode": "delay" }, "Tasks": [ { "Artifacts": [ { "GetterMode": "any", "GetterOptions": { "checksum": "sha256:e30b29b72ad5ec1f6dfc8dee0c2fcd162f47127f2251b99e47b9ae8af1d7b917" }, "GetterSource": "https://github.com/hashicorp/http-echo/releases/download/v0.2.3/http-echo_0.2.3_linux_amd64.tar.gz", "RelativeDest": "local/" } ], "Config": { "args": [ "-listen", ":${NOMAD_PORT_http}", "-text", "

Welcome to the Bar Service.


You are on ${NOMAD_IP_http}." ], "command": "http-echo" }, "Constraints": null, "DispatchPayload": null, "Driver": "exec", "Env": null, "KillSignal": "", "KillTimeout": 5000000000, "Leader": false, "LogConfig": { "MaxFileSizeMB": 10, "MaxFiles": 10 }, "Meta": null, "Name": "server", "Resources": { "CPU": 100, "DiskMB": 0, "IOPS": 0, "MemoryMB": 300, "Networks": [ { "CIDR": "", "Device": "", "DynamicPorts": [ { "Label": "http", "Value": 0 } ], "IP": "", "MBits": 10, "ReservedPorts": null } ] }, "Services": [ { "AddressMode": "auto", "CheckRestart": null, "Checks": [ { "AddressMode": "", "Args": null, "CheckRestart": null, "Command": "", "Header": null, "Id": "", "InitialStatus": "", "Interval": 15000000000, "Method": "", "Name": "health-check", "Path": "/", "PortLabel": "", "Protocol": "", "TLSSkipVerify": false, "Timeout": 5000000000, "Type": "http" } ], "Id": "", "Name": "bar-service", "PortLabel": "http", "Tags": [ "urlprefix-/bar" ] } ], "ShutdownDelay": 0, "Templates": null, "User": "", "Vault": null } ], "Update": null } ], "Type": "service", "Update": { "AutoRevert": false, "Canary": 0, "HealthCheck": "", "HealthyDeadline": 0, "MaxParallel": 0, "MinHealthyTime": 0, "Stagger": 0 }, "VaultToken": "", "Version": 0 } } ================================================ FILE: preserve_state/example.jsonjob ================================================ { "Job": { "AllAtOnce": false, "Constraints": null, "CreateIndex": 11414, "Datacenters": [ "dc1" ], "ID": "example", "JobModifyIndex": 11414, "Meta": null, "ModifyIndex": 11414, "Name": "example", "Namespace": "default", "ParameterizedJob": null, "ParentID": "", "Payload": null, "Periodic": { "Enabled": true, "ProhibitOverlap": true, "Spec": "*/15 * * * * *", "SpecType": "cron", "TimeZone": "UTC" }, "Priority": 50, "Region": "global", "Stable": false, "Status": "running", "StatusDescription": "", "Stop": false, "SubmitTime": 1522707676229857749, "TaskGroups": [ { "Constraints": null, "Count": 5, "EphemeralDisk": { "Migrate": false, "SizeMB": 300, "Sticky": false }, "Meta": null, "Name": "sleepers", "RestartPolicy": { "Attempts": 15, "Delay": 15000000000, "Interval": 604800000000000, "Mode": "delay" }, "Tasks": [ { "Artifacts": null, "Config": { "command": "bash", "args": [ "-c", "echo Starting; sleep=`shuf -i5-10 -n1`; echo Sleeping $sleep seconds.; sleep $sleep; echo Done; exit 0" ] }, "Constraints": null, "DispatchPayload": null, "Driver": "raw_exec", "Env": null, "KillSignal": "", "KillTimeout": 5000000000, "Leader": false, "LogConfig": { "MaxFileSizeMB": 10, "MaxFiles": 10 }, "Meta": null, "Name": "wait", "Resources": { "CPU": 100, "DiskMB": 0, "IOPS": 0, "MemoryMB": 200, "Networks": null }, "Services": null, "ShutdownDelay": 0, "Templates": null, "User": "", "Vault": null } ], "Update": null } ], "Type": "batch", "Update": { "AutoRevert": false, "Canary": 0, "HealthCheck": "", "HealthyDeadline": 0, "MaxParallel": 0, "MinHealthyTime": 0, "Stagger": 0 }, "VaultToken": "", "Version": 0 } } ================================================ FILE: preserve_state/fabio.jsonjob ================================================ { "Job": { "AllAtOnce": false, "Constraints": [ { "LTarget": "${attr.cpu.arch}", "Operand": "!=", "RTarget": "arm" }, { "LTarget": "${attr.kernel.name}", "Operand": "!=", "RTarget": "windows" } ], "CreateIndex": 11416, "Datacenters": [ "dc1" ], "ID": "fabio", "JobModifyIndex": 11416, "Meta": null, "ModifyIndex": 11416, "Name": "fabio", "Namespace": "default", "ParameterizedJob": null, "ParentID": "", "Payload": null, 
"Periodic": null, "Priority": 50, "Region": "global", "Stable": false, "Status": "running", "StatusDescription": "", "Stop": false, "SubmitTime": 1522707676470085364, "TaskGroups": [ { "Constraints": null, "Count": 1, "EphemeralDisk": { "Migrate": false, "SizeMB": 300, "Sticky": false }, "Meta": null, "Name": "fabio", "RestartPolicy": { "Attempts": 2, "Delay": 15000000000, "Interval": 60000000000, "Mode": "delay" }, "Tasks": [ { "Artifacts": [ { "GetterMode": "any", "GetterOptions": { "checksum": "sha256:7dc786c3dfd8c770d20e524629d0d7cd2cf8bb84a1bf98605405800b28705198" }, "GetterSource": "https://github.com/fabiolb/fabio/releases/download/v1.5.0/fabio-1.5.0-go1.8.3-linux_amd64", "RelativeDest": "local/" } ], "Config": { "command": "fabio-1.5.0-go1.8.3-linux_amd64" }, "Constraints": null, "DispatchPayload": null, "Driver": "exec", "Env": null, "KillSignal": "", "KillTimeout": 5000000000, "Leader": false, "LogConfig": { "MaxFileSizeMB": 10, "MaxFiles": 10 }, "Meta": null, "Name": "fabio", "Resources": { "CPU": 500, "DiskMB": 0, "IOPS": 0, "MemoryMB": 64, "Networks": [ { "CIDR": "", "Device": "", "DynamicPorts": null, "IP": "", "MBits": 1, "ReservedPorts": [ { "Label": "http", "Value": 9999 }, { "Label": "ui", "Value": 9998 } ] } ] }, "Services": null, "ShutdownDelay": 0, "Templates": null, "User": "", "Vault": null } ], "Update": { "AutoRevert": false, "Canary": 0, "HealthCheck": "checks", "HealthyDeadline": 300000000000, "MaxParallel": 1, "MinHealthyTime": 10000000000, "Stagger": 5000000000 } } ], "Type": "system", "Update": { "AutoRevert": false, "Canary": 0, "HealthCheck": "", "HealthyDeadline": 0, "MaxParallel": 1, "MinHealthyTime": 0, "Stagger": 5000000000 }, "VaultToken": "", "Version": 0 } } ================================================ FILE: preserve_state/foo-service.jsonjob ================================================ { "Job": { "AllAtOnce": false, "Constraints": null, "CreateIndex": 11420, "Datacenters": [ "dc1" ], "ID": "foo-service", "JobModifyIndex": 11420, "Meta": { "foo-service": "true" }, "ModifyIndex": 11424, "Name": "foo-service", "Namespace": "default", "ParameterizedJob": null, "ParentID": "", "Payload": null, "Periodic": null, "Priority": 50, "Region": "global", "Stable": false, "Status": "running", "StatusDescription": "", "Stop": false, "SubmitTime": 1522707676494575505, "TaskGroups": [ { "Constraints": null, "Count": 3, "EphemeralDisk": { "Migrate": false, "SizeMB": 300, "Sticky": false }, "Meta": null, "Name": "example", "RestartPolicy": { "Attempts": 2, "Delay": 15000000000, "Interval": 60000000000, "Mode": "delay" }, "Tasks": [ { "Artifacts": [ { "GetterMode": "any", "GetterOptions": { "checksum": "sha256:e30b29b72ad5ec1f6dfc8dee0c2fcd162f47127f2251b99e47b9ae8af1d7b917" }, "GetterSource": "https://github.com/hashicorp/http-echo/releases/download/v0.2.3/http-echo_0.2.3_linux_amd64.tar.gz", "RelativeDest": "local/" } ], "Config": { "command": "http-echo", "args": [ "-listen", ":${NOMAD_PORT_http}", "-text", "

Welcome to the Foo Service.


You are on ${NOMAD_IP_http}." ] }, "Constraints": null, "DispatchPayload": null, "Driver": "exec", "Env": null, "KillSignal": "", "KillTimeout": 5000000000, "Leader": false, "LogConfig": { "MaxFileSizeMB": 10, "MaxFiles": 10 }, "Meta": null, "Name": "server", "Resources": { "CPU": 100, "DiskMB": 0, "IOPS": 0, "MemoryMB": 300, "Networks": [ { "CIDR": "", "Device": "", "DynamicPorts": [ { "Label": "http", "Value": 0 } ], "IP": "", "MBits": 10, "ReservedPorts": null } ] }, "Services": [ { "AddressMode": "auto", "CheckRestart": null, "Checks": [ { "AddressMode": "", "Args": null, "CheckRestart": null, "Command": "", "Header": null, "Id": "", "InitialStatus": "", "Interval": 15000000000, "Method": "", "Name": "health-check", "Path": "/", "PortLabel": "", "Protocol": "", "TLSSkipVerify": false, "Timeout": 5000000000, "Type": "http" } ], "Id": "", "Name": "foo-service", "PortLabel": "http", "Tags": [ "urlprefix-/foo" ] } ], "ShutdownDelay": 0, "Templates": null, "User": "", "Vault": null } ], "Update": null } ], "Type": "service", "Update": { "AutoRevert": false, "Canary": 0, "HealthCheck": "", "HealthyDeadline": 0, "MaxParallel": 0, "MinHealthyTime": 0, "Stagger": 0 }, "VaultToken": "", "Version": 0 } } ================================================ FILE: preserve_state/hashi-ui.jsonjob ================================================ { "Job": { "AllAtOnce": false, "Constraints": null, "CreateIndex": 11423, "Datacenters": [ "dc1" ], "ID": "hashi-ui", "JobModifyIndex": 11423, "Meta": null, "ModifyIndex": 11423, "Name": "hashi-ui", "Namespace": "default", "ParameterizedJob": null, "ParentID": "", "Payload": null, "Periodic": null, "Priority": 50, "Region": "global", "Stable": false, "Status": "running", "StatusDescription": "", "Stop": false, "SubmitTime": 1522707676888714780, "TaskGroups": [ { "Constraints": null, "Count": 1, "EphemeralDisk": { "Migrate": false, "SizeMB": 300, "Sticky": false }, "Meta": null, "Name": "nomad-ui", "RestartPolicy": { "Attempts": 2, "Delay": 15000000000, "Interval": 60000000000, "Mode": "delay" }, "Tasks": [ { "Artifacts": null, "Config": { "port_map": [ { "ui": 3000.0 } ], "image": "jippi/hashi-ui" }, "Constraints": [ { "LTarget": "${attr.cpu.arch}", "Operand": "=", "RTarget": "amd64" }, { "LTarget": "${attr.kernel.name}", "Operand": "=", "RTarget": "linux" } ], "DispatchPayload": null, "Driver": "docker", "Env": null, "KillSignal": "", "KillTimeout": 5000000000, "Leader": false, "LogConfig": { "MaxFileSizeMB": 10, "MaxFiles": 10 }, "Meta": null, "Name": "nomad-ui-linux-amd64", "Resources": { "CPU": 100, "DiskMB": 0, "IOPS": 0, "MemoryMB": 128, "Networks": [ { "CIDR": "", "Device": "", "DynamicPorts": null, "IP": "", "MBits": 1, "ReservedPorts": [ { "Label": "ui", "Value": 8000 } ] } ] }, "Services": [ { "AddressMode": "auto", "CheckRestart": null, "Checks": [ { "AddressMode": "", "Args": null, "CheckRestart": null, "Command": "", "Header": null, "Id": "", "InitialStatus": "", "Interval": 10000000000, "Method": "", "Name": "service: \"hashi-ui-nomad-ui-nomad-ui-linux-amd64\" check", "Path": "/", "PortLabel": "ui", "Protocol": "", "TLSSkipVerify": false, "Timeout": 2000000000, "Type": "tcp" } ], "Id": "", "Name": "hashi-ui-nomad-ui-nomad-ui-linux-amd64", "PortLabel": "ui", "Tags": null } ], "ShutdownDelay": 0, "Templates": [ { "ChangeMode": "restart", "ChangeSignal": "", "DestPath": "secrets/file.env", "EmbeddedTmpl": " NOMAD_ADDR = \"{{with service \"nomad\"}}http://{{.Address}}:{{.Port}}{{end}}\"\n NOMAD_ENABLE = 1\n CONSUL_ENABLE = 1\n CONSUL_ADDR = \"{{with 
service \"consul\"}}http://{{.Address}}:{{.Port}}{{end}}\"\n LOG_LEVEL = \"info\"\n NOMAD_READ_ONLY = 0\n CONSUL_READ_ONLY = 0\n", "Envvars": true, "LeftDelim": "{{", "Perms": "0644", "RightDelim": "}}", "SourcePath": "", "Splay": 5000000000, "VaultGrace": 15000000000 } ], "User": "", "Vault": null } ], "Update": null } ], "Type": "system", "Update": { "AutoRevert": false, "Canary": 0, "HealthCheck": "", "HealthyDeadline": 0, "MaxParallel": 0, "MinHealthyTime": 0, "Stagger": 0 }, "VaultToken": "", "Version": 0 } } ================================================ FILE: preserve_state/jam.sh ================================================ #! /bin/bash jobs=$(ls *.jsonjob) for I in ${jobs}; do echo "Jamming $I" curl -X PUT -d @$I http://127.0.0.1:4646/v1/jobs echo "" done ================================================ FILE: preserve_state/nomad_debug ================================================ #! /usr/bin/python import urllib, json baseUrl = "http://127.0.0.1:4646" url = baseUrl+"/v1/jobs" response = urllib.urlopen(url) data = json.loads(response.read()) for job in data: print(job['Name'], job['Status'], job['Stop']) ================================================ FILE: preserve_state/preserve.sh ================================================ #! /bin/bash jobs=$(nomad status | grep ing | grep -v "/periodic-" |awk '{print $1}') echo $(echo "${jobs}" |wc -l) for I in ${jobs}; do echo "Exporting $I" nomad inspect $I > $I.jsonjob done ================================================ FILE: qemu/README.md ================================================ # TinyCore QEMU example This sample will start a TinyCore Linux VM configured with the SSH daemon enabled. It performs port forwarding using the QEMU commands so that Nomad can dynamically assign a HTTP and SSH port for the VM. You will need to serve the image some place so that it can be retrieved using the artifact stanza. The default SSH user is `tc` with `tinycore` as password. ================================================ FILE: qemu/hass/hass.nomad ================================================ job "home-assistant"{ datacenters = ["dc1"] type = "service" priority = "100" group "hass-vm" { task "home-assistant" { driver = "qemu" artifact { source = "https://github.com/home-assistant/operating-system/releases/download/4.16/hassos_ova-4.16.qcow2.gz" destination ="hassos_ova-4.16.qcow2" mode = "file" } config { image_path = "hassos_ova-4.16.qcow2" accelerator = "kvm" graceful_shutdown = true args = ["nodefaults", "nodefconfig", "net nic,model=e1000", "smbios type=0,uefi=on", ] } resources { cpu = 100, memory = 800 } } network { mode = "host" port "hasswebui" { static = 8223 } } } } ================================================ FILE: qemu/imagebuilder/Core-current.iso ================================================ [File too large to display: 15.3 MB] ================================================ FILE: qemu/imagebuilder/Dockerfile ================================================ FROM ubuntu RUN export DEBIAN_FRONTEND=noninteractive && \ apt update && \ apt install -y \ qemu \ qemu-utils \ libguestfs-tools \ linux-image-generic \ nbdfuse \ nbd-client \ nbdkit \ nbdkit-plugin-guestfs RUN mkdir -p /mnt/cdrom /mnt/tinycore ================================================ FILE: qemu/imagebuilder/NOTES.md ================================================ # Some notes that need to be formatted and properly attended to You will need to serve the image someplace so that it can be retrieved using the artifact stanza. 
## Creating the image

Download the boot image.

- Original article

```bash
docker run -v $(pwd)/working:/working --privileged --name=imagebuilder --rm -it ubuntu /bin/bash
```

```bash
apt update; apt install -y \
  qemu \
  qemu-utils \
  libguestfs-tools \
  linux-image-generic \
  nbdfuse \
  nbd-client
```

```bash
cd working
wget http://tinycorelinux.net/12.x/x86/release/Core-current.iso
mkdir /mnt/cdrom
mkdir /mnt/tinycore
mount Core-current.iso /mnt/cdrom
```

```
docker run -v $(pwd):/working --privileged --rm --name=imagebuilder -it imagebuilder /bin/bash
```

### Using qemu-img to make the disk

This requires an nbd-capable kernel so that you can mount the qcow as a block device for more standard manipulation.

Create the qcow and create the block device for it with `qemu-nbd`.

```bash
qemu-img create -f qcow2 /working/core-image.qcow2 64M
qemu-nbd -c /dev/nbd0 /working/core-image.qcow2
```

Create a partition table.

```bash
fdisk /dev/nbd0
```

Remove the NBD device.

```bash
qemu-nbd -d /dev/nbd0
```

```bash
guestfish -a /working/core-image.qcow2
```

run

### Using nbdfuse for systems that don't have kernel nbd support

```bash
qemu-img create -f qcow2 /working/core-image.qcow2 64M
mkdir -p /block
nbdfuse /block/nbd0 --socket-activation qemu-nbd -f qcow2 /working/core-image.qcow2 &
```

```bash
fusermount3 -u dir
rmdir dir
```

### Using guestfish tools to build an image

```bash
guestfish -N core-image.qcow2=fs:ext4:64M:mbr
exit
guestmount -a /working/core-image.qcow2 -m /dev/sda1 /mnt/tinycore
```

## Prepare image

```bash
rm -rf /mnt/tinycore/lost+found
mkdir -p /mnt/tinycore/boot
mkdir -p /mnt/tinycore/tce/optional
touch /mnt/tinycore/tce/onboot.lst
grub-install --boot-directory=/mnt/tinycore/boot
cp /mnt/cdrom/boot/vmlinuz
```

================================================
FILE: qemu/job.json
================================================
{ "Job": { "Affinities": null, "AllAtOnce": false, "Constraints": null, "ConsulToken": "", "CreateIndex": 170289, "Datacenters": [ "dc1" ], "Dispatched": false, "ID": "example", "JobModifyIndex": 170289, "Meta": null, "Migrate": null, "ModifyIndex": 170290, "Multiregion": null, "Name": "example", "Namespace": "default", "NomadTokenID": "", "ParameterizedJob": null, "ParentID": "", "Payload": null, "Periodic": null, "Priority": 50, "Region": "global", "Reschedule": null, "Spreads": null, "Stable": false, "Status": "dead", "StatusDescription": "", "Stop": true, "SubmitTime": 1621343037528980394, "TaskGroups": [ { "Affinities": null, "Constraints": null, "Count": 1, "EphemeralDisk": { "Migrate": false, "SizeMB": 300, "Sticky": false }, "Meta": null, "Migrate": { "HealthCheck": "checks", "HealthyDeadline": 300000000000, "MaxParallel": 1, "MinHealthyTime": 10000000000 }, "Name": "cache", "Networks": [ { "CIDR": "", "DNS": null, "Device": "", "DynamicPorts": [ { "HostNetwork": "default", "Label": "db", "To": 6379, "Value": 0 } ], "IP": "", "MBits": 0, "Mode": "", "ReservedPorts": null } ], "ReschedulePolicy": { "Attempts": 0, "Delay": 30000000000, "DelayFunction": "exponential", "Interval": 0, "MaxDelay": 3600000000000, "Unlimited": true }, "RestartPolicy": { "Attempts": 2, "Delay": 15000000000, "Interval": 1800000000000, "Mode": "fail" }, "Scaling": null, "Services": null, "ShutdownDelay": null, "Spreads": null, "StopAfterClientDisconnect": null, "Tasks": [ { "Affinities": null, "Artifacts": null, "Config": { "image": "redis:7", "ports": [ "db" ] }, "Constraints": null, "DispatchPayload": null, "Driver": "docker", "Env": null, "KillSignal": "", "KillTimeout": 5000000000, "Kind": "",
"Leader": false, "Lifecycle": null, "LogConfig": { "MaxFileSizeMB": 10, "MaxFiles": 10 }, "Meta": null, "Name": "redis", "Resources": { "CPU": 500, "Devices": null, "DiskMB": 0, "IOPS": 0, "MemoryMB": 256, "Networks": null }, "RestartPolicy": { "Attempts": 2, "Delay": 15000000000, "Interval": 1800000000000, "Mode": "fail" }, "ScalingPolicies": null, "Services": null, "ShutdownDelay": 0, "Templates": null, "User": "", "Vault": null, "VolumeMounts": null } ], "Update": { "AutoPromote": false, "AutoRevert": false, "Canary": 0, "HealthCheck": "checks", "HealthyDeadline": 300000000000, "MaxParallel": 1, "MinHealthyTime": 10000000000, "ProgressDeadline": 600000000000, "Stagger": 30000000000 }, "Volumes": null } ], "Type": "service", "Update": { "AutoPromote": false, "AutoRevert": false, "Canary": 0, "HealthCheck": "", "HealthyDeadline": 0, "MaxParallel": 1, "MinHealthyTime": 0, "ProgressDeadline": 0, "Stagger": 30000000000 }, "VaultNamespace": "", "VaultToken": "", "Version": 0 } } ================================================ FILE: qemu/tc.qcow2 ================================================ [File too large to display: 25.6 MB] ================================================ FILE: qemu/tc_ssh.nomad ================================================ job "j1" { datacenters = ["dc1"] group "g1" { network { port "http" { to = -1 } port "ssh" { to = -1 } } service { tags = ["tag1"] port = "http" check { type = "http" port = "http" path = "/index.html" interval = "10s" timeout = "2s" } } task "t1" { template { data = <thenon @thenon:matrix.org [m] Apr 23 08:16 >Hi all. So I've got a job scheduled in nomad, requiring 200 CPU, and its running on node X. Node X has plenty of resources available, let's say 1000 CPU. If I update the definition of the job to require 201 CPU, it of course triggers another evaluation. But if the result of that evaluation is zero changes (job remains on node X, and zero other changes to any jobs etc. etc. ) I would expect no actions to happen. Instead, my job is stopped, and started. This is bad for me to interrupt the job for no reason. Is there any way to avoid this behaviour? Thanks for any pointers. >Florian Apolloner @apollo13 Apr 23 08:55 >changing cpu limits sounds like a change to me and not zero changes ;) >thenon @thenon:matrix.org [m] Apr 23 08:57 >:) but there are zero effective changes. The same jobs end up running on the same nodes. Its a null op in terms of allocations.... b >thenon @thenon:matrix.org [m] Apr 23 09:04 >(what we're tryign to do here is achieve dynamic bin packing. Something out of band is watching the resource usage + other stuff, and updating things like cpu/memory usage, for a job. Most of the time we'd expect no changes, jobs are find on current nodes. When something has changed (e.g. job gets busier) enough that a reallocation results in jobs moving to another node, that's fine. Great even ! That's Nomad doing the hard job of pin backing properly. But right now, jobs restart, for no reason.... ) >manveru @manveru:matrix.org [m] Apr 23 10:24 >thenon: i think the initial idea was that nomad would somehow enforce the cpu reservation like it does with the memory one... but i guess that never got implemented, just the restarts remain :( >thenon @thenon:matrix.org [m] Apr 23 10:26 >manveru: I don't know, I think my comment applies to anything. let's say you changed a meta constraint value. and the end result of the re-evaluation of everything was: no changes - every job is already running where it should be, based on new constraint values. 
>why restart the job?

### Let's do a repro

1. I created the example Nomad job using `nomad init --short repro.nomad`
1. Started up a Nomad dev agent in another window

================================================
FILE: reproductions/cpu_rescheduling/repro.nomad
================================================
job "example" {
  datacenters = ["dc1"]

  group "cache" {
    network {
      port "db" {
        to = 6379
      }
    }

    task "redis" {
      driver = "docker"

      config {
        image = "redis:7"
        ports = ["db"]
      }

      resources {
        cpu    = 500
        memory = 256
      }
    }
  }
}

================================================
FILE: reschedule/ex.nomad
================================================
job "example" {
  datacenters = ["dc1"]

  update {
    healthy_deadline = "3m"
  }

  group "cache" {
    network {
      port "db" {
        to = 6379
      }
    }

    reschedule {
      attempts  = 15
      interval  = "1h"
      max_delay = "120s"
      unlimited = false
    }

    service {
      name = "redis-cache"
      tags = ["global", "cache"]
      port = "db"

      check {
        name     = "alive"
        type     = "tcp"
        interval = "10s"
        timeout  = "2s"

        check_restart {
          limit           = 2
          grace           = "10s"
          ignore_warnings = false
        }
      }
    }

    task "redis" {
      driver = "raw_exec"

      config {
        command = "bash"
        args = [
          "-c",
          "SLEEP_SECS=2; while true; do echo $(date) -- Alive... going back to sleep for ${SLEEP_SECS}; sleep ${SLEEP_SECS}; done"
        ]
      }

      resources {
        memory = 10
      }
    }
  }
}

================================================
FILE: restart/restart.nomad
================================================
job "fail-service" {
  datacenters = ["dc1"]
  type        = "service"

  reschedule {
    delay          = "15s"
    delay_function = "constant"
    unlimited      = true
  }

  group "api" {
    count = 1

    restart {
      attempts = 3
      interval = "30s"
      delay    = "5s"
      mode     = "fail"
    }

    network {
      mode = "bridge"

      port "http" {
        to = 8080
      }
    }

    service {
      name = "fail-service-nomad"
      port = "http"

      check {
        type     = "http"
        port     = "http"
        path     = "/health"
        interval = "10s"
        timeout  = "2s"

        check_restart {
          limit           = 1
          grace           = "10s"
          ignore_warnings = false
        }
      }
    }

    task "main" {
      driver = "docker"

      config {
        image = "thobe/fail_service:v0.1.0"
        ports = ["http"]
      }

      env {
        HEALTHY_FOR   = 20
        UNHEALTHY_FOR = 120
      }

      resources {
        cpu    = 100
        memory = 128
      }
    }
  }
}

================================================
FILE: rolling_upgrade/README.md
================================================
## Rolling Upgrades

This sample demonstrates the behavior of rolling upgrades in a Nomad cluster.

Instructions:

Run the sample job:

```
nomad run example.nomad
```

This will deploy three instances of the sample redis container to the cluster.

Upgrade the instances:

```
nomad run example-new.nomad
```

Nomad should perform a rolling upgrade of the three instances. It should wait for an instance to be healthy for one minute before moving to the next instance.

> **NOTE:** The example job is currently sad and will not upgrade properly. The cv version presents an alternative configuration file structure that upgrades as expected.
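One plausible explanation for the difference, offered as an assumption rather than a confirmed diagnosis: `max_parallel` in the `update` stanza applies per task group, so three groups of `count = 1` can each update their single allocation at the same time, while one group of `count = 3` rolls one allocation at a time. The cv files collapse the three groups into one:

```hcl
# cv.nomad-style structure: one group with three instances,
# so max_parallel = 1 updates one allocation at a time.
group "zookeeper" {
  count = 3

  task "redis" {
    driver = "docker"

    config {
      image = "redis:7"
    }
  }
}
```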
================================================ FILE: rolling_upgrade/cv-new.nomad ================================================ job "rolling-upgrade-test" { datacenters = ["dc1"] type = "service" update { max_parallel = 1 min_healthy_time = "1m" health_check = "task_states" } group "zookeeper" { restart { attempts = 2 delay = "15s" interval = "1m" mode = "delay" } count = 3 task "redis" { driver = "docker" config { image = "redis:4.0" } } } } ================================================ FILE: rolling_upgrade/cv.nomad ================================================ job "rolling-upgrade-test" { datacenters = ["dc1"] type = "service" update { max_parallel = 1 min_healthy_time = "1m" health_check = "task_states" } group "zookeeper" { restart { attempts = 2 delay = "15s" interval = "1m" mode = "delay" } count = 3 task "redis" { driver = "docker" config { image = "redis:7" } } } } ================================================ FILE: rolling_upgrade/example-new.nomad ================================================ job "rolling-upgrade-test" { datacenters = ["dc1"] type = "service" update { max_parallel = 1 min_healthy_time = "1m" health_check = "task_states" } group "zookeeper-1" { restart { attempts = 2 delay = "15s" interval = "1m" mode = "delay" } ephemeral_disk { migrate = true size = "300" sticky = true } count = 1 task "zookeeper-1" { driver = "docker" config { image = "redis:4.0" } } } group "zookeeper-2" { restart { attempts = 2 delay = "15s" interval = "1m" mode = "delay" } ephemeral_disk { migrate = true size = "300" sticky = true } count = 1 task "zookeeper-2" { driver = "docker" config { image = "redis:4.0" } } } group "zookeeper-3" { restart { attempts = 2 delay = "15s" interval = "1m" mode = "delay" } ephemeral_disk { migrate = true size = "300" sticky = true } count = 1 task "zookeeper-3" { driver = "docker" config { image = "redis:4.0" } } } } ================================================ FILE: rolling_upgrade/example.nomad ================================================ job "rolling-upgrade-test" { datacenters = ["dc1"] type = "service" update { max_parallel = 1 min_healthy_time = "1m" health_check = "task_states" } group "zookeeper-1" { restart { attempts = 2 delay = "15s" interval = "1m" mode = "delay" } ephemeral_disk { migrate = true size = "300" sticky = true } count = 1 task "zookeeper-1" { driver = "docker" config { image = "redis:7" } } } group "zookeeper-2" { restart { attempts = 2 delay = "15s" interval = "1m" mode = "delay" } ephemeral_disk { migrate = true size = "300" sticky = true } count = 1 task "zookeeper-2" { driver = "docker" config { image = "redis:7" } } } group "zookeeper-3" { restart { attempts = 2 delay = "15s" interval = "1m" mode = "delay" } ephemeral_disk { migrate = true size = "300" sticky = true } count = 1 task "zookeeper-3" { driver = "docker" config { image = "redis:7" } } } } ================================================ FILE: sentinel/README.md ================================================ ## Sentinel Samples These jobs utilize Sentinel for enforcement. To use Sentinel, ACLs must be enabled on all of the nodes and bootstrapped. 
================================================ FILE: sentinel/alwaysFalse.sentinel ================================================ # Test policy always fails for demonstration purposes main = rule { false } ================================================ FILE: sentinel/example.nomad ================================================ job "example" { datacenters = ["dc1"] constraint { distinct_hosts = true } constraint { attribute = "${node.class}" value = "gpu" } group "cache" { network { port "db" {} } service { name = "global-redis-check" tags = ["global", "cache"] port = "db" check { name = "alive" type = "tcp" interval = "10s" timeout = "2s" } } task "redis" { driver = "docker" config { image = "redis:7" ports = ["db"] } } } } ================================================ FILE: sentinel/exampleGroupMissingNodeClass.nomad ================================================ job "example" { datacenters = ["dc1"] type = "service" constraint { distinct_hosts = true } group "cache" { count = 1 task "redis" { driver = "docker" config { image = "redis:7" port_map { db = 6379 } } resources { network { port "db" {} } } } } group "cache2" { count = 1 constraint { attribute = "${node.class}" value = "gpu" } task "redis" { driver = "docker" config { image = "redis:7" port_map { db = 6379 } } resources { network { port "db" {} } } } } } ================================================ FILE: sentinel/exampleGroupNodeClass.nomad ================================================ job "example" { datacenters = ["dc1"] type = "service" constraint { distinct_hosts = true } group "cache" { count = 1 constraint { attribute = "${node.class}" value = "gpu" } task "redis" { driver = "docker" config { image = "redis:7" port_map { db = 6379 } } resources { network { port "db" {} } } } } group "cache2" { count = 1 constraint { attribute = "${node.class}" value = "gpu" } task "redis" { driver = "docker" config { image = "redis:7" port_map { db = 6379 } } resources { network { port "db" {} } } } } } ================================================ FILE: sentinel/exampleJobNodeClass.nomad ================================================ job "example" { datacenters = ["dc1"] constraint { distinct_hosts = true } constraint { attribute = "${node.class}" value = "gpu" } group "cache" { network { port "db" { to = 6379 } } service { name = "global-redis-check" tags = ["global", "cache"] port = "db" check { name = "alive" type = "tcp" interval = "10s" timeout = "2s" } } task "redis" { driver = "docker" config { image = "redis:7" ports = ["db"] } } } } ================================================ FILE: sentinel/exampleNoNodeClass.nomad ================================================ job "example" { datacenters = ["dc1"] constraint { distinct_hosts = true } group "cache" { network { port "db" { to = 6379 } } task "redis" { driver = "docker" config { image = "redis:7" ports = ["db"] } } } } ================================================ FILE: sentinel/payload.json ================================================ { "Name": "anonymous", "Description": "Allow read-only access for anonymous requests", "Rules": " namespace \"default\" { policy = \"read\" } agent { policy = \"read\" } node { policy = \"read\" } " } ================================================ FILE: sentinel/requireNodeClass.sentinel ================================================ REQUIRED_CONSTRAINT = "${node.class}" main = rule { job_has_required_constraint or all_groups_have_required_constraint } job_has_required_constraint = rule { 
  has_job_required_constraint()
}

all_groups_have_required_constraint = rule {
  has_groups_required_constraint()
}

has_job_required_constraint = func() {
  for job.constraints as constraint {
    if constraint.l_target is REQUIRED_CONSTRAINT {
      return true
    }
  }
  return false
}

has_groups_required_constraint = func() {
  for job.task_groups as tg {
    group_has_required_constraint = false
    for tg.constraints as constraint {
      if constraint.l_target is REQUIRED_CONSTRAINT {
        group_has_required_constraint = true
      }
    }

    # If there is a group with no node_class, we can stop looking
    # and fail quickly.
    if not(group_has_required_constraint) {
      print("group", tg.name, "is missing the required constraint")
      return false
    }
  }

  # If we make it here, all of the task groups have node_class set.
  return true
}

================================================
FILE: server-variables/README.md
================================================
# WordPress

This job demonstrates several useful patterns for creating Nomad jobs:

- Nomad Host Volumes for persistent storage
- Using a pre-start task to wait until a dependency is available
- Template driven configuration to reduce static port references

## Prerequisites

- **Consul**: This job leverages Consul service registrations to locate the supporting MySQL instance.

## Necessary configuration

### Create the host volume in the configuration

Create a folder on one of your Nomad clients to host the database files. This example uses `/opt/nomad/volumes/wordpress-db`.

```shell-session
mkdir -p /opt/nomad/volumes/wordpress-db
```

Add the `host_volume` information to the client stanza in the Nomad configuration. If your `-config` flag points to a directory, you can create this as a standalone file in that same folder.

```hcl
client {
  # ...
  host_volume "wordpress-db" {
    path      = "/opt/nomad/volumes/wordpress-db"
    read_only = false
  }
}
```

Restart Nomad to read the new configuration.

```shell
systemctl restart nomad
```

================================================
FILE: server-variables/build-site.nomad
================================================
job "build-site" {
  datacenters = ["dc1"]
  type        = "batch"

  parameterized {
    meta_required = ["site_name"]
  }

  group "sitebuilder" {
    task "generate-password" {
      lifecycle {
        hook    = "prestart"
        sidecar = false
      }

      template {
        destination = "secret/generate_keys.sh"
        env         = true
        data        = <<EOT
#!/bin/bash
{{- $NMSN := env "NOMAD_META_site_name" -}}
{{- $UUID := "${uuidv4}" -}}
Site={{ $NMSN }}
UUID={{ $UUID }}
CONSUL_HTTP_TOKEN=c62d8564-c0c5-8dfe-3e75-005debbd0e40
echo "Creating credentials for site $Site..."
consul kv put wordpress/sites/$Site/db/user wp-site-$Site
consul kv put wordpress/sites/$Site/db/pass $UUID
consul kv put wordpress/sites/$Site/db/name wordpress-$Site
EOT
      }

      driver = "raw_exec"

      config {
        command = "secret/generate_keys.sh"
      }
    }

    task "make-database" {
      template {
        destination = "local/run.sql"
        data        = <<EOT
CREATE DATABASE {{ printf "wordpress-%s" .Name }};
CREATE USER {{ .User }} identified by {{ .Pass }};
EOT
      }

      template {
        destination = "secrets/env.txt"
        env         = true
        data        = <<EOT
MYSQL_PASSWORD=somewordpress
EOT
      }

      driver = "docker"

      config {
        image = "arey/mysql-client"
        args = [
          "--host=${MYSQL_HOST}",
          "--port=${MYSQL_PORT}",
          "--user=root",
          "--password=${MYSQL_PASSWORD}",
          "--execute=\"source /local/run.sql\""
        ]
      }
    }
  }
}

# $ docker run -v :/sql --link :mysql -it arey/mysql-client -h mysql -p -D -e "source /sql/"

================================================
FILE: server-variables/nginx.nomad
================================================
job "nginx" {
  datacenters = ["dc1"]
  type        = "system"

  group "nginx" {
    network {
      port "http" {
        static = 80
      }
    }

    service {
      name = "wp"
      port = "http"
    }

    task "nginx" {
      driver = "docker"

      config {
        image = "nginx"
        ports = ["http"]
        volumes = [
          "local:/etc/nginx/conf.d",
        ]
      }

      template {
        data = <&1 >/dev/null
do
  echo -n '.'
  sleep 2
  # There is a good opportunity to add a loop counter and a bail-out too, but
  # this script waits forever.
done
echo " Done."
EOT
      }

      config {
        image        = "alpine:latest"
        command      = "local/await-db.sh"
        network_mode = "host"
      }

      resources {
        cpu    = 200
        memory = 128
      }

      lifecycle {
        hook    = "prestart"
        sidecar = false
      }
    }

    task "wordpress" {
      driver = "docker"

      template {
        data = <

Welcome to the Foo Service.


You are on ${NOMAD_IP_http}.", ] } resources { network { port "http" {} } } service { name = "foo-service" tags = ["urlprefix-/foo"] canary_tags = ["urlprefix-/cfoo"] port = "http" check { type = "http" name = "health-check" interval = "15s" timeout = "5s" path = "/" } } } } } ================================================ FILE: system_jobs/system_deployment/foo-system.nomad2 ================================================ job "foo-service" { datacenters = ["dc1"] type = "system" update { max_parallel = 1 min_healthy_time = "10s" healthy_deadline = "2m" progress_deadline = "5m" canary = 1 } group "example" { ephemeral_disk { size = "110" } task "server" { artifact { source = "https://github.com/hashicorp/http-echo/releases/download/v0.2.3/http-echo_0.2.3_linux_amd64.tar.gz" } driver = "exec" config { command = "http-echo" args = [ "-listen", ":${NOMAD_PORT_http}", "-text", "

Welcome to the NEW NEW NEW NEW Foo Service.


You are on ${NOMAD_IP_http}.",
        ]
      }

      resources {
        network {
          port "http" {}
        }
      }

      service {
        name        = "foo-service"
        tags        = ["urlprefix-/foo"]
        canary_tags = ["urlprefix-/cfoo"]
        port        = "http"

        check {
          type     = "http"
          name     = "health-check"
          interval = "15s"
          timeout  = "5s"
          path     = "/"
        }
      }
    }
  }
}

================================================
FILE: system_jobs/system_filter/filtered.nomad
================================================
job "filtered" {
  datacenters = ["dc1"]
  type        = "system"

  group "cache" {
    constraint {
      attribute = "${attr.kernel.name}"
      operator  = "="
      value     = "windows"
    }

    task "job" {
      driver = "raw_exec"

      config {
        command = "C:\\Windows\\System32\\notepad.exe"
      }

      resources {
        cpu    = 100
        memory = 256
      }
    }
  }
}

================================================
FILE: system_jobs/system_filter/host_vol.nomad
================================================
job "registry-system" {
  datacenters = ["dc1"]
  type        = "system"
  priority    = 80

  group "docker" {
    network {
      port "registry" {
        to     = 5000
        static = 5000
      }
    }

    service {
      name = "registry"
      port = "registry"

      check {
        type     = "tcp"
        port     = "registry"
        interval = "10s"
        timeout  = "2s"
      }
    }

    volume "docker-registry" {
      type      = "host"
      source    = "docker-registry"
      read_only = false
    }

    task "container" {
      driver = "docker"

      volume_mount {
        volume      = "docker-registry"
        destination = "/var/lib/registry"
      }

      config {
        image = "registry"
        ports = ["registry"]
      }

      resources {
        cpu    = 500
        memory = 256
      }
    }
  }
}

================================================
FILE: task_deps/consul-lock/myapp.nomad
================================================
job "myapp" {
  datacenters = ["dc1"]
  type        = "service"

  group "myapp" {
    # disable deployments
    update {
      max_parallel = 0
    }

    task "await-myservice" {
      driver = "docker"

      config {
        image       = "busybox:1.28"
        command     = "sh"
        args        = ["-c", "echo -n 'Waiting for service'; until nslookup myservice.service.consul 2>&1 >/dev/null; do echo '.'; sleep 2; done"]
        dns_servers = ["10.0.2.21"]
      }

      resources {
        cpu    = 200
        memory = 128
      }

      lifecycle {
        hook    = "prestart"
        sidecar = false
      }
    }

    task "myapp-container" {
      driver = "docker"

      config {
        image   = "busybox"
        command = "sh"
        args    = ["-c", "echo The app is running! && sleep 3600"]
      }

      resources {
        cpu    = 200
        memory = 128
      }
    }
  }
}

================================================
FILE: task_deps/disk_check/README.md
================================================
## Task Dependency: Available Disk

This demonstrates using a batch script to test for a resource before starting a workload. This will also cause the job to fail, which should trigger rescheduling.

keywords: template, task dependency, reschedule, diskspace, disk

================================================
FILE: task_deps/disk_check/disk.nomad
================================================
# this job will hopefully die if the node doesn't have
# enough disk space to service the job
job "lifecycle" {
  datacenters = ["dc1"]
  type        = "service"

  group "cache" {
    # disable deployments
    update {
      max_parallel = 0
    }

    task "init" {
      template {
        data = <&1 >/dev/null; do echo '.'; sleep 2; done"]
      }

      resources {
        cpu    = 200
        memory = 128
      }

      lifecycle {
        hook    = "prestart"
        sidecar = false
      }
    }

    task "myapp-container" {
      driver = "docker"

      config {
        image   = "busybox"
        command = "sh"
        args    = ["-c", "echo The app is running! && sleep 3600"]
      }

      resources {
        cpu    = 200
        memory = 128
      }
    }
  }
}
```

This job contains a prestart task that will query a Consul DNS API endpoint for the "myservice" service. Note: you might need to add the `dns_servers` value to the config stanza of the await-myservice task in the myapp.nomad file to direct the query to a DNS server that can receive queries on port 53 for your Consul DNS query root domain.
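For reference, here is a sketch of the await-myservice `config` stanza with that value added; `10.0.2.21` is simply the address used by other examples in this repository, so substitute your own DNS server:

```hcl
config {
  image   = "busybox:1.28"
  command = "sh"
  args    = ["-c", "echo -n 'Waiting for service'; until nslookup myservice.service.consul 2>&1 >/dev/null; do echo '.'; sleep 2; done"]

  # Point container DNS at a resolver that serves Consul DNS on port 53.
  dns_servers = ["10.0.2.21"]
}
```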
## Run the myapp job

Run `nomad run myapp.nomad`.

```shell
$ nomad run myapp.nomad
```

The job will launch and provide an allocation ID in the output.

```plaintext
$ nomad run myapp.nomad
==> Monitoring evaluation "01c73d5a"
    Evaluation triggered by job "myapp"
    Allocation "3044dda0" created: node "f26809e6", group "myapp"
    Evaluation status changed: "pending" -> "complete"
==> Evaluation "01c73d5a" finished with status "complete"
```

Run the `nomad alloc status` command with the provided allocation ID.

```shell
$ nomad alloc status 3044dda0
```

## Verify myapp-container is blocked

You will receive a lot of information back. For this guide, focus on the status of each task. Each task's status is output in lines that look like `Task "await-myservice" is "running"`.

```plaintext
$ nomad alloc status 3044dda0
ID                  = 3044dda0-8dc1-1bac-86ea-66a3557c67d3
Eval ID             = 01c73d5a
Name                = myapp.myapp[0]
Node ID             = f26809e6
Node Name           = nomad-client-2.node.consul
Job ID              = myapp
Job Version         = 0
Client Status       = running
Client Description  = Tasks are running
Desired Status      = run
Desired Description =
Created             = 43s ago
Modified            = 42s ago

Task "await-myservice" is "running"
Task Resources
CPU        Memory          Disk     Addresses
3/200 MHz  80 KiB/128 MiB  300 MiB

Task Events:
Started At     = 2020-03-18T17:07:26Z
Finished At    = N/A
Total Restarts = 0
Last Restart   = N/A

Recent Events:
Time                       Type        Description
2020-03-18T13:07:26-04:00  Started     Task started by client
2020-03-18T13:07:26-04:00  Task Setup  Building Task Directory
2020-03-18T13:07:26-04:00  Received    Task received by client

Task "myapp-container" is "pending"
Task Resources
CPU      Memory   Disk     Addresses
200 MHz  128 MiB  300 MiB

Task Events:
Started At     = N/A
Finished At    = N/A
Total Restarts = 0
Last Restart   = N/A

Recent Events:
Time                       Type      Description
2020-03-18T13:07:26-04:00  Received  Task received by client
```

Notice that the await-myservice task is running and that the myapp-container task is pending. The myapp-container task will remain pending until the await-myservice container completes successfully.

## Start myservice job

You can run the myservice.nomad job to create a job that registers a "myservice" service in Consul. This will allow the await-myservice task to terminate successfully. Run `nomad run myservice.nomad`.

```shell
$ nomad run myservice.nomad
```

Nomad will start the job and return scheduling information.

```plaintext
$ nomad run myservice.nomad
==> Monitoring evaluation "f31f8eb1"
    Evaluation triggered by job "myservice"
    Allocation "d7767adf" created: node "f26809e6", group "myservice"
    Evaluation within deployment: "3d86e09a"
    Evaluation status changed: "pending" -> "complete"
==> Evaluation "f31f8eb1" finished with status "complete"
```

Re-check the allocation status of your myapp allocation.

```shell
$ nomad alloc status 3044dda0
```

## Verify myapp-container is running

Finally, check the output of the alloc status command for the task statuses.
```plaintext
$ nomad alloc status 3044dda0
ID                  = 3044dda0-8dc1-1bac-86ea-66a3557c67d3
Eval ID             = 01c73d5a
Name                = myapp.myapp[0]
Node ID             = f26809e6
Node Name           = nomad-client-2.node.consul
Job ID              = myapp
Job Version         = 0
Client Status       = running
Client Description  = Tasks are running
Desired Status      = run
Desired Description =
Created             = 21m38s ago
Modified            = 7m27s ago

Task "await-myservice" is "dead"
Task Resources
CPU        Memory          Disk     Addresses
0/200 MHz  80 KiB/128 MiB  300 MiB

Task Events:
Started At     = 2020-03-18T17:07:26Z
Finished At    = 2020-03-18T17:21:35Z
Total Restarts = 0
Last Restart   = N/A

Recent Events:
Time                       Type        Description
2020-03-18T13:21:35-04:00  Terminated  Exit Code: 0
2020-03-18T13:07:26-04:00  Started     Task started by client
2020-03-18T13:07:26-04:00  Task Setup  Building Task Directory
2020-03-18T13:07:26-04:00  Received    Task received by client

Task "myapp-container" is "running"
Task Resources
CPU        Memory          Disk     Addresses
0/200 MHz  32 KiB/128 MiB  300 MiB

Task Events:
Started At     = 2020-03-18T17:21:37Z
Finished At    = N/A
Total Restarts = 0
Last Restart   = N/A

Recent Events:
Time                       Type        Description
2020-03-18T13:21:37-04:00  Started     Task started by client
2020-03-18T13:21:35-04:00  Driver      Downloading image
2020-03-18T13:21:35-04:00  Task Setup  Building Task Directory
2020-03-18T13:07:26-04:00  Received    Task received by client
```

Notice that the await-myservice task is dead and, based on the Recent Events table, terminated with "Exit Code: 0", which indicates that it completed successfully. The myapp-container task has now moved to the "running" status and the container is running.

================================================
FILE: task_deps/interjob/myapp.nomad
================================================
job "myapp" {
  datacenters = ["dc1"]
  type        = "service"

  group "myapp" {
    # disable deployments
    update {
      max_parallel = 0
    }

    task "await-myservice" {
      driver = "docker"

      config {
        image       = "busybox:1.28"
        command     = "sh"
        args        = ["-c", "echo -n 'Waiting for service'; until nslookup myservice.service.consul 2>&1 >/dev/null; do echo '.'; sleep 2; done"]
        dns_servers = ["10.0.2.21"]
      }

      resources {
        cpu    = 200
        memory = 128
      }

      lifecycle {
        hook    = "prestart"
        sidecar = false
      }
    }

    task "myapp-container" {
      driver = "docker"

      config {
        image   = "busybox"
        command = "sh"
        args    = ["-c", "echo The app is running! && sleep 3600"]
      }

      resources {
        cpu    = 200
        memory = 128
      }
    }
  }
}

================================================
FILE: task_deps/interjob/myservice.nomad
================================================
job "myservice" {
  datacenters = ["dc1"]
  type        = "service"

  group "myservice" {
    task "myservice" {
      driver = "docker"

      config {
        image   = "busybox"
        command = "sh"
        args    = ["-c", "echo The service is running! && while true; do sleep 2; done"]
      }

      resources {
        cpu    = 200
        memory = 128
      }

      service {
        name = "myservice"
      }
    }
  }
}

================================================
FILE: task_deps/k8sdoc/README.md
================================================
# Task Dependencies: Kubernetes init containers doc comparison

This looks at the [Init Containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) documentation for Kubernetes and attempts to reproduce the examples for Nomad using Task Dependencies.
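As a rough mapping, offered as an interpretation rather than anything stated in the Kubernetes documentation itself: a Kubernetes init container corresponds to a Nomad prestart task that is not a sidecar, since it must exit successfully before the main tasks start.

```hcl
# Roughly equivalent to a Kubernetes init container:
# runs before the main tasks and must exit 0.
lifecycle {
  hook    = "prestart"
  sidecar = false
}
```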
================================================
FILE: task_deps/k8sdoc/init.nomad
================================================
job "lifecycle" {
  datacenters = ["dc1"]
  type        = "service"

  group "myservice" {
    task "myservice" {
      driver = "docker"

      config {
        image   = "busybox"
        command = "sh"
        args    = ["-c", "echo The service is running! && while true; do sleep 2; done"]
      }

      resources {
        cpu    = 200
        memory = 128
      }
    }
  }

  group "cache" {
    # disable deployments
    update {
      max_parallel = 0
    }

    task "init-myservice" {
      driver = "docker"

      config {
        image   = "busybox"
        command = "sh"
        args    = ["-c", "echo -n 'Waiting for service...'; until nslookup myservice.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo -n '.'; sleep 2; done"]
      }

      lifecycle {
        hook    = "prestart"
        sidecar = true
      }

      resources {
        cpu    = 200
        memory = 128
      }
    }

    task "init-mydb" {
      driver = "docker"

      config {
        image   = "busybox"
        command = "sh"
        args    = ["-c", "until nslookup mydb.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for mydb; sleep 2; done"]
      }

      lifecycle {
        hook = "prestart"
      }

      resources {
        cpu    = 200
        memory = 128
      }
    }

    task "myapp-container" {
      driver = "docker"

      config {
        image   = "busybox"
        command = "sh"
        args    = ["-c", "echo The app is running! && sleep 3600"]
      }

      resources {
        cpu    = 200
        memory = 128
      }
    }
  }
}

================================================
FILE: task_deps/k8sdoc/k8sdoc1.nomad
================================================
job "lifecycle" {
  datacenters = ["dc1"]
  type        = "service"

  group "cache" {
    # disable deployments
    update {
      max_parallel = 0
    }

    task "init-myservice" {
      driver = "docker"

      config {
        image       = "busybox:1.28"
        command     = "sh"
        dns_servers = ["10.0.2.21"]
        args        = ["-c", "echo -n 'Waiting for service...'; until nslookup myservice.service.consul; do echo '.'; sleep 2; done"]
      }

      lifecycle {
        hook    = "prestart"
        sidecar = false
      }

      resources {
        cpu    = 200
        memory = 128
      }
    }

    task "myapp-container" {
      driver = "docker"

      config {
        image   = "busybox"
        command = "sh"
        args    = ["-c", "echo The app is running! && sleep 3600"]
      }

      resources {
        cpu    = 200
        memory = 128
      }
    }
  }
}

================================================
FILE: task_deps/k8sdoc/myapp.nomad
================================================
job "myapp" {
  datacenters = ["dc1"]
  type        = "service"

  group "myapp" {
    # disable deployments
    update {
      max_parallel = 0
    }

    task "await-myservice" {
      driver = "docker"

      config {
        image       = "busybox:1.28"
        command     = "sh"
        dns_servers = ["10.0.2.21"]
        args        = ["-c", "echo -n 'Waiting for service'; until nslookup myservice.service.consul 2>&1 >/dev/null; do echo '.'; sleep 2; done"]
      }

      lifecycle {
        hook    = "prestart"
        sidecar = false
      }

      resources {
        cpu    = 200
        memory = 128
      }
    }

    task "myapp-container" {
      driver = "docker"

      config {
        image   = "busybox"
        command = "sh"
        args    = ["-c", "echo The app is running! && sleep 3600"]
      }

      resources {
        cpu    = 200
        memory = 128
      }
    }
  }
}

================================================
FILE: task_deps/k8sdoc/myservice.nomad
================================================
job "myservice" {
  datacenters = ["dc1"]
  type        = "service"

  group "myservice" {
    task "myservice" {
      driver = "docker"

      config {
        image   = "busybox"
        command = "sh"
        args    = ["-c", "echo The service is running!
================================================
FILE: task_deps/k8sdoc/myservice.nomad
================================================
job "myservice" {
  datacenters = ["dc1"]
  type        = "service"

  group "myservice" {
    task "myservice" {
      driver = "docker"

      config {
        image   = "busybox"
        command = "sh"
        args    = ["-c", "echo The service is running! && while true; do sleep 2; done"]
      }

      resources {
        cpu    = 200
        memory = 128
      }

      service {
        name = "myservice"
      }
    }
  }
}

================================================
FILE: task_deps/sidecar/example.nomad
================================================
job "example" {
  datacenters = ["dc1"]

  group "cache" {
    network {
      port "db" {}
    }

    task "remote_syslog_stdout" {
      driver = "docker"

      config {
        image = "octohost/remote_syslog"
        args = [
          "-p", "29655",
          "-d", "logs5.papertrailapp.com",
          "/alloc/logs/redis.stdout.0",
        ]
      }

      lifecycle {
        sidecar = true
        hook    = "prestart"
      }
    }

    task "remote_syslog_stderr" {
      driver = "docker"

      config {
        image = "octohost/remote_syslog"
        args = [
          "-p", "29655",
          "-d", "logs5.papertrailapp.com",
          "/alloc/logs/redis.stderr.0",
        ]
      }

      lifecycle {
        sidecar = true
        hook    = "prestart"
      }
    }

    task "redis" {
      driver = "docker"

      config {
        image = "redis:7"
        ports = ["db"]
      }

      resources {
        cpu    = 500
        memory = 256
      }
    }
  }
}

================================================
FILE: template/batch/README.md
================================================
## Batch Templates

Batch jobs provide a convenient way to experiment with templates.

* **parameter.nomad** - This job demonstrates using a provided meta variable to create a composed key, which could then be used in another template tag, like `key`, `service`, `secret`, etc.

================================================
FILE: template/batch/context.nomad
================================================
job "parameter" {
  datacenters = ["dc1"]
  type        = "batch"

  group "group" {
    count = 1

    task "command" {
      driver = "exec"

      config {
        command = "bash"
        args    = ["-c", "cat local/template.out"]
      }

      template {
        data = <
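One possible workflow for experimenting with the batch template above (a sketch, not a script from the repo; the job and task names come from `context.nomad`):

```shell
# Run the batch job, then read the rendered template back out of the
# completed "command" task, which cats local/template.out.
nomad job run context.nomad
nomad job status parameter        # note the allocation ID
nomad alloc logs <alloc-id> command
```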
> **NOTE:** While the container is restarting, you might get the following error.
>
> ```text
> failed to exec into task: task "redis" is not running.
> ```
>
> If you do, try running the command again in a few seconds.

```shell
$ nomad alloc exec 64c66418-7b01-db43-02f9-eb169ce99921 redis-server -v
Redis server v=7.0.4 sha=00000000:0 malloc=jemalloc-5.2.1 bits=64 build=eed36d5f4a2dd39c
```

================================================
FILE: template/secure_variables/example.nomad
================================================
job "example" {
  datacenters = ["dc1"]

  group "cache" {
    network {
      port "db" {
        to = 6379
      }
    }

    task "redis" {
      driver = "docker"

      config {
        image          = "redis:7"
        ports          = ["db"]
        auth_soft_fail = true
      }

      resources {
        cpu    = 500
        memory = 256
      }

      template {
        destination = "local/template.out"
        data = <

> **NOTE:** While the container is restarting, you might get the following error.
>
> ```text
> failed to exec into task: task "redis" is not running.
> ```
>
> If you do, try running the command again in a few seconds.

```shell
$ nomad alloc exec 64c66418-7b01-db43-02f9-eb169ce99921 redis-server -v
Redis server v=7.0.4 sha=00000000:0 malloc=jemalloc-5.2.1 bits=64 build=eed36d5f4a2dd39c
```

================================================
FILE: template/secure_variables/interpolated_job/interpolated_job.hcl
================================================
job "example" {
  datacenters = ["dc1"]

  group "cache" {
    network {
      port "db" {
        to = 6379
      }
    }

    // service {
    //   tags = ["redis", "cache"]
    //   port = "db"
    //
    //   check {
    //     name     = "alive"
    //     type     = "tcp"
    //     interval = "10s"
    //     timeout  = "2s"
    //   }
    // }

    task "redis" {
      template {
        data = <

================================================
FILE: template/secure_variables/multiregion/start.sh
================================================
  echo 'log_level = "DEBUG"' >> .state/config.${Region}/logging.hcl
  echo "data_dir = \"$(pwd)/.state/data.${Region}/\"" > .state/config.${Region}/data_dir.hcl
  echo "name = \"test_${Region}\"" > .state/config.${Region}/name.hcl
  echo "region = \"${Region}\"" >> .state/config.${Region}/name.hcl
done
echo ""

I=1
for Region in global dc1
do
  echo "server { enabled=true bootstrap_expect=1 }" > .state/config.${Region}/server.hcl
  echo "client { enabled=true }" > .state/config.${Region}/client.hcl
  echo "plugin \"raw_exec\" { config { enabled = true }}" > .state/config.${Region}/raw_exec.hcl
  echo "addresses {" > .state/config.${Region}/address.hcl
  echo "advertise {" > .state/config.${Region}/advertise.hcl
  echo "ports {" > .state/config.${Region}/ports.hcl
  P=6
  for Proto in http rpc serf
  do
    echo "  ${Proto} = \"127.0.0.1\"" >> .state/config.${Region}/address.hcl
    echo "  ${Proto} = \"${I}464${P}\"" >> .state/config.${Region}/ports.hcl
    echo "  ${Proto} = \"127.0.0.1:${I}464${P}\"" >> .state/config.${Region}/advertise.hcl
    P=$((P+1))
  done
  echo "}" >> .state/config.${Region}/address.hcl
  echo "}" >> .state/config.${Region}/advertise.hcl
  echo "}" >> .state/config.${Region}/ports.hcl
  I=$((I+1))
done
echo ""

echo "🚀 Starting clusters..."
for Region in global dc1
do
  echo "  - \"${Region}\""
  nomad agent -config=$(pwd)/.state/config.${Region} > /dev/null 2>.state/log.${Region}/stderr.out &
  echo -n $! > .state/${Region}.pid
done
echo ""

echo "⏳ Waiting for clusters to stabilize"
while [ -z "$globalUp" ] || [ -z "$dc1Up" ]
do
  if [ -z "$globalMsg" ]; then
    # First pass through the loop
    globalMsg="  - checking global: "
    dc1Msg="  - checking dc1: "
  else
    # move back up 2 lines
    tput el1; tput cuu1; tput cuu1; tput ed
  fi
  sleep 1
  if [ "$globalUp" == "" ]; then
    curl -q -s -f http://127.0.0.1:14646/v1/agent/health > /dev/null
    if [ $? -eq 0 ]
    then
      globalMsg="${globalMsg}✅"
      globalUp=true
    else
      globalMsg="${globalMsg}."
    fi
  fi
  if [ "$dc1Up" == "" ]; then
    curl -q -s -f http://127.0.0.1:24646/v1/agent/health > /dev/null
    if [ $? -eq 0 ]
    then
      dc1Msg="${dc1Msg}✅"
      dc1Up=true
    else
      dc1Msg="${dc1Msg}."
    fi
  fi
  echo "${globalMsg}"
  echo "${dc1Msg}"
done
echo ""

echo "🔗 Joining clusters"
export NOMAD_ADDR=http://127.0.0.1:14646
nomad server join 127.0.0.1:24648
echo ""

echo "🎉 The environment is running."
echo "To connect to \"global\" region, run:"
echo "  export NOMAD_ADDR=http://127.0.0.1:14646"
echo "To connect to \"dc1\" region, run:"
echo "  export NOMAD_ADDR=http://127.0.0.1:24646"
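Once `start.sh` reports that the environment is running, a quick sanity check might look like this (illustrative only; the ports follow the `1464x`/`2464x` scheme computed by the script above):

```shell
export NOMAD_ADDR=http://127.0.0.1:14646

# After the join succeeds, servers from both regions should be listed.
nomad server members

# Jobs can target either region explicitly.
nomad job run -region dc1 template.nomad
```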
================================================
FILE: template/secure_variables/multiregion/stop.sh
================================================
#! /usr/bin/env bash

for Region in global dc1
do
  echo "Stopping region \"${Region}\"..."
  kill $(cat .state/${Region}.pid)
done

echo "Purging test data..."
rm -rf .state

================================================
FILE: template/secure_variables/multiregion/template.nomad
================================================
job "template" {
  datacenters = ["dc1"]
  type        = "service"

  group "group" {
    count = 1

    network {
      port "http" {}
    }

    task "template" {
      driver = "raw_exec"

      config {
        command = "python"
        args    = ["-m", "http.server", "--bind", "${NOMAD_IP_http}", "${NOMAD_PORT_http}"]
      }

      template {
        data = <
Secure Variables

Nomad

Secure Variables

Path          Metadata
{{- range $I, $P := nomadVarList }}
{{$P}}
              Namespace     {{$P.Namespace}}
              Path          {{$P.Path}}
              Create Time   {{$P.CreateTime}}
              Create Index  {{$P.CreateIndex}}
              Modify Time   {{$P.ModifyTime}}
              Modify Index  {{$P.ModifyIndex}}
              Items
              {{ with nomadVar $P.Path }}{{ range $K, $V := . }}
              Key       Value
              {{$K}}    {{$V}}
              {{ end }}{{ end }}
{{else}}
No Secure Variables Found
{{end}}
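The `{{else}}` branch above renders until at least one variable exists. A hypothetical seeding that matches the sample output shown later in this directory:

```shell
nomad var put my/var/a k1=v1 k2=v2
nomad var put my/var/b k1=v1 k2=v2
nomad var put other/var/a k1=v1 k2=v2
nomad var list
```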
================================================
FILE: template/secure_variables/template-playground.nomad
================================================
job "template" {
  datacenters = ["dc1"]
  type        = "batch"

  group "group" {
    count = 1

    network {
      port "export" {}
      port "exstat" {
        static = 8080
      }
    }

    task "env-output" {
      driver = "raw_exec"
      config {
        command = "env"
      }
    }

    task "date-output" {
      driver = "raw_exec"
      config {
        command = "date"
      }
    }

    task "template" {
      driver = "raw_exec"

      config {
        command = "bash"
        args    = ["-c", "cat local/template.out"]
      }

      template {
        data = <
Secure Variables

Nomad

Secure Variables

Path          Metadata
my/var/a
              Namespace     default
              Path          my/var/a
              Create Time   2022-08-22 15:56:13.771313 -0400 EDT
              Create Index  28
              Modify Time   2022-08-22 15:56:13.771313 -0400 EDT
              Modify Index  28
              Items
              Key   Value
              k1    v1
              k2    v2
my/var/b
              Namespace     default
              Path          my/var/b
              Create Time   2022-08-22 15:56:13.934377 -0400 EDT
              Create Index  29
              Modify Time   2022-08-22 15:56:13.934377 -0400 EDT
              Modify Index  29
              Items
              Key   Value
              k1    v1
              k2    v2
other/var/a
              Namespace     default
              Path          other/var/a
              Create Time   2022-08-22 15:56:14.10122 -0400 EDT
              Create Index  30
              Modify Time   2022-08-22 15:56:14.10122 -0400 EDT
              Modify Index  30
              Items
              Key   Value
              k1    v1
              k2    v2
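The same data can be cross-checked from the CLI, for example (a sketch):

```shell
# Shows the items (k1=v1, k2=v2) and the metadata rendered above.
nomad var get my/var/a
```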
================================================
FILE: template/secure_variables/template.tmpl
================================================
Secure Variables

Nomad

Secure Variables

Path          Metadata
{{- with nomadVar "nomad/jobs/variable" }}{{ $P := .Metadata }}
{{$P}}
              Namespace     {{$P.Namespace}}
              Path          {{$P.Path}}
              Create Time   {{$P.CreateTime}}
              Create Index  {{$P.CreateIndex}}
              Modify Time   {{$P.ModifyTime}}
              Modify Index  {{$P.ModifyIndex}}
              Items
              {{ with nomadVar $P.Path }}{{ range $K, $V := . }}
              Key       Value
              {{$K}}    {{$V}}
              {{ end }}{{ end }}
{{else}}
No Secure Variables Found
{{end}}
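This template reads the fixed path `nomad/jobs/variable`, which matches Nomad's `nomad/jobs/<job_id>` convention: a job named `variable` (like `variable_view.nomad` below) can read that path by default. Seeding it is a one-liner (illustrative):

```shell
nomad var put nomad/jobs/variable k1=v1 k2=v2
```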
================================================
FILE: template/secure_variables/variable_view.nomad
================================================
job "variable" {
  datacenters = ["dc1"]

  group "www" {
    network {
      port "www" {
        to = 8080
      }
    }

    task "nginx" {
      driver = "docker"

      config {
        image          = "nginx:1.23.1-alpine"
        ports          = ["www"]
        auth_soft_fail = true

        volumes = [
          "local/nginx.conf:/etc/nginx/conf.d/default.conf",
          "local/www/index.html:/usr/share/nginx/html/index.html",
        ]
      }

      resources {
        cpu    = 500
        memory = 256
      }

      template {
        destination = "local/nginx.conf"
        data = <

if ($? == -1) {
    print "failed to execute: $!\n";
}
elsif ($? & 127) {
    printf "child died with signal %d, %s coredump\n",
        ($? & 127), ($? & 128) ? 'with' : 'without';
}
else {
    $exitCode = $? >> 8;
}

if ($exitCode == 255) {
    print "I'm an exitcode 255!\n";
    # try parsing the error some?
    if ($return_value =~ "Failed to parse using HCL 2") {
        print "I'm here!\n";
        my $re = '/.*:\n([^:]+):(\d+),(\d+)-(\d+)\n(.*)/m';
        my @matches = $return_value =~ $re;
        # Print the entire match result
        print Dumper(@matches);
        exit;
    }
}

if ($exitCode == 0) {
    print "Success!\n";
}
else {
    printf "Failed with error code %d\nError message: %s\n", $exitCode, $return_value;
}

================================================
FILE: vault/deleted_policy/README.md
================================================
## Deleted Policy

The Nomad Vault integration will shut down workloads that depend on a specific Vault policy once the server determines that it can no longer derive a token containing the requested policy. These files let you reproduce this yourself. I will need to come back and document how this _actually_ works, though.

================================================
FILE: vault/deleted_policy/break_it.sh
================================================
#!/bin/bash

echo "Breaking the 'nomad-cluster' role"
vault write /auth/token/roles/nomad-cluster @nomad-cluster-role.broken.json

================================================
FILE: vault/deleted_policy/nomad-cluster-role.broken.json
================================================
{
  "disallowed_policies": ["nomad-server"],
  "allowed_policies": ["nomad-client"],
  "explicit_max_ttl": 0,
  "name": "nomad-cluster",
  "orphan": true,
  "period": 600,
  "renewable": true
}

================================================
FILE: vault/deleted_policy/nomad-cluster-role.json
================================================
{
  "disallowed_policies": ["nomad-server"],
  "allowed_policies": ["nomad-client", "my-cool-policy"],
  "explicit_max_ttl": 0,
  "name": "nomad-cluster",
  "orphan": true,
  "period": 600,
  "renewable": true
}

================================================
FILE: vault/deleted_policy/nomad-server-policy.hcl
================================================
path "auth/token/lookup-self" {
  capabilities = ["read"]
}

path "auth/token/lookup" {
  capabilities = ["update"]
}

path "auth/token/revoke-accessor" {
  capabilities = ["update"]
}

path "sys/capabilities-self" {
  capabilities = ["update"]
}

path "auth/token/renew-self" {
  capabilities = ["update"]
}

path "auth/token/create/nomad-cluster" {
  capabilities = ["update"]
}

path "auth/token/roles/nomad-cluster" {
  capabilities = ["read"]
}

path "auth/token/create/nomad-aaa" {
  capabilities = ["update"]
}

path "auth/token/roles/nomad-aaa" {
  capabilities = ["read"]
}
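The README above leaves the repro undocumented; pieced together from the scripts in this directory, the intended sequence appears to be roughly the following (an assumption, in particular that `temp1.nomad` requests the `my-cool-policy` Vault policy):

```shell
# Bring up a dev Vault and a dev Nomad wired together via the
# nomad-cluster token role.
./setup.sh

# In another terminal, run the job that depends on my-cool-policy,
# then remove that policy from the role's allowed list.
nomad job run temp1.nomad
./break_it.sh

# Nomad can no longer derive a token containing the requested policy
# and should eventually shut the workload down.
nomad job status temp
```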
echo "" } cuteSleep() { echo -n "Sleeping for $1 seconds" for i in $(seq 1 ${1}) do echo -n "." sleep 1 done echo "" } export VAULT_ADDR=http://127.0.0.1:8200 echo "Starting Vault Dev Server" vault server -dev &>vault.log & VAULT_PID=$! echo "Started Vault Dev Server (pid ${VAULT_PID})" cuteSleep 2 # Write the policy to Vault echo "Creating the vault policies..." echo " 'nomad-server'" vault policy write nomad-server nomad-server-policy.hcl echo " 'nomad-client'" vault policy write nomad-client nomad-server-policy.hcl echo " 'my-cool-policy'" vault policy write my-cool-policy nomad-server-policy.hcl # Create the token role with Vault echo "Creating the 'nomad-cluster' role" vault write /auth/token/roles/nomad-cluster @nomad-cluster-role.json vault token create -policy nomad-server -period 10m -orphan | tee > nomad-server.token.out grep -e "^token " nomad-server.token.out | awk '{print $2}' | tr -d '\n' > nomad-server.token DATA_DIR=`pwd`/data nomad agent -dev -vault-enabled=true -data-dir=${DATA_DIR} -vault-address="http://127.0.0.1:8200" -vault-token="`cat nomad-server.token`" -vault-create-from-role=nomad-cluster &>nomad.log & NOMAD_PID=$! echo "Started Nomad Dev Server (pid ${NOMAD_PID})" cuteSleep 8 wait echo "Killing Nomad Dev Server (pid ${NOMAD_PID})" kill ${NOMAD_PID} echo "Killing Vault Dev Server (pid ${VAULT_PID})" kill ${VAULT_PID} echo "Cleaning up data directory." rm -rf ${DATA_DIR} ================================================ FILE: vault/deleted_policy/temp1.nomad ================================================ job temp { datacenters = ["dc1"] group "group" { count = 1 ## You might want to constrain this, so here's one to help # constraint { # attribute = "${attr.unique.hostname}" # operator = "=" # value = "nomad-client-1.node.consul" # } task "sleepy-bash" { template { data = <