Repository: openconfig/gnmic Branch: main Commit: 0dd66c0340c8 Files: 666 Total size: 3.4 MB Directory structure: gitextract_xxir9y9t/ ├── .dockerignore ├── .github/ │ ├── dependabot.yml │ └── workflows/ │ ├── close_state_issues.yml │ ├── docs.yml │ ├── lint.yml │ ├── release.yml │ └── test.yml ├── .gitignore ├── .golangci.yml ├── .goreleaser.yml ├── Dockerfile ├── LICENSE ├── README.md ├── cmd/ │ └── demo/ │ ├── getresponse.textproto │ ├── setrequest.textproto │ ├── setrequest2.textproto │ └── subscriberesponses.textproto ├── config.json ├── config.toml ├── config.yaml ├── docs/ │ ├── CNAME │ ├── basic_usage.md │ ├── blog/ │ │ └── index.md │ ├── changelog.md │ ├── cmd/ │ │ ├── capabilities.md │ │ ├── collector.md │ │ ├── diff/ │ │ │ ├── diff.md │ │ │ ├── diff_set_to_notifs.md │ │ │ └── diff_setrequest.md │ │ ├── generate/ │ │ │ ├── generate_path.md │ │ │ └── generate_set_request.md │ │ ├── generate.md │ │ ├── get.md │ │ ├── getset.md │ │ ├── listen.md │ │ ├── path.md │ │ ├── processor.md │ │ ├── prompt.md │ │ ├── proxy.md │ │ ├── set.md │ │ └── subscribe.md │ ├── deployments/ │ │ ├── clusters/ │ │ │ ├── containerlab/ │ │ │ │ ├── cluster_with_gnmi_server_and_prometheus_output.md │ │ │ │ ├── cluster_with_influxdb_output.md │ │ │ │ ├── cluster_with_nats_input_and_prometheus_output.md │ │ │ │ └── cluster_with_prometheus_output.md │ │ │ ├── docker-compose/ │ │ │ │ ├── cluster_with_influxdb_output.md │ │ │ │ ├── cluster_with_nats_input_and_prometheus_output.md │ │ │ │ └── cluster_with_prometheus_output.md │ │ │ └── kubernetes/ │ │ │ └── cluster_with_prometheus_output.md │ │ ├── deployments_intro.md │ │ ├── pipelines/ │ │ │ └── docker-compose/ │ │ │ ├── forked_pipeline.md │ │ │ ├── gnmic_cluster_nats_prometheus.md │ │ │ ├── nats_influxdb.md │ │ │ └── nats_prometheus.md │ │ └── single-instance/ │ │ ├── containerlab/ │ │ │ ├── influxdb-output.md │ │ │ ├── kafka-output.md │ │ │ ├── multiple-outputs.md │ │ │ ├── nats-output.md │ │ │ ├── prometheus-output.md │ │ │ └── prometheus-remote-write-output.md │ │ └── docker-compose/ │ │ ├── influxdb-output.md │ │ ├── kafka-output.md │ │ ├── multiple-outputs.md │ │ ├── nats-output.md │ │ └── prometheus-output.md │ ├── global_flags.md │ ├── index.md │ ├── install.md │ ├── stylesheets/ │ │ └── extra.css │ └── user_guide/ │ ├── HA.md │ ├── actions/ │ │ └── actions.md │ ├── api/ │ │ ├── api_intro.md │ │ ├── cluster.md │ │ ├── configuration.md │ │ ├── other.md │ │ └── targets.md │ ├── caching.md │ ├── collector/ │ │ ├── collector_api.md │ │ ├── collector_configuration.md │ │ └── collector_intro.md │ ├── configuration_env.md │ ├── configuration_file.md │ ├── configuration_flags.md │ ├── configuration_intro.md │ ├── event_processors/ │ │ ├── event_add_tag.md │ │ ├── event_allow.md │ │ ├── event_combine.md │ │ ├── event_convert.md │ │ ├── event_data_convert.md │ │ ├── event_date_string.md │ │ ├── event_delete.md │ │ ├── event_drop.md │ │ ├── event_duration_convert.md │ │ ├── event_extract_tags.md │ │ ├── event_group_by.md │ │ ├── event_ieeefloat32.md │ │ ├── event_jq.md │ │ ├── event_merge.md │ │ ├── event_override_ts.md │ │ ├── event_plugin.md │ │ ├── event_rate_limit.md │ │ ├── event_starlark.md │ │ ├── event_strings.md │ │ ├── event_time_epoch.md │ │ ├── event_to_tag.md │ │ ├── event_trigger.md │ │ ├── event_value_tag.md │ │ ├── event_write.md │ │ └── intro.md │ ├── gnmi_server.md │ ├── golang_package/ │ │ ├── examples/ │ │ │ ├── capabilities.md │ │ │ ├── get.md │ │ │ ├── set.md │ │ │ └── subscribe.md │ │ ├── gnmi_options.md │ │ ├── intro.md │ │ └── target_options.md │ ├── inputs/ │ │ ├── input_intro.md │ │ ├── jetstream_input.md │ │ ├── kafka_input.md │ │ ├── nats_input.md │ │ └── stan_input.md │ ├── outputs/ │ │ ├── asciigraph_output.md │ │ ├── file_output.md │ │ ├── gnmi_output.md │ │ ├── influxdb_output.md │ │ ├── jetstream_output.md │ │ ├── kafka_output.md │ │ ├── nats_output.md │ │ ├── otlp_output.md │ │ ├── output_intro.md │ │ ├── prometheus_output.md │ │ ├── prometheus_write_output.md │ │ ├── snmp_output.md │ │ ├── stan_output.md │ │ ├── tcp_output.md │ │ └── udp_output.md │ ├── prompt_suggestions.md │ ├── subscriptions.md │ ├── targets/ │ │ ├── target_discovery/ │ │ │ ├── consul_discovery.md │ │ │ ├── discovery_intro.md │ │ │ ├── docker_discovery.md │ │ │ ├── file_discovery.md │ │ │ └── http_discovery.md │ │ ├── targets.md │ │ └── targets_session_sec.md │ └── tunnel_server.md ├── examples/ │ ├── deployments/ │ │ ├── 1.single-instance/ │ │ │ ├── 1.nats-output/ │ │ │ │ ├── containerlab/ │ │ │ │ │ ├── gnmic.yaml │ │ │ │ │ └── nats.clab.yaml │ │ │ │ └── docker-compose/ │ │ │ │ ├── docker-compose.yaml │ │ │ │ └── gnmic1.yaml │ │ │ ├── 10.prometheus-with-cache/ │ │ │ │ └── containerlab/ │ │ │ │ ├── gnmic.yaml │ │ │ │ ├── grafana/ │ │ │ │ │ └── datasources/ │ │ │ │ │ └── datasource.yaml │ │ │ │ ├── prometheus/ │ │ │ │ │ └── prometheus.yaml │ │ │ │ └── prometheus.clab.yaml │ │ │ ├── 11.kafka-kraft-output/ │ │ │ │ └── containerlab/ │ │ │ │ ├── gnmic.yaml │ │ │ │ └── kafka.clab.yaml │ │ │ ├── 2.kafka-output/ │ │ │ │ ├── containerlab/ │ │ │ │ │ ├── gnmic.yaml │ │ │ │ │ └── kafka.clab.yaml │ │ │ │ └── docker-compose/ │ │ │ │ ├── docker-compose.yaml │ │ │ │ └── gnmic1.yaml │ │ │ ├── 3.influxdb-output/ │ │ │ │ ├── containerlab/ │ │ │ │ │ ├── gnmic.yaml │ │ │ │ │ ├── grafana/ │ │ │ │ │ │ └── datasources/ │ │ │ │ │ │ └── datasource.yaml │ │ │ │ │ └── influxdb.clab.yaml │ │ │ │ └── docker-compose/ │ │ │ │ ├── docker-compose.yaml │ │ │ │ └── gnmic1.yaml │ │ │ ├── 4.prometheus-output/ │ │ │ │ ├── containerlab/ │ │ │ │ │ ├── gnmic.yaml │ │ │ │ │ ├── grafana/ │ │ │ │ │ │ └── datasources/ │ │ │ │ │ │ └── datasource.yaml │ │ │ │ │ ├── prometheus/ │ │ │ │ │ │ └── prometheus.yaml │ │ │ │ │ └── prometheus.clab.yaml │ │ │ │ └── docker-compose/ │ │ │ │ ├── docker-compose.yaml │ │ │ │ ├── gnmic1.yaml │ │ │ │ └── prometheus/ │ │ │ │ └── prometheus.yaml │ │ │ ├── 5.multiple-outputs/ │ │ │ │ ├── containerlab/ │ │ │ │ │ ├── gnmic.yaml │ │ │ │ │ ├── grafana/ │ │ │ │ │ │ └── datasources/ │ │ │ │ │ │ └── datasource.yaml │ │ │ │ │ ├── multiple-outputs.clab.yaml │ │ │ │ │ └── prometheus/ │ │ │ │ │ └── prometheus.yaml │ │ │ │ └── docker-compose/ │ │ │ │ ├── docker-compose.yaml │ │ │ │ ├── gnmic1.yaml │ │ │ │ └── prometheus/ │ │ │ │ └── prometheus.yaml │ │ │ ├── 6.prometheus-write-output/ │ │ │ │ └── containerlab/ │ │ │ │ ├── gnmic.yaml │ │ │ │ ├── grafana/ │ │ │ │ │ └── datasources/ │ │ │ │ │ └── datasource.yaml │ │ │ │ ├── prometheus/ │ │ │ │ │ └── prometheus.yaml │ │ │ │ └── prometheus.clab.yaml │ │ │ ├── 7.cortex-output/ │ │ │ │ └── containerlab/ │ │ │ │ ├── cortex/ │ │ │ │ │ └── single-process-config-blocks.yaml │ │ │ │ ├── cortexmetrics.clab.yaml │ │ │ │ ├── gnmic.yaml │ │ │ │ └── grafana/ │ │ │ │ └── datasources/ │ │ │ │ └── datasource.yaml │ │ │ ├── 8.victoria-metrics-output/ │ │ │ │ └── containerlab/ │ │ │ │ ├── gnmic.yaml │ │ │ │ ├── grafana/ │ │ │ │ │ └── datasources/ │ │ │ │ │ └── datasource.yaml │ │ │ │ └── victoriametrics.clab.yaml │ │ │ └── 9.jetstream-output/ │ │ │ └── containerlab/ │ │ │ ├── gnmic.yaml │ │ │ └── jetstream.clab.yaml │ │ ├── 2.clusters/ │ │ │ ├── 1.influxdb-output/ │ │ │ │ ├── containerlab/ │ │ │ │ │ ├── gnmic.yaml │ │ │ │ │ ├── grafana/ │ │ │ │ │ │ └── datasources/ │ │ │ │ │ │ └── datasource.yaml │ │ │ │ │ └── lab21.clab.yaml │ │ │ │ └── docker-compose/ │ │ │ │ ├── docker-compose.yaml │ │ │ │ └── gnmic.yaml │ │ │ ├── 2.prometheus-output/ │ │ │ │ ├── containerlab/ │ │ │ │ │ ├── gnmic.yaml │ │ │ │ │ ├── grafana/ │ │ │ │ │ │ └── datasources/ │ │ │ │ │ │ └── datasource.yaml │ │ │ │ │ ├── lab22.clab.yaml │ │ │ │ │ └── prometheus/ │ │ │ │ │ └── prometheus.yaml │ │ │ │ ├── docker-compose/ │ │ │ │ │ ├── docker-compose.yaml │ │ │ │ │ ├── gnmic.yaml │ │ │ │ │ └── prometheus/ │ │ │ │ │ └── prometheus.yaml │ │ │ │ └── kubernetes/ │ │ │ │ ├── consul/ │ │ │ │ │ ├── deployment.yaml │ │ │ │ │ └── service.yaml │ │ │ │ ├── gnmic-app/ │ │ │ │ │ ├── configmap.yaml │ │ │ │ │ ├── secret.yaml │ │ │ │ │ ├── service.yaml │ │ │ │ │ └── statefulset.yaml │ │ │ │ └── prometheus/ │ │ │ │ └── servicemonitor.yaml │ │ │ ├── 3.nats-input-prometheus-output/ │ │ │ │ ├── containerlab/ │ │ │ │ │ ├── gnmic.yaml │ │ │ │ │ ├── grafana/ │ │ │ │ │ │ └── datasources/ │ │ │ │ │ │ └── datasource.yaml │ │ │ │ │ ├── lab23.clab.yaml │ │ │ │ │ └── prometheus/ │ │ │ │ │ └── prometheus.yaml │ │ │ │ └── docker-compose/ │ │ │ │ ├── docker-compose.yaml │ │ │ │ ├── gnmic.yaml │ │ │ │ └── prometheus/ │ │ │ │ └── prometheus.yaml │ │ │ ├── 4.gnmi-server/ │ │ │ │ └── containerlab/ │ │ │ │ ├── gnmi-server.clab.yaml │ │ │ │ ├── gnmic-agg.yaml │ │ │ │ ├── gnmic.yaml │ │ │ │ ├── grafana/ │ │ │ │ │ ├── dashboards/ │ │ │ │ │ │ └── gNMIc/ │ │ │ │ │ │ ├── gnmic_compute_metrics.json │ │ │ │ │ │ └── gnmic_grpc_metrics.json │ │ │ │ │ ├── dashboards.yaml │ │ │ │ │ └── datasources/ │ │ │ │ │ └── datasource.yaml │ │ │ │ └── prometheus/ │ │ │ │ └── prometheus.yaml │ │ │ └── 5.shared-cache/ │ │ │ ├── jetstream/ │ │ │ │ └── containerlab/ │ │ │ │ ├── gnmic.yaml │ │ │ │ ├── grafana/ │ │ │ │ │ └── datasources/ │ │ │ │ │ └── datasource.yaml │ │ │ │ ├── lab25-1.clab.yaml │ │ │ │ └── prometheus/ │ │ │ │ └── prometheus.yaml │ │ │ ├── nats/ │ │ │ │ └── containerlab/ │ │ │ │ ├── gnmic.yaml │ │ │ │ ├── grafana/ │ │ │ │ │ └── datasources/ │ │ │ │ │ └── datasource.yaml │ │ │ │ ├── lab25-2.clab.yaml │ │ │ │ └── prometheus/ │ │ │ │ └── prometheus.yaml │ │ │ └── redis/ │ │ │ └── containerlab/ │ │ │ ├── gnmic.yaml │ │ │ ├── grafana/ │ │ │ │ └── datasources/ │ │ │ │ └── datasource.yaml │ │ │ ├── lab25-3.clab.yaml │ │ │ └── prometheus/ │ │ │ └── prometheus.yaml │ │ └── 3.pipelines/ │ │ ├── 1.gnmic-nats-gnmic-prometheus/ │ │ │ ├── containerlab/ │ │ │ │ ├── gnmic-collector.yaml │ │ │ │ ├── gnmic-relay.yaml │ │ │ │ ├── grafana/ │ │ │ │ │ └── datasources/ │ │ │ │ │ └── datasource.yaml │ │ │ │ ├── lab31.clab.yaml │ │ │ │ └── prometheus/ │ │ │ │ └── prometheus.yaml │ │ │ └── docker-compose/ │ │ │ ├── docker-compose.yaml │ │ │ ├── gnmic-collector.yaml │ │ │ ├── gnmic-relay.yaml │ │ │ └── prometheus/ │ │ │ └── prometheus.yaml │ │ ├── 2.gnmic-nats-gnmic-influxdb/ │ │ │ ├── containerlab/ │ │ │ │ ├── gnmic-collector.yaml │ │ │ │ ├── gnmic-relay.yaml │ │ │ │ ├── grafana/ │ │ │ │ │ └── datasources/ │ │ │ │ │ └── datasource.yaml │ │ │ │ └── lab32.clab.yaml │ │ │ └── docker-compose/ │ │ │ ├── docker-compose.yaml │ │ │ ├── gnmic-collector.yaml │ │ │ └── gnmic-relay.yaml │ │ ├── 3a.gnmic-cluster-nats-gnmic-prometheus/ │ │ │ ├── containerlab/ │ │ │ │ ├── gnmic-relay.yaml │ │ │ │ ├── gnmic.yaml │ │ │ │ ├── grafana/ │ │ │ │ │ └── datasources/ │ │ │ │ │ └── datasource.yaml │ │ │ │ ├── lab33a.clab.yaml │ │ │ │ └── prometheus/ │ │ │ │ └── prometheus.yaml │ │ │ └── docker-compose/ │ │ │ ├── docker-compose.yaml │ │ │ ├── gnmic-collector.yaml │ │ │ ├── gnmic-relay.yaml │ │ │ └── prometheus/ │ │ │ └── prometheus.yaml │ │ ├── 3b.gnmic-cluster-nats-gnmic-cluster-prometheus/ │ │ │ ├── containerlab/ │ │ │ │ ├── gnmic-relay.yaml │ │ │ │ ├── gnmic.yaml │ │ │ │ ├── grafana/ │ │ │ │ │ └── datasources/ │ │ │ │ │ └── datasource.yaml │ │ │ │ ├── lab33b.clab.yaml │ │ │ │ └── prometheus/ │ │ │ │ └── prometheus.yaml │ │ │ └── docker-compose/ │ │ │ ├── docker-compose.yaml │ │ │ ├── gnmic-collector.yaml │ │ │ ├── gnmic-relay.yaml │ │ │ └── prometheus/ │ │ │ └── prometheus.yaml │ │ └── 4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/ │ │ └── docker-compose/ │ │ ├── docker-compose.yaml │ │ ├── gnmic-collector.yaml │ │ ├── gnmic-relay1.yaml │ │ ├── gnmic-relay2.yaml │ │ └── prometheus/ │ │ └── prometheus.yaml │ ├── pkg/ │ │ ├── capabilities_rpc/ │ │ │ └── main.go │ │ ├── get_rpc/ │ │ │ └── main.go │ │ ├── set_rpc/ │ │ │ └── main.go │ │ └── subscribe_rpc/ │ │ └── main.go │ ├── plugins/ │ │ ├── demo/ │ │ │ └── main.go │ │ ├── event-add-hostname/ │ │ │ ├── README.md │ │ │ └── event-add-hostname.go │ │ ├── event-gnmi-get/ │ │ │ ├── README.md │ │ │ └── event-gnmi-get.go │ │ ├── go-event-plugin/ │ │ │ └── event-go-plugin.go │ │ └── minimal/ │ │ └── event-my-processor.go │ └── set-request-templates/ │ └── Nokia/ │ └── SRL/ │ ├── 1.interfaces/ │ │ ├── interfaces_template.gotmpl │ │ ├── interfaces_template_vars.yaml │ │ └── subinterfaces_template.gotmpl │ ├── 2.network-instance/ │ │ ├── network_instance_bgp_evpn_template.gotmpl │ │ ├── network_instance_bgp_template.gotmpl │ │ ├── network_instance_bgp_vpn_template.gotmpl │ │ ├── network_instance_template.gotmpl │ │ └── network_instance_template_vars.yaml │ └── 3.acl/ │ ├── acl_template.gotmpl │ └── acl_template_vars.yaml ├── go.mod ├── go.sum ├── goreleaser-alpine.dockerfile ├── goreleaser-scratch.dockerfile ├── install.sh ├── main.go ├── mkdocs.yml ├── pkg/ │ ├── actions/ │ │ ├── action.go │ │ ├── all/ │ │ │ └── all.go │ │ ├── gnmi_action/ │ │ │ ├── gnmi_action.go │ │ │ ├── gnmi_action_test.go │ │ │ └── options.go │ │ ├── http_action/ │ │ │ ├── http_action.go │ │ │ └── http_action_test.go │ │ ├── script_action/ │ │ │ ├── options.go │ │ │ └── script_action.go │ │ └── template_action/ │ │ ├── options.go │ │ └── template_action.go │ ├── api/ │ │ ├── gnmi_msgs.go │ │ ├── gnmi_msgs_test.go │ │ ├── go.mod │ │ ├── go.sum │ │ ├── path/ │ │ │ ├── path.go │ │ │ └── path_test.go │ │ ├── server/ │ │ │ ├── options.go │ │ │ └── server.go │ │ ├── target/ │ │ │ ├── subscribe.go │ │ │ └── target.go │ │ ├── target.go │ │ ├── target_test.go │ │ ├── testutils/ │ │ │ └── utils.go │ │ ├── tunnel.go │ │ ├── tunnel_test.go │ │ ├── types/ │ │ │ ├── sasl.go │ │ │ ├── subscription.go │ │ │ ├── target.go │ │ │ └── tls.go │ │ └── utils/ │ │ ├── tls.go │ │ ├── utils.go │ │ └── utils_test.go │ ├── app/ │ │ ├── api.go │ │ ├── app.go │ │ ├── capabilities.go │ │ ├── clustering.go │ │ ├── clustering_test.go │ │ ├── const.go │ │ ├── diff.go │ │ ├── generate.go │ │ ├── generatePath.go │ │ ├── get.go │ │ ├── getset.go │ │ ├── gnmi_client.go │ │ ├── gnmi_client_subscribe.go │ │ ├── gnmi_server.go │ │ ├── inputs.go │ │ ├── loaders.go │ │ ├── locker.go │ │ ├── logging.go │ │ ├── metrics.go │ │ ├── outputs.go │ │ ├── path.go │ │ ├── path_test.go │ │ ├── plugins.go │ │ ├── pprof.go │ │ ├── processor.go │ │ ├── prompt.go │ │ ├── proxy.go │ │ ├── routes.go │ │ ├── set-to-notifs.go │ │ ├── set.go │ │ ├── setrequest.go │ │ ├── subscribe.go │ │ ├── subscribe_once.go │ │ ├── subscribe_poll.go │ │ ├── subscribe_prompt.go │ │ ├── target.go │ │ ├── tree.go │ │ ├── tunnel.go │ │ ├── utils.go │ │ └── version.go │ ├── cache/ │ │ ├── cache.go │ │ ├── go.mod │ │ ├── go.sum │ │ ├── jetstream_cache.go │ │ ├── jetstream_cache_test.go │ │ ├── nats_cache.go │ │ ├── oc_cache.go │ │ ├── oc_cache_test.go │ │ ├── options.go │ │ └── redis_cache.go │ ├── cmd/ │ │ ├── capabilities/ │ │ │ └── capabilities.go │ │ ├── collector/ │ │ │ ├── collector.go │ │ │ ├── inputs.go │ │ │ ├── ouputs.go │ │ │ ├── processors.go │ │ │ ├── subscriptions.go │ │ │ └── targets.go │ │ ├── completion.go │ │ ├── diff/ │ │ │ └── diff.go │ │ ├── generate/ │ │ │ ├── generate.go │ │ │ ├── generatePath.go │ │ │ └── generateSetRequest.go │ │ ├── get/ │ │ │ └── get.go │ │ ├── getset/ │ │ │ └── getset.go │ │ ├── listener/ │ │ │ └── listener.go │ │ ├── path/ │ │ │ └── path.go │ │ ├── processor/ │ │ │ └── processor.go │ │ ├── prompt.go │ │ ├── prompt_test.go │ │ ├── proxy/ │ │ │ └── proxy.go │ │ ├── root.go │ │ ├── set/ │ │ │ └── set.go │ │ ├── subscribe/ │ │ │ └── subscribe.go │ │ ├── tree/ │ │ │ └── tree.go │ │ └── version/ │ │ ├── version.go │ │ └── versionUpgrade.go │ ├── collector/ │ │ ├── api/ │ │ │ ├── const/ │ │ │ │ └── const.go │ │ │ └── server/ │ │ │ ├── apiserver.go │ │ │ ├── apply.go │ │ │ ├── assignment.go │ │ │ ├── cluster.go │ │ │ ├── inputs.go │ │ │ ├── metrics.go │ │ │ ├── outputs.go │ │ │ ├── processors.go │ │ │ ├── routes.go │ │ │ ├── sse.go │ │ │ ├── subscriptions.go │ │ │ ├── targets.go │ │ │ └── tunnel_target_match.go │ │ ├── collector.go │ │ ├── env/ │ │ │ ├── env.go │ │ │ └── env_test.go │ │ ├── managers/ │ │ │ ├── cluster/ │ │ │ │ ├── assigner.go │ │ │ │ ├── cluster_manager.go │ │ │ │ ├── election.go │ │ │ │ ├── membership.go │ │ │ │ ├── placement.go │ │ │ │ ├── rebalance.go │ │ │ │ └── utils.go │ │ │ ├── inputs/ │ │ │ │ └── inputs_manager.go │ │ │ ├── outputs/ │ │ │ │ └── outputs_manager.go │ │ │ └── targets/ │ │ │ ├── cluster.go │ │ │ ├── loader.go │ │ │ ├── metrics.go │ │ │ ├── targets_manager.go │ │ │ └── tunnel_server.go │ │ └── store/ │ │ ├── store.go │ │ └── types.go │ ├── config/ │ │ ├── actions.go │ │ ├── api_server.go │ │ ├── clustering.go │ │ ├── config.go │ │ ├── config_test.go │ │ ├── diff.go │ │ ├── environment.go │ │ ├── gnmi_ext.go │ │ ├── gnmi_server.go │ │ ├── inputs.go │ │ ├── loader.go │ │ ├── locker.go │ │ ├── outputs.go │ │ ├── outputs_test.go │ │ ├── plugins.go │ │ ├── processors.go │ │ ├── processors_test.go │ │ ├── set.go │ │ ├── set_test.go │ │ ├── subscriptions.go │ │ ├── subscriptions_test.go │ │ ├── targets.go │ │ ├── targets_test.go │ │ └── tunnel_server.go │ ├── file/ │ │ └── file.go │ ├── formatters/ │ │ ├── all/ │ │ │ └── all.go │ │ ├── event.go │ │ ├── event_add_tag/ │ │ │ ├── event_add_tag.go │ │ │ └── event_add_tag_test.go │ │ ├── event_allow/ │ │ │ ├── event_allow.go │ │ │ └── event_allow_test.go │ │ ├── event_combine/ │ │ │ ├── event_combine.go │ │ │ └── event_combine_test/ │ │ │ └── event_combine_test.go │ │ ├── event_convert/ │ │ │ ├── event_convert.go │ │ │ └── event_convert_test.go │ │ ├── event_data_convert/ │ │ │ ├── event_data_convert.go │ │ │ └── event_data_convert_test.go │ │ ├── event_date_string/ │ │ │ ├── event_date_string.go │ │ │ └── event_date_string_test.go │ │ ├── event_delete/ │ │ │ ├── event_delete.go │ │ │ └── event_delete_test.go │ │ ├── event_drop/ │ │ │ ├── event_drop.go │ │ │ └── event_drop_test.go │ │ ├── event_duration_convert/ │ │ │ ├── event_duration_convert.go │ │ │ └── event_duration_convert_test.go │ │ ├── event_extract_tags/ │ │ │ ├── event_extract_tags.go │ │ │ └── event_extract_tags_test.go │ │ ├── event_group_by/ │ │ │ ├── event_group_by.go │ │ │ └── event_group_by_test.go │ │ ├── event_ieeefloat32/ │ │ │ ├── event_ieeefloat32.go │ │ │ └── event_ieeefloat32_test.go │ │ ├── event_jq/ │ │ │ ├── event_jq.go │ │ │ └── event_jq_test.go │ │ ├── event_merge/ │ │ │ ├── event_merge.go │ │ │ └── event_merge_test.go │ │ ├── event_override_ts/ │ │ │ ├── event_override_ts.go │ │ │ └── event_override_ts_test.go │ │ ├── event_plugin/ │ │ │ ├── plugin.go │ │ │ └── rpc.go │ │ ├── event_rate_limit/ │ │ │ ├── event_rate_limit.go │ │ │ └── event_rate_limit_test.go │ │ ├── event_starlark/ │ │ │ ├── dict.go │ │ │ ├── event.go │ │ │ ├── event_starlark.go │ │ │ └── event_starlark_test.go │ │ ├── event_strings/ │ │ │ ├── event_strings.go │ │ │ └── event_strings_test.go │ │ ├── event_test.go │ │ ├── event_time_epoch/ │ │ │ ├── event_time_epoch.go │ │ │ └── event_time_epoch_test.go │ │ ├── event_to_tag/ │ │ │ ├── event_to_tag.go │ │ │ └── event_to_tag_test.go │ │ ├── event_trigger/ │ │ │ ├── event_trigger.go │ │ │ └── event_trigger_test.go │ │ ├── event_value_tag/ │ │ │ ├── event_value_tag.go │ │ │ └── event_value_tag_test.go │ │ ├── event_value_tag_v2/ │ │ │ ├── event_value_tag_v2.go │ │ │ └── event_value_tag_v2_test.go │ │ ├── event_write/ │ │ │ ├── event_write.go │ │ │ └── event_write_test.go │ │ ├── flat.go │ │ ├── formats.go │ │ ├── json.go │ │ ├── msg.go │ │ ├── plugin_manager/ │ │ │ └── manager.go │ │ ├── processors.go │ │ └── processors_test.go │ ├── gtemplate/ │ │ ├── template.go │ │ └── template_funcs.go │ ├── inputs/ │ │ ├── all/ │ │ │ └── all.go │ │ ├── input.go │ │ ├── jetstream_input/ │ │ │ ├── jetstream_input.go │ │ │ └── jetstream_input_test.go │ │ ├── kafka_input/ │ │ │ ├── kafka_input.go │ │ │ └── kafka_scram_client.go │ │ └── nats_input/ │ │ └── nats_input.go │ ├── loaders/ │ │ ├── all/ │ │ │ └── all.go │ │ ├── consul_loader/ │ │ │ ├── consul_loader.go │ │ │ ├── consul_loader_metrics.go │ │ │ ├── consul_loader_test.go │ │ │ └── options.go │ │ ├── docker_loader/ │ │ │ ├── docker_loader.go │ │ │ ├── docker_loader_metrics.go │ │ │ └── options.go │ │ ├── file_loader/ │ │ │ ├── file_loader.go │ │ │ ├── file_loader_metrics.go │ │ │ └── options.go │ │ ├── http_loader/ │ │ │ ├── http_loader.go │ │ │ ├── http_loader_metrics.go │ │ │ ├── http_loader_test.go │ │ │ └── options.go │ │ ├── loaders.go │ │ ├── loaders_test.go │ │ └── option.go │ ├── lockers/ │ │ ├── all/ │ │ │ └── all.go │ │ ├── consul_locker/ │ │ │ ├── consul_locker.go │ │ │ └── consul_registration.go │ │ ├── k8s_locker/ │ │ │ ├── k8s_locker.go │ │ │ └── k8s_registration.go │ │ ├── locker.go │ │ └── redis_locker/ │ │ ├── redis_locker.go │ │ └── redis_registration.go │ ├── logging/ │ │ └── logging.go │ ├── outputs/ │ │ ├── all/ │ │ │ └── all.go │ │ ├── asciigraph_output/ │ │ │ └── asciigraph.go │ │ ├── file/ │ │ │ ├── file_metrics.go │ │ │ ├── file_output.go │ │ │ └── rotating_file.go │ │ ├── gnmi_output/ │ │ │ ├── gnmi_output.go │ │ │ ├── gnmi_server.go │ │ │ ├── gnmi_server_get.go │ │ │ ├── gnmi_server_set.go │ │ │ └── gnmi_server_subscribe.go │ │ ├── influxdb_output/ │ │ │ ├── influxdb_cache.go │ │ │ └── influxdb_output.go │ │ ├── kafka_output/ │ │ │ ├── kafka_metrics.go │ │ │ ├── kafka_output.go │ │ │ └── kafka_scram_client.go │ │ ├── nats_outputs/ │ │ │ ├── jetstream/ │ │ │ │ ├── jetstream_output.go │ │ │ │ ├── jetstream_output_metrics.go │ │ │ │ └── jetstream_output_test.go │ │ │ └── nats/ │ │ │ ├── nats_metrics.go │ │ │ └── nats_output.go │ │ ├── options.go │ │ ├── otlp_output/ │ │ │ ├── otlp_converter.go │ │ │ ├── otlp_metrics.go │ │ │ ├── otlp_output.go │ │ │ └── otlp_output_test.go │ │ ├── output.go │ │ ├── prometheus_output/ │ │ │ ├── prometheus_common.go │ │ │ ├── prometheus_common_test.go │ │ │ ├── prometheus_output/ │ │ │ │ ├── prometheus_cache.go │ │ │ │ ├── prometheus_metrics.go │ │ │ │ ├── prometheus_output.go │ │ │ │ └── prometheus_service_registration.go │ │ │ └── prometheus_write_output/ │ │ │ ├── prometheus_write_client.go │ │ │ ├── prometheus_write_metrics.go │ │ │ └── prometheus_write_output.go │ │ ├── protometa.go │ │ ├── snmp_output/ │ │ │ ├── snmp_metrics.go │ │ │ └── snmp_output.go │ │ ├── tcp_output/ │ │ │ └── tcp_output.go │ │ └── udp_output/ │ │ └── udp_output.go │ ├── pipeline/ │ │ └── pipeline.go │ ├── utils/ │ │ ├── authbrearer.go │ │ ├── gnmi.go │ │ ├── gnmiext.go │ │ └── store.go │ └── version/ │ └── version.go └── tests/ ├── api.sh ├── capabilities_cmd.sh ├── clab/ │ ├── labN.clab.yaml │ ├── loaders/ │ │ ├── gnmic-agg.yaml │ │ ├── gnmic-docker-loader.yaml │ │ ├── gnmic-file-loader.yaml │ │ ├── loaders.clab.yaml │ │ ├── loaders.clab_vars.yaml │ │ └── targets/ │ │ └── targets.yaml │ ├── telemetry/ │ │ ├── gnmic-agg.yaml │ │ ├── gnmic.yaml │ │ ├── grafana/ │ │ │ ├── dashboards.yaml │ │ │ └── datasources/ │ │ │ └── datasource.yaml │ │ ├── prometheus/ │ │ │ └── prometheus.yaml │ │ └── telemetry.clab.yaml │ └── test_lab1.clab.yaml ├── cleanup.sh ├── cluster_checks.sh ├── cluster_funcs.sh ├── configs/ │ ├── gnmic1.yaml │ ├── gnmic2.yaml │ ├── gnmic3.yaml │ ├── gnmic4.yaml │ ├── gnmic_env.yaml │ └── node/ │ ├── interface.json │ ├── interface.yaml │ ├── replace_request_file.yaml │ └── update_request_file.yaml ├── consul_templates/ │ └── all_services.tpl ├── dashboards/ │ └── gNMIc/ │ ├── gnmic_compute_metrics.json │ └── gnmic_grpc_metrics.json ├── deploy.sh ├── env_vars.sh ├── generate_cmd.sh ├── generate_path_cmd.sh ├── get_cmd.sh ├── loaders.sh ├── metrics/ │ ├── gnmic.yaml │ ├── grafana/ │ │ ├── dashboards.yaml │ │ └── datasources/ │ │ └── datasource.yaml │ ├── metrics.clab.yaml │ ├── prometheus/ │ │ └── prometheus.yaml │ └── run.sh ├── run.sh ├── run_tests.sh ├── set_cmd.sh ├── subscribe_once_cmd.sh ├── telemetry_labs.sh └── version_cmd.sh ================================================ FILE CONTENTS ================================================ ================================================ FILE: .dockerignore ================================================ tests/ docs/ ================================================ FILE: .github/dependabot.yml ================================================ version: 2 updates: - package-ecosystem: "github-actions" directory: "/" schedule: interval: "weekly" - package-ecosystem: "gomod" directory: "/" schedule: interval: "weekly" ================================================ FILE: .github/workflows/close_state_issues.yml ================================================ # © 2024 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: Close inactive issues on: schedule: - cron: "30 1 * * *" jobs: close-issues: runs-on: ubuntu-latest permissions: issues: write pull-requests: write steps: - uses: actions/stale@v10 with: days-before-issue-stale: 365 days-before-issue-close: 30 stale-issue-label: "stale" stale-issue-message: "This issue is stale because it has been open for 12 months with no activity." close-issue-message: "This issue was closed because it has been inactive for 30 days since being marked as stale." days-before-pr-stale: -1 days-before-pr-close: -1 repo-token: ${{ secrets.GITHUB_TOKEN }} ================================================ FILE: .github/workflows/docs.yml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: docs on: workflow_dispatch: push: branches: - "docs-*" tags: - "v*" env: MKDOCS_MATERIAL_VER: 8.3.4 jobs: publish: runs-on: ubuntu-latest permissions: contents: write steps: - uses: actions/checkout@v4 - run: docker run -v $(pwd):/docs --entrypoint ash squidfunk/mkdocs-material:${MKDOCS_MATERIAL_VER} -c 'git config --global --add safe.directory /docs; mkdocs gh-deploy --force --strict' ================================================ FILE: .github/workflows/lint.yml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 --- name: Linter on: workflow_dispatch: # pull_request: # push: # branches: # - "main" # - "!releases/**" env: GOVER: 1.24.12 jobs: lint: runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v6 - uses: actions/setup-go@v6 with: go-version: ${{ env.GOVER }} - name: golangci-lint uses: golangci/golangci-lint-action@v9 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. version: v1.46 # Optional: working directory, useful for monorepos # working-directory: somedir # Optional: golangci-lint command line arguments. args: --verbose --max-same-issues=0 --max-issues-per-linter=0 --out-format=github-actions # Optional: show only new issues if it's a pull request. The default value is `false`. # only-new-issues: true ================================================ FILE: .github/workflows/release.yml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 --- name: release on: push: tags: - v* env: GOVER: 1.24.12 GORELEASER_VER: v2.13.3 jobs: test: runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v6 - uses: actions/setup-go@v6 with: go-version: ${{ env.GOVER }} - run: ./tests/run_tests.sh env: CGO_ENABLED: 0 release: runs-on: ubuntu-22.04 permissions: contents: write packages: write needs: - test steps: - uses: actions/checkout@v6 with: fetch-depth: 0 - uses: actions/setup-go@v6 with: go-version: ${{ env.GOVER }} - name: Login to github container registry uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Release with goreleaser uses: goreleaser/goreleaser-action@v6 with: version: ${{ env.GORELEASER_VER }} args: release --clean --verbose env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} ================================================ FILE: .github/workflows/test.yml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 --- name: Test on: workflow_dispatch: pull_request: push: branches: - "main" - "!releases/**" env: GOVER: 1.24.12 jobs: test: runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v6 - uses: actions/setup-go@v6 with: go-version: ${{ env.GOVER }} - run: ./tests/run_tests.sh env: CGO_ENABLED: 0 # run staticcheck - uses: reviewdog/action-staticcheck@73cfd0daa6fdbba9a858dcb0f62844012fa8317d with: github_token: ${{ secrets.GITHUB_TOKEN }} # Change reviewdog reporter if you need [github-pr-check,github-check,github-pr-review]. reporter: github-pr-review # Report all results. filter_mode: nofilter # Exit with 1 when it find at least one finding. fail_on_error: true ================================================ FILE: .gitignore ================================================ _test/ tests/clab-* tests/srl-* tests/.*clab.yaml tests/collector/suite/*/clab-* builds/ dist *.log gnmic *.tmp *.work* .idea tests/collector ================================================ FILE: .golangci.yml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 linters-settings: govet: # check-shadowing: true enable: - fieldalignment gocyclo: min-complexity: 20 dupl: threshold: 100 goconst: min-len: 2 min-occurrences: 4 lll: line-length: 140 nolintlint: # allow-leading-space: true # don't require machine-readable nolint directives (i.e. with no leading space) allow-unused: false # report any unused nolint directives require-explanation: false # don't require an explanation for nolint directives require-specific: false # don't require nolint directives to be specific about which linter is being skipped linters: disable-all: true enable: - asciicheck - bodyclose # - deadcode # - depguard - dogsled # - dupl # - errcheck # - exhaustive # - exportloopref # - funlen # - gci # - gochecknoglobals # - gochecknoinits # - gocognit - goconst # - gocritic # - gocyclo # - godox - gofmt # - gofumpt - goheader # - goimports # - gomnd # - gomodguard # - goprintffuncname # - gosec # - gosimple # - govet # - ineffassign # - lll - misspell # - nakedret # - nestif # - nlreturn # - noctx - nolintlint - prealloc # - revive # - rowserrcheck # - sqlclosecheck # - staticcheck # - structcheck # - stylecheck # - typecheck # - unconvert # - unparam - unused # - varcheck # - whitespace # - wsl run: concurrency: 4 timeout: 5m ================================================ FILE: .goreleaser.yml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 version: 2 project_name: gnmic builds: - env: - CGO_ENABLED=0 ldflags: - -s -w -X github.com/openconfig/gnmic/pkg/version.Version={{.Version}} -X github.com/openconfig/gnmic/pkg/version.Commit={{.ShortCommit}} -X github.com/openconfig/gnmic/pkg/version.Date={{.Date}} -X github.com/openconfig/gnmic/pkg/version.GitURL={{.GitURL}} goos: - linux - darwin goarch: - amd64 - "386" - arm - arm64 dockers: - goos: linux goarch: amd64 ids: - gnmic image_templates: - &amd64_latest_image "ghcr.io/openconfig/gnmic:latest-amd64" - &amd64_versioned_image 'ghcr.io/openconfig/gnmic:{{ replace .Version "v" ""}}-amd64' dockerfile: goreleaser-alpine.dockerfile skip_push: false use: buildx build_flag_templates: - "--platform=linux/amd64" - "--provenance=false" - "--sbom=false" - goos: linux goarch: arm64 ids: - gnmic image_templates: - &arm64_latest_image "ghcr.io/openconfig/gnmic:latest-arm64" - &arm64_versioned_image 'ghcr.io/openconfig/gnmic:{{ replace .Version "v" ""}}-arm64' dockerfile: goreleaser-alpine.dockerfile skip_push: false use: buildx build_flag_templates: - "--platform=linux/arm64" - "--provenance=false" - "--sbom=false" - goos: linux goarch: amd64 ids: - gnmic image_templates: - "ghcr.io/openconfig/gnmic:latest-scratch" - 'ghcr.io/openconfig/gnmic:{{ replace .Version "v" ""}}-scratch' dockerfile: goreleaser-scratch.dockerfile skip_push: false use: buildx build_flag_templates: - "--platform=linux/amd64" - "--provenance=false" - "--sbom=false" docker_manifests: - name_template: 'ghcr.io/openconfig/gnmic:{{ replace .Version "v" "" }}' image_templates: - *amd64_versioned_image - *arm64_versioned_image - name_template: "{{- if not .IsSnapshot}}ghcr.io/openconfig/gnmic:latest{{- end}}" image_templates: - *amd64_latest_image - *arm64_latest_image archives: - name_template: >- {{ .ProjectName }}_ {{- .Version }}_ {{- title .Os }}_ {{- if eq .Arch "amd64" }}x86_64 {{- else if eq .Arch "386" }}i386 {{- else if eq .Arch "arm" }}armv7 {{- else if eq .Arch "arm64" }}aarch64 {{- else }}{{ .Arch }}{{ end }} checksum: name_template: "checksums.txt" snapshot: name_template: "{{ .Tag }}" changelog: use: github-native nfpms: - id: gnmic file_name_template: >- {{ .ProjectName }}_ {{- .Version }}_ {{- title .Os }}_ {{- if eq .Arch "amd64" }}x86_64 {{- else if eq .Arch "386" }}i386 {{- else if eq .Arch "arm" }}armv7 {{- else if eq .Arch "arm64" }}aarch64 {{- else }}{{ .Arch }}{{ end }} vendor: gnmic homepage: https://gnmic.openconfig.net maintainer: Karim Radhouani , Roman Dodin description: gNMI CLI client and collector license: Apache 2.0 formats: - deb - rpm bindir: /usr/local/bin ================================================ FILE: Dockerfile ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 FROM golang:1.24.12 AS builder WORKDIR /build COPY go.mod go.sum /build/ COPY pkg/api/go.mod pkg/api/go.sum /build/pkg/api/ COPY pkg/cache/go.mod pkg/cache/go.sum /build/pkg/cache/ RUN go mod download ADD . /build #RUN CGO_ENABLED=0 go build -ldflags="-s -w" -o gnmic . RUN CGO_ENABLED=0 go build -ldflags="-s -w" -o gnmic . FROM alpine LABEL org.opencontainers.image.source=https://github.com/openconfig/gnmic COPY --from=builder /build/gnmic /app/ WORKDIR /app ENTRYPOINT [ "/app/gnmic" ] CMD [ "help" ] ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: README.md ================================================

[![github release](https://img.shields.io/github/release/openconfig/gnmic.svg?style=flat-square&color=00c9ff&labelColor=bec8d2)](https://github.com/openconfig/gnmic/releases/) [![Github all releases](https://img.shields.io/github/downloads/openconfig/gnmic/total.svg?style=flat-square&color=00c9ff&labelColor=bec8d2)](https://github.com/openconfig/gnmic/releases/) [![Go Report](https://img.shields.io/badge/go%20report-A%2B-blue?style=flat-square&color=00c9ff&labelColor=bec8d2)](https://goreportcard.com/report/github.com/openconfig/gnmic) [![Doc](https://img.shields.io/badge/Docs-gnmic.openconfig.net-blue?style=flat-square&color=00c9ff&labelColor=bec8d2)](https://gnmic.openconfig.net) [![build](https://img.shields.io/github/actions/workflow/status/openconfig/gnmic/test.yml?branch=main&style=flat-square&labelColor=bec8d2)](https://github.com/openconfig/gnmic/releases/) --- `gnmic` (_pronoun.: gee·en·em·eye·see_) is a gNMI CLI client that provides full support for Capabilities, Get, Set and Subscribe RPCs with collector capabilities. Documentation available at [https://gnmic.openconfig.net](https://gnmic.openconfig.net) ## Features * **Full support for gNMI RPCs** Every gNMI RPC has a [corresponding command](https://gnmic.openconfig.net/basic_usage/) with all of the RPC options configurable by means of the local and global flags. * **Flexible collector deployment** `gnmic` can be deployed as a gNMI collector that supports multiple output types ([NATS](https://gnmic.openconfig.net/user_guide/outputs/nats_output/), [Kafka](https://gnmic.openconfig.net/user_guide/outputs/kafka_output/), [Prometheus](https://gnmic.openconfig.net/user_guide/outputs/prometheus_output/), [InfluxDB](https://gnmic.openconfig.net/user_guide/outputs/influxdb_output/),...). The collector can be deployed either as a [single instance](https://gnmic.openconfig.net/deployments/deployments_intro/#single-instance), as part of a [cluster](https://gnmic.openconfig.net/user_guide/HA/), or used to form [data pipelines](https://gnmic.openconfig.net/deployments/deployments_intro/#pipelines). * **Support gRPC tunnel based dialout telemetry** `gnmic` can be deployed as a gNMI collector with an [embedded tunnel server](https://gnmic.openconfig.net/user_guide/tunnel_server/). * **gNMI data manipulation** `gnmic` collector has [data transformation](https://gnmic.openconfig.net/user_guide/event_processors/intro/) capabilities that can be used to adapt the collected data to your specific use case. * **Dynamic targets loading** `gnmic` support [target loading at runtime](https://gnmic.openconfig.net/user_guide/targets/target_discovery/discovery_intro/) based on input from external systems. * **YANG-based path suggestions** Your CLI magically becomes a YANG browser when `gnmic` is executed in [prompt](https://gnmic.openconfig.net/user_guide/prompt_suggestions/) mode. In this mode the flags that take XPATH values will get auto-suggestions based on the provided YANG modules. In other words - voodoo magic :exploding_head: * **Multi-target operations** Commands can operate on [multiple gNMI targets](https://gnmic.openconfig.net/user_guide/targets/) for bulk configuration/retrieval/subscription. * **Multiple configuration sources** gnmic supports [flags](https://gnmic.openconfig.net/user_guide/configuration_flags), [environment variables](https://gnmic.openconfig.net/user_guide/configuration_env/) as well as [file based]((https://gnmic.openconfig.net/user_guide/configuration_file/)) configurations. * **Inspect raw gNMI messages** With the `prototext` output format you can see the actual gNMI messages being sent/received. Its like having a gNMI looking glass! * **(In)secure gRPC connection** gNMI client supports both TLS and [non-TLS](https://gnmic.openconfig.net/global_flags/#insecure) transports so you can start using it in a lab environment without having to care about the PKI. * **Dial-out telemetry** The [dial-out telemetry server](https://gnmic.openconfig.net/cmd/listen/) is provided for Nokia SR OS. * **Pre-built multi-platform binaries** Statically linked [binaries](https://github.com/openconfig/gnmic/releases) made in our release pipeline are available for major operating systems and architectures. Making [installation](https://gnmic.openconfig.net/install/) a breeze! * **Extensive and friendly documentation** You won't be in need to dive into the source code to understand how `gnmic` works, our [documentation site](https://gnmic.openconfig.net) has you covered. ## Quick start guide ### Installation ``` bash -c "$(curl -sL https://get-gnmic.openconfig.net)" ``` ### Capabilities request ``` gnmic -a 10.1.0.11:57400 -u admin -p admin --insecure capabilities ``` ### Get request ``` gnmic -a 10.1.0.11:57400 -u admin -p admin --insecure \ get --path /state/system/platform ``` ### Set request ``` gnmic -a 10.1.0.11:57400 -u admin -p admin --insecure \ set --update-path /configure/system/name \ --update-value gnmic_demo ``` ### Subscribe request ``` gnmic -a 10.1.0.11:57400 -u admin -p admin --insecure \ sub --path "/state/port[port-id=1/1/c1/1]/statistics/in-packets" ``` ### Prompt mode The [prompt mode](https://gnmic.openconfig.net/user_guide/prompt_suggestions/) is an interactive mode of the gnmic CLI client for user convenience. ```bash # clone repository with YANG models (Openconfig example) git clone https://github.com/openconfig/public cd public # Start gnmic in prompt mode and read in all the modules: gnmic --file release/models \ --dir third_party \ --exclude ietf-interfaces \ prompt ``` ================================================ FILE: cmd/demo/getresponse.textproto ================================================ notification: { timestamp: 1676419100456944135 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "ssh-server" } elem: { name: "state" } elem: { name: "protocol-version" } } val: { string_val: "V2" } } } notification: { timestamp: 1676420328291197426 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "config" } elem: { name: "hostname" } } val: { string_val: "rosesarered" } } } notification: { timestamp: 1676419100456944135 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "ntp" } elem: { name: "state" } elem: { name: "enabled" } } val: { bool_val: false } } } notification: { timestamp: 1676419100456944135 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "ntp" } elem: { name: "state" } elem: { name: "enable-ntp-auth" } } val: { bool_val: false } } } notification: { timestamp: 1676419100456944135 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "ssh-server" } elem: { name: "state" } elem: { name: "enable" } } val: { bool_val: true } } } notification: { timestamp: 1676420328448197153 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "state" } elem: { name: "hostname" } } val: { string_val: "rosesarered" } } } notification: { timestamp: 1676419100459254468 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "state" } elem: { name: "boot-time" } } val: { uint_val: 1676419100459308639 } } } notification: { timestamp: 1676419100456944135 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "telnet-server" } elem: { name: "state" } elem: { name: "enable" } } val: { bool_val: false } } } notification: { timestamp: 1676422427135895887 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "config" } elem: { name: "hostname" } } val: { string_val: "rosesareredd" } } } notification: { timestamp: 1676422427269965151 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "state" } elem: { name: "hostname" } } val: { string_val: "rosesareredd" } } } notification: { timestamp: 1676422434342310772 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "config" } elem: { name: "hostname" } } val: { string_val: "rosesarered" } } } notification: { timestamp: 1676422434479082363 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "state" } elem: { name: "hostname" } } val: { string_val: "rosesarered" } } } ================================================ FILE: cmd/demo/setrequest.textproto ================================================ replace { path { elem { name: "system" } elem { name: "config" } elem { name: "hostname" } } val { string_val: "violetsareblue" } } replace { path { elem { name: "lacp" } elem { name: "interfaces" } elem { name: "interface" key { key: "name" value: "Port-Channel9" } } } val { json_ietf_val: "{\n \"openconfig-lacp:config\": {\n \"interval\": \"FAST\",\n \"name\": \"Port-Channel9\"\n },\n \"openconfig-lacp:name\": \"Port-Channel9\"\n}" } } update { path { elem { name: "network-instances" } elem { name: "network-instance" key { key: "name" value: "VrfBlue" } } } val { json_ietf_val: "{\n \"openconfig-network-instance:config\": {\n \"name\": \"VrfBlue\",\n \"type\": \"openconfig-network-instance-types:L3VRF\"\n },\n \"openconfig-network-instance:name\": \"VrfBlue\"\n}" } } ================================================ FILE: cmd/demo/setrequest2.textproto ================================================ replace { path { elem { name: "system" } elem { name: "config" } elem { name: "hostname" } } val { string_val: "rosesarered" } } replace { path { elem { name: "lacp" } elem { name: "interfaces" } elem { name: "interface" key { key: "name" value: "Port-Channel9" } } } val { json_ietf_val: "{\n \"openconfig-lacp:config\": {\n \"interval\": \"FAST\",\n \"name\": \"Port-Channel9\"\n },\n \"openconfig-lacp:name\": \"Port-Channel9\"\n}" } } replace { path { elem { name: "network-instances" } elem { name: "network-instance" key { key: "name" value: "VrfBlue" } } } val { json_ietf_val: "{\n \"openconfig-network-instance:config\": {\n \"name\": \"VrfBlue\",\n \"type\": \"openconfig-network-instance-types:L3VRF\"\n },\n \"openconfig-network-instance:name\": \"VrfBlue\"\n}" } } ================================================ FILE: cmd/demo/subscriberesponses.textproto ================================================ sync_response: true update: { timestamp: 1676419100456944135 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "ssh-server" } elem: { name: "state" } elem: { name: "protocol-version" } } val: { string_val: "V2" } } } update: { timestamp: 1676420328291197426 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "config" } elem: { name: "hostname" } } val: { string_val: "rosesarered" } } } update: { timestamp: 1676419100456944135 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "ntp" } elem: { name: "state" } elem: { name: "enabled" } } val: { bool_val: false } } } update: { timestamp: 1676419100456944135 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "ntp" } elem: { name: "state" } elem: { name: "enable-ntp-auth" } } val: { bool_val: false } } } update: { timestamp: 1676419100456944135 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "ssh-server" } elem: { name: "state" } elem: { name: "enable" } } val: { bool_val: true } } } update: { timestamp: 1676420328448197153 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "state" } elem: { name: "hostname" } } val: { string_val: "rosesarered" } } } update: { timestamp: 1676419100459254468 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "state" } elem: { name: "boot-time" } } val: { uint_val: 1676419100459308639 } } } update: { timestamp: 1676419100456944135 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "telnet-server" } elem: { name: "state" } elem: { name: "enable" } } val: { bool_val: false } } } update: { timestamp: 1676422427135895887 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "config" } elem: { name: "hostname" } } val: { string_val: "rosesareredd" } } } update: { timestamp: 1676422427269965151 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "state" } elem: { name: "hostname" } } val: { string_val: "rosesareredd" } } } update: { timestamp: 1676422434342310772 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "config" } elem: { name: "hostname" } } val: { string_val: "rosesarered" } } } update: { timestamp: 1676422434479082363 prefix: { origin: "openconfig" target: "fakedut" } update: { path: { elem: { name: "system" } elem: { name: "state" } elem: { name: "hostname" } } val: { string_val: "rosesarered" } } } ================================================ FILE: config.json ================================================ { "username": "admin", "password": "sros", "port": 57400, "timeout": "5s", "skip-verify": true, "tls-key": "/path/to/client.key", "tls-cert": "/path/to/client.crt", "tls-ca": "/path/to/ca.crt", "targets": { "172.17.0.100": { "timeout": "2s", "subscriptions": [ "sub1" ], "outputs": [ "output1", "output3" ] }, "172.17.0.101": { "username": "sros", "password": "sros", "insecure": true, "subscriptions": [ "sub2" ], "outputs": [ "output2", "output3" ] }, "172.17.0.102:57000": { "password": "sros123", "tls-key": "/path/file1", "tls-cert": "/path/file2" }, "172.17.0.103": null }, "subscriptions": { "sub1": { "paths": [ "/configure/port[port-id=*]", "/state/port[port-id=*]" ], "stream-mode": "on_change" }, "sub2": { "paths": [ "/configure/port[port-id=*]/statistics" ], "stream-mode": "sample", "sample-interval": "10s" } }, "outputs": { "output1": { "type": "file", "file-type": "stdout" }, "output2": { "type": "file", "filename": "local.log" }, "output3": { "type": "nats", "address": "localhost:4222", "subject-prefix": "telemetry", "username": null, "password": null }, "output4": { "type": "stan", "address": "localhost:4223", "subject": "telemetry", "username": null, "password": null, "name": null, "cluster-name": "test-cluster", "timeout": null, "ping-interval": null, "ping-retry": null }, "output5": { "type": "kafka", "address": "localhost:9092", "topic": "telemetry", "max-retry": null, "timeout": null }, "output6": { "type": "nats", "address": "localhost:4222", "subject-prefix": "telemetry", "username": null, "password": null } } } ================================================ FILE: config.toml ================================================ username = "admin" password = "sros" port = 57400 timeout = "5s" skip-verify = true tls-key = "/path/to/client.key" tls-cert = "/path/to/client.crt" tls-ca = "/path/to/ca.crt" [targets] [targets."172.17.0.100"] timeout = "2s" subscriptions = [ "sub1" ] outputs = [ "output1", "output3" ] [targets."172.17.0.101"] username = "sros" password = "sros" insecure = true subscriptions = [ "sub2" ] outputs = [ "output2", "output3" ] [targets."172.17.0.102:57000"] password = "sros123" tls-key = "/path/file1" tls-cert = "/path/file2" [subscriptions.sub1] paths = [ "/configure/port[port-id=*]", "/state/port[port-id=*]" ] stream-mode = "on_change" [subscriptions.sub2] paths = [ "/configure/port[port-id=*]/statistics" ] stream-mode = "sample" sample-interval = "10s" [outputs.output1] type = "file" file-type = "stdout" [outputs.output2] type = "file" filename = "local.log" [outputs.output3] type = "nats" address = "localhost:4222" subject-prefix = "telemetry" [outputs.output4] type = "stan" address = "localhost:4223" subject = "telemetry" cluster-name = "test-cluster" [outputs.output5] type = "kafka" address = "localhost:9092" topic = "telemetry" [outputs.output6] type = "nats" address = "localhost:4222" subject-prefix = "telemetry" ================================================ FILE: config.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: sros port: 57400 timeout: 5s skip-verify: true tls-key: /path/to/client.key tls-cert: /path/to/client.crt tls-ca: /path/to/ca.crt targets: 172.17.0.100: timeout: 2s subscriptions: - sub1 outputs: - output1 - output3 172.17.0.101: username: sros password: sros insecure: true subscriptions: - sub2 outputs: - output2 - output3 172.17.0.102:57000: password: sros123 tls-key: /path/file1 tls-cert: /path/file2 172.17.0.103: subscriptions: sub1: paths: - /configure/port[port-id=*] - /state/port[port-id=*] stream-mode: on_change # target-defined # sample sub2: paths: - /configure/port[port-id=*]/statistics stream-mode: sample sample-interval: 10s outputs: output1: type: file file-type: stdout output2: type: file filename: local.log output3: type: nats address: localhost:4222 subject-prefix: telemetry username: password: output4: type: stan address: localhost:4223 subject: telemetry username: password: name: cluster-name: test-cluster timeout: ping-interval: ping-retry: output5: type: kafka address: localhost:9092 topic: telemetry max-retry: timeout: output6: type: nats address: localhost:4222 subject-prefix: telemetry username: password: ================================================ FILE: docs/CNAME ================================================ gnmic.openconfig.net ================================================ FILE: docs/basic_usage.md ================================================ The following examples demonstrate the basic usage of `gnmic` in a scenario where the remote target runs an unsecured (without TLS enabled) gNMI server. The `admin:admin` credentials are used to connect to the gNMI server running at `10.1.0.11:57400` address. !!!info For the complete command usage examples, refer to the ["Command reference"](cmd/capabilities.md) menu. ### Capabilities RPC Getting the device's [capabilities](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#32-capability-discovery) is done with [`capabilities`](cmd/capabilities.md) command: ```bash gnmic -a 10.1.0.11:57400 -u admin -p admin --insecure capabilities gNMI_Version: 0.7.0 supported models: - nokia-conf, Nokia, 19.10.R2 - nokia-state, Nokia, 19.10.R2 - nokia-li-state, Nokia, 19.10.R2 - nokia-li-conf, Nokia, 19.10.R2 << SNIPPED >> supported encodings: - JSON - BYTES ``` ### Get RPC [Retrieving](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#33-retrieving-snapshots-of-state-information) the data snapshot from the target device is done with [`get`](cmd/get.md) command: ```bash gnmic -a 10.1.0.11:57400 -u admin -p admin --insecure \ get --path /state/system/platform { "source": "10.1.0.11:57400", "timestamp": 1592829586901061761, "time": "2020-06-22T14:39:46.901061761+02:00", "updates": [ { "Path": "state/system/platform", "values": { "state/system/platform": "7750 SR-1s" } } ] } ``` ### Set RPC [Modifying](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#34-modifying-state) state of the target device is done with [`set`](cmd/set.md) command: ```bash gnmic -a 10.1.0.11:57400 -u admin -p admin --insecure \ set --update-path /configure/system/name \ --update-value gnmic_demo { "source": "0.tcp.eu.ngrok.io:12267", "timestamp": 1592831593821038738, "time": "2020-06-22T15:13:13.821038738+02:00", "results": [ { "operation": "UPDATE", "path": "configure/system/name" } ] } ``` ### Subscribe RPC [Subscription](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#35-subscribing-to-telemetry-updates) to the gNMI telemetry data can be done with [`subscribe`](cmd/subscribe.md) command: ```bash gnmic -a 10.1.0.11:57400 -u admin -p admin --insecure \ sub --path "/state/port[port-id=1/1/c1/1]/statistics/in-packets" { "source": "0.tcp.eu.ngrok.io:12267", "timestamp": 1592832965197288856, "time": "2020-06-22T15:36:05.197288856+02:00", "prefix": "state/port[port-id=1/1/c1/1]/statistics", "updates": [ { "Path": "in-packets", "values": { "in-packets": "12142" } } ] } ``` ### YANG path browser `gnmic` can produce a list of XPATH/gNMI paths for a given YANG model with its [`path`](cmd/path.md) command. The paths in that list can be used as the `--path` values for the Get/Set/Subscribe commands. ```bash # nokia model gnmic path -m nokia-state --file nokia-state-combined.yang | head -10 /state/aaa/radius/statistics/coa/dropped/bad-authentication /state/aaa/radius/statistics/coa/dropped/missing-auth-policy /state/aaa/radius/statistics/coa/dropped/invalid /state/aaa/radius/statistics/coa/dropped/missing-resource /state/aaa/radius/statistics/coa/received /state/aaa/radius/statistics/coa/accepted /state/aaa/radius/statistics/coa/rejected /state/aaa/radius/statistics/disconnect-messages/dropped/bad-authentication /state/aaa/radius/statistics/disconnect-messages/dropped/missing-auth-policy /state/aaa/radius/statistics/disconnect-messages/dropped/invalid ``` ================================================ FILE: docs/blog/index.md ================================================ Coming soon ================================================ FILE: docs/changelog.md ================================================ ## Changelog ### v0.45.0 - March 2nd 2026 - Prometheus and Prometheus RemoteWrite outputs: - When converting values to labels, duplicate label names are now resolved by prepending parent path elements until uniqueness is achieved, preventing metrics from being dropped. - Get/Set commands: - Custom gNMI extensions can be included in requests via `--registered-extensions` and a JSON payload; use `--proto-dir` and `--proto-file` to specify the extension Protobuf definitions. - Formatters: - JSON output no longer escapes HTML characters (`<`, `>`, `&`), producing more readable output for values such as path prefixes containing `->`. - Outputs: - OTLP: Implemented dynamic config via `Update()` and `UpdateProcessor()`, configurable Resource vs DataPoint level attributes, support for multiple metrics per gNMI message, and include gNMIc version in OTLP resource scope. - Target: - Target last error is now reflected in a consistent way across collector state and API responses. - gNMI API: - Improved error formatting and added tests for the `pkg/api` package. - Performance: - Pool bytes buffers and strings builders where it makes sense to reduce allocations. - Dependencies: - Bumped `github.com/cloudflare/circl` from 1.6.1 to 1.6.3. - Bumped `github.com/go-git/go-git/v5` from 5.13.0 to 5.16.5. - Bumped `go.opentelemetry.io/otel/sdk` from 1.38.0 to 1.40.0. ### v0.44.0 - February 17th 2026 - gNMI Extensions: - gNMI extensions in get, set, and subscribe responses are now parsed and displayed as JSON when using `--proto-dir`, `--proto-file`, and `--registered-extensions` with the corresponding Protobuf files. - Collector mode: - Collector mode now stores Targets state (gNMI connection and subscription(s) state) in a separate store. - Collector mode supports an SSE endpoint streaming config and state for any object (Target, subscription, outputs, etc.) - Target: - Multiple gRPC level config knobs can now be set per target: gRPC read/write buffer, gRPC window size, and other dial options. Configuration is documented in the target configuration reference. ### v0.43.0 - February 1st 2026 - Inputs: - Jetstream: - Added support for configuring `max-ack-pending` to limit the maximum number of unacknowledged messages on a NATS JetStream input. - DeliverPolicy and AckPolicy are now fully configurable for greater flexibility and control. - Added NATS JetStream workqueue retention pattern support for exactly-once message processing in task distribution scenarios. - Outputs: - Jetstream: - Added `retention-policy` configuration option with support for `limits` (default) and `workqueue` retention policies. - Stream existence verification with detailed logging; omit `create-stream` to use existing streams. - Introduced support for OpenTelemetry Protocol (OTLP) as an output destination, enabling direct export of telemetry data to OTLP-compatible backends with full metric conversion (gauges, counters, histograms) and custom resource attributes. - Commands: - Added the new `collector` command: Runs gNMIc in collector mode, enabling dynamic, live updates to all configuration objects including targets, subscriptions, outputs, inputs, and processors. Unlike the `subscribe` command, the `collector` command supports on-the-fly configuration changes via the REST API, without requiring a restart. gNMIc automatically reconciles changes to maintain the desired state. - The `collector` command also includes a suite of subcommands, allowing you to configure the gNMIc collector directly from the CLI. - Formatters: - Flat format: Fixed leading slash handling when origin is not included in prefix or prefix is non-existent, ensuring consistent path formatting across all notification types. - Processors: - `event_group_by` processor now correctly handles delete events. - API: - Fixed the API path to patch subscriptions for a target ID. - Target: - When a target is removed, it is now also removed from the configuration. - Dependencies: - Fixed `gnmic/pkg/api` module version mismatch in go.mod for consumers building gNMIc as a dependency. - Bumped `golang.org/x/crypto` to v0.45.0. ### v0.42.0 - September 19th 2025 - Inputs: - Add support for NATS Jetstream input type. - Kafka: Fixed event parsing when `eventMsg` was not initialized, preventing nil pointer dereference. - Loader: - Loaded targets subscribe requests are now subject to the `subscribe-backoff` timer when new targets are added via loaders (HTTP, file, etc.) or config change events. - Loaded target configuration now supports environment variable expansion when `expand-env` is set to true, enabling per-target credentials via env vars. - Consul loader: Fixed tag matching logic to allow services with extra metadata tags (subset matching); services with required tags plus additional tags are no longer incorrectly rejected. - Consul loader: Improved Go template parsing for target name and event-tags. - HTTP loader: Various fixes and added tests (fixes #712). - Outputs: - Kafka: Fixed missing label in error metric that could cause panics when error reason was unavailable. - gNMI server: - The unary RPCs timeout is now configurable via `gnmi-server.timeout` in the config (default remains 2 minutes). - Get command: - Added optional organization and version for model selection: prepend `/` for organization, append `:` for version when specifying models. - Subscribe: - Fixed `sync_response` output being suppressed for ONCE mode subscriptions; behavior now matches STREAM mode. - Targets: - A new internal Prometheus metric `gnmic_target_connection_state` reflects the gRPC client connection state with values: 0 (UNKNOWN), 1 (IDLE), 2 (CONNECTING), 3 (READY), 4 (TRANSIENT_FAILURE), or 5 (SHUTDOWN). The `target_up` metric now correctly reflects connection failures (e.g., auth issues). - Bug fixes: - Fixed memory leak when subscription fails: `cancel()` reference was kept alive indefinitely. - Fixed OS environment variable values being incorrectly lowercased (fixes #663). - API: - Documentation updated for `POST /api/v1/config/targets` to reflect that the `name` field is required for proper target identification. - Dependencies: - Bumped `golang.org/x/crypto` to v0.41.0. - Bumped `golang.org/x/oauth2` to v0.31.0. ### v0.41.0 - April 6th 2025 - Processors: - Added `event-time-epoch` processor, enabling converting string-based time values into epoch timestamps. - Fixed `ieeefloat32` processor for correct handling of binary IEEE float32 values. - Target Discovery: - Consul loader: Adds the ability to use Go Templates on Consul targets to set target name as well as event-tags (e.g., `target-name`, `target-tags` with `{{.Meta.*}}`). - Loader: - When a target configuration changes, loaders now generate delete and add actions so the subscription is restarted to apply the new parameters (fixes #563). - Outputs: - Messages are now exported to outputs in sequence to avoid sync responses being sent before initial notifications (fixes #612). - Output internal metrics are now registered only once, preventing duplicate registration errors (fixes #586). - Path generation: - Fixed xpath generation with prefix: module prefix is now replaced with module name when generating xpaths (fixes #633). - Targets: - `target_up` metric now resets before creating metrics so deleted targets (e.g., via Consul) no longer show as still up (fixes #604). - Dependencies: - Bumped `github.com/golang/glog` to v1.2.4. ### v0.40.0 - January 27th 2025 - Processors: - Introducing `event-value-tag-v2` processor, enabling the addition of values as tags to other messages without requiring caching to be enabled in the associated output. - Logging related to calls to the `/api/v1/healthz` API endpoint is now optional. - Clustering: - New REST API endpoints added: - Switch the cluster leader: `DELETE /api/v1/cluster/leader` - Drain an instance: `POST /api/v1/members/{id}/drain` where id is the instance name to be drained - Rebalance the load between instances: `POST /api/v1/cluster/rebalance` ### v0.39.0 - November 7th 2024 - Get Command - Added `--dry-run` flag. - Set Command - Added `--no-trim` flag to disable trimming white spaces from values payload. - REST API - Added `/api/v1/admin/shutdown` endpoint to shutdown gNMIc. - Outputs: - File: file output now supports file rotation. - NATS and Jetstream: The publishers buffer size is now configurable. - Build: - Added `ARM64` binary and container image. - gNMIc Metrics: - Added a metric to keep track of failed subscribe requests. - Added a metric to keep track of targets connectivity state. - Clustering: - The REST API client used for building gNMIc cluster can now be configured with client certificates to support mTLS. - Processors: - Added a processor to handle converting binary IEEE float32 values to float32. ### v0.38.0 - July 8th 2024 - Kafka Output - Add configurable Kafka version - gNMI extensions - Implement [Commit confirmed gNMI extension](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-commit-confirmed.md) - Implement [Depth gNMI extension](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-depth.md) ### v0.37.0 - May 13th 2024 - gNMI connection TCP Keepalive - It is now possible to configure the TCP keepalive probes time interval. - gRPC Keepalive - The gRPC connection keepalive parameters are now configurable. It follows the gRPC spec: https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md - Proxy Command: - gNMIc now supports a `proxy` command. When issued, gNMIc runs as a gNMI proxy. See details [here](cmd/proxy.md) - Processor Command: - gNMIc now supports a `processor` command. It can be used to run a set of processor offline against an input of event messages and print the result. See details [here](cmd/processor.md) - Kafka Output: - The Kafka output now supports a configurable flush-interval parameters. - InfluxDB Output: - The InfluxDB output now supports writing gNMI deletes to InfluxDB using a custom tag name. - Prometheus Output: - The Prometheus output will now automatically convert boolean values (true: 1 and false: 0). ### v0.36.0 - February 13th 2024 - Event Message - gNMI updates with deleted paths are now converted into separate event messages where the keys are extracted from the path and set as event tags. - gNMI TLS cipher suites - It is now possible to select the list of a cipher suites that gNMIc advertises to a gNMI server during a TLS handshake. The full list of supported ciphers can be found [here](user_guide/targets/targets.md#controlling-the-advertised-cipher-suites) - Set Request - The Set command now features a new flag, `--proto-file`, which allows the specification of one or more files. These files should contain gNMI Set requests in `prototext` format, which will be sent to the specified targets. ### v0.35.0 - January 20th 2024 - Processors - Added a plugin process type that allows users to write their own custom processors: [examples](https://github.com/openconfig/gnmic/tree/main/examples/plugins) - gRPC metadata - A new flag `--metadata | -H` is introduced. It allows users to add custom gRPC metadata headers to any request. - Outputs: - Kafka output: - Added support for custom topics per target/subscription. - Added support for both Async and Sync Kafka producers. - Commands: - Listen command: When using the `listen` command outputs internal metrics are properly initialized and exposed to prometheus for scraping. ### v0.34.0 - November 11th 2023 - Prometheus Write Output - The number of `prometheus_write` writers can now be configured. - Subscription Encoding - A subscription encoding can now be set per target. Before, it was either a global attribute or set per subscription. With this change, it can be set globally, per target or per subscription. - Processors: - New `event-combine` processor: A convenience processor that allows combining other processors into a single one. - New `event-rate-limit` processor: A processor that rate-limits each event with matching tags to the configured amount per-seconds. - Outputs: - New `asciigraph` output: https://asciinema.org/a/617477 - Clustering: - New `redis` locker: For leader election, service discovery and target distribution gNMIc supports both `Consul` and `Kubernetes`. It is now possible to use `redis` for the same purpose. ### v0.33.0 - October 8th 2023 - Rest API - Added a kubernetes friendly `api/v1/healthz` endpoint. - Set Command - Added support for gNMI set [`union_replace`](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-union_replace.md) operation. - Outputs - Allow the number of workers used by the `prometheus` and `prometheus_write` outputs to be configurable to improve performance. - Go version - Upgrade to Golang v1.21.1. ### v0.32.0 - August 31st 2023 - TLS - It is now possible to override the serverName used by gNMIc when verifying the server name present in the certificate sent by the gNMI server. [PR](https://github.com/openconfig/gnmic/pull/173) - Subscription - Added support for mixing on-change and sample stream subscription in the same gRPC stream. [PR](https://github.com/openconfig/gnmic/pull/197) - Added support for attaching specific outputs to a subscription. [PR](https://github.com/openconfig/gnmic/pull/209) - REST API - Added a health chek endpoint to be used by kubernetes. [PR](https://github.com/openconfig/gnmic/pull/202) - Kafka Output - Added support for Kafka compression. [PR](https://github.com/openconfig/gnmic/pull/203) - Generate Path - Added `enum-values` to the `JSON` output of `generate path` command. [PR](https://github.com/openconfig/gnmic/pull/215) ### v0.31.0 - May 17th 2023 - Prometheus output - When using the Consul auto discovery feature of the Proemtheus output, it is now possible to configure different service and listen addresses. This is useful when gNMIc is running as a container of behind a NAT device - Set Request file - The CLI origin is now allowed in the `path` field of `updates`, `replaces` and `deletes` in a set request file. If the `path` field has the `cli:/` origin, the `value` field is expected to be a string and will be set in an `ascii` TypedValue. ### v0.30.0 - April 18th 2023 - Set Command - The [set command](cmd/set.md) now supports the flags `--replace-cli`, `--replace-cli-file`, `--update-cli` and `--update-cli-file`, these flags can be used to send gNMI set requests with the CLI origin. - Logging: - Reduce log verbosity of File and HTTP target discovery mechanisms. - Processors: - The [Drop](user_guide/event_processors/event_drop.md) event processor completely removes the message to be dropped instead of replacing it with an empty message. - Inputs: - [Kafka input](user_guide/inputs/kafka_input.md) now supports TLS connections. - Outputs: - [Kafka output](user_guide/outputs/kafka_output.md) now has a configuration attribute called `insert-key`, if true, the messages written will include a key built from the gNMI message source and subscription name. - [TCP output](user_guide/outputs/tcp_output.md) now has a configuration attribute called `delimiter`, it allows to set user defined string to be sent between each message. This allows the receiving end to properly split JSON objects. It it particularly useful with Logstash when writing gNMI events to an ELK stack. - TLS: - When using `gNMIc`'s components that expose a TLS server (gNMI server, Tunnel server, Rest API and Prometheus output) it's possible to fine tune the how the server requests and validates a client certificate. This is done using the configuration attribute `client-auth` under each server's TLS section, it takes 4 different values: - request: The server requests a certificate from the client but does not require the client to send a certificate. If the client sends a certificate, it is not required to be valid. - require: The server requires the client to send a certificate and does not fail if the client certificate is not valid. - verify-if-given: The server requests a certificate, does not fail if no certificate is sent. If a certificate is sent it is required to be valid. - require-verify: The server requires the client to send a valid certificate. - Diff Command: - The [diff command](cmd/diff/diff.md) has 2 new sub commands: - [`setrequest`](cmd/diff/diff_setrequest.md): compares the intent between two `SetRequest` messages encoded in textproto format. - [`set-to-notifs`](cmd/diff/diff_set_to_notifs.md): verifies whether a set of notifications from a `GetResponse` or a stream of `SubscribeResponse` messages comply with a `SetRequest` messages in textproto format. The envisioned use case is to check whether a stored snapshot of device state matches that of the intended state as specified by a `SetRequest`. - Outputs: - When using the `event` format with certain outputs (`file`, `nats`, `jetstream`, `kafka`, `tcp` or `udp`) it's possible to send event message individually as opposed to sending them in an array. This is done using the attribute `split-events: true` under each of the outputs configuration sections. - [Prometheus output](user_guide/outputs/prometheus_output.md) now supports a custom service address field under `service-registration`, it specifies the address to be registered in Consul for discovery. It can be a hostname, an IP address or a IP/Host:Port socket address. It it does not contain a port number, the port number from the `listen` field is used. - Set Request file - The Set request file can be used with Origin `cli`, gNMIc will properly format the commands as string, not as JSON value. ### v0.29.0 - February 20th 2023 - Generate Path - The `generate path` command with the flag `--json` shows the features the path depends on. The list of features is built recursively from the YANG attribute `if-feature`. - Processors: - New processor [`event-starlark`](user_guide/event_processors/event_starlark.md) allows to run a [starlak](https://github.com/google/starlark-go/blob/master/doc/spec.md) script on the received messages. - Loaders - The [HTTP loader](user_guide/targets/target_discovery/http_discovery.md) now supports different authentication schemas as well as setting a template from a local file. ### v0.28.0 - December 7th 2022 - Targets - Targets static tags are now properly propagated to outputs when a cache is used. - Listen Command: - The `system-name` HTTP2 header is now used as a tag in exported metrics. - Outputs: - The timestamp precision under `gNMIc`'s InfluxDB output is now configurable. - Added a new `snmp` output type, it allows to dynamically convert gNMI updates into SNMP traps. ### v0.27.0 - October 8th 2022 - Targets - Add supports for socks5 proxies per target. - Logging - Support for log rotation via the flags `--log-max-size`, `log-max-backups` and `--log-compress` ### v0.26.0 - June 28th 2022 - Outputs - Add [Prometheus Remote Write output](user_guide/outputs/prometheus_write_output.md), this output type can be used to push metrics to various systems like [Mimir](https://grafana.com/oss/mimir/), [CortexMetrics](https://cortexmetrics.io/), [VictoriaMetrics](https://victoriametrics.com/), [Thanos](https://thanos.io/)... - Add [NATS Jetstream output](user_guide/outputs/jetstream_output.md), it allows to write metrics to NATS jetstream which supports persistency and filtering. - [gNMI historical subscriptions](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-history.md#1-purpose) `gNMIc` now support historical subscription using the [gNMI history extension](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-history.md#2-definition) ### v0.25.1 - June 13th 2022 - Upgrade Go version to go1.18.1. - Fix running `gnmic subscribe` with only Inputs and Outputs configured (no subscriptions or targets). ### v0.25.0 - June 11th 2022 - Processors - [Strings replace processor](user_guide/event_processors/event_strings.md) supports replaces using regular expressions. - Processors are now supported when collecting telemetry using [listen command](cmd/listen.md) (Nokia SROS specific) - New Processors - [Data convert](user_guide/event_processors/event_data_convert.md) - [Duration convert](user_guide/event_processors/event_duration_convert.md) - [Value tag](user_guide/event_processors/event_value_tag.md) - [Clustering](user_guide/HA.md) - `gNMIc` supports kubernetes based clustering, i.e you can build `gNMIc` clusters on kubernetes without the need for Consul cluster. - [Yang path generation](cmd/generate.md) - The command `gnmic generate path` supports generating paths for YANG containers. In earlier versions, the paths generation was done for YANG leaves only. - Internal gNMIc Prometheus metrics `gNMIc` exposes additional internal metrics available to be scraped using Prometheus. - Static tags from target configuration - It is now possible to set static tags on events by configuring them under each target. - Influxdb cache The [InfluxDB output](user_guide/outputs/influxdb_output.md) now supports gNMI based caching, allowing to apply processors on multiple event messages at once and batching the written points to InfluxDB. ### v0.24.0 - March 13th 2022 - [gRPC Tunnel Support](user_guide/tunnel_server.md) Add support for gNMI RPC using a gRPC tunnel, gNMIc runs as a collector with an embedded tunnel server. ### v0.23.0 - February 24th 2022 - Docker image: - The published `gnmic` docker image is now based on `alpine` instead of an empty container. - A `from scratch` image is published and can be obtained using the command: ```bash docker pull ghcr.io/karimra/gnmic:latest-scratch docker pull ghcr.io/karimra/gnmic:v0.23.0-scratch ``` - [gNMIc Golang API](user_guide/golang_package/intro.md): - Add gNMI responses constructors - Add gRPC tunnel proto messages constructors - [Target Discovery](user_guide/targets/target_discovery/discovery_intro.md): - Add the option to transform the loaded targets format using a Go text template for file and HTTP loaders - Poll based target loaders (file, HTTP and docker) now support a startup delay timer ### v0.22.1 - February 2nd 2022 - Fix a Prometheus output issue when using gNMI cache that causes events to be missing from the metrics. ### v0.22.0 - February 1st 2022 - [gNMIc Golang API](user_guide/golang_package/intro.md): Added the `github.com/karimra/gnmic/api` golang package. It can be imported by other Golang programs to ease the creation of gNMI targets and gNMI Requests. ### v0.21.0 - January 23rd 2022 - [Generate Cmd](cmd/generate/generate_path.md): Add YANG module namespace to generated paths. - Outputs: Outputs [File](user_guide/outputs/file_output.md), [NATS](user_guide/outputs/nats_output.md) and [Kafka](user_guide/outputs/kafka_output.md) now support a `msg-template` field to customize the written messages using Go templates. - API: Add [Cluster API](user_guide/api/cluster.md) endpoints. - Actions: Add [Template](user_guide/actions/actions.md#template-action) action. Add Subscribe ONCE RPC to [gNMI](user_guide/actions/actions.md#gnmi-action) action. Allow [gNMI](user_guide/actions/actions.md#gnmi-action) action on multiple targets. Add [Script](user_guide/actions/actions.md#script-action) action. - [Get Cmd](cmd/get.md): Implement Format `event` for GetResponse messages. Add the ability to execute processors with Get command flag [`--processor`](cmd/get.md#processor) on GetResponse messages. - [Target Discovery](user_guide/targets/target_discovery/discovery_intro.md): Add the ability to run [actions](user_guide/actions/actions.md) on target discovery or deletion. - [Set Cmd](cmd/set.md): Add [`--dry-run`](cmd/set.md#dry-run) flag which runs the set request templates and prints their output without sending the SetRequest to the targets. - TLS: Add pre-master key logging for TLS connections using the flag [`--log-tls-secret`](global_flags.md#log-tls-secret). The key can be used to decrypt encrypted gNMI messages using wireshark. - Target: Add `target.Stop()` method to gracefully close the target underlying gRPC connection. ### v0.20.0 - October 19th 2021 - Add [gomplate](https://docs.gomplate.ca) template functions to all templates rendered by `gnmic`. - [Path generation](cmd/generate/generate_path.md): `gnmic generate path` supports generating paths with type and description in JSON format. - [Set RPC template](cmd/set.md#templated-set-request-file): Set RPC supports multiple template files in a single command. - [Clustering](user_guide/HA.md): `gnmic` clusters can be formed using secure (HTTPS) API endpoints. - [Configuration payload generation](cmd/generate.md): Configuration keys can now be formatted as `camelCase` or `snake_case` strings ### v0.19.1 - October 7th 2021 - Path search Do not enter search mode if not paths are found. - [Prometheus Output](user_guide/outputs/prometheus_output.md) Change the default service name when registering with a Consul server ### v0.19.0 - September 16th 2021 - Event Processors [Event Convert](user_guide/event_processors/event_convert.md) now converts binary float notation to float - Target Loaders: - [HTTP Loader](user_guide/targets/target_discovery/http_discovery.md) gNMIc can now dynamically discover targets from a remote HTTP server. HTTP Loader is now properly instrumented using Prometheus metrics. - [File Loader](user_guide/targets/target_discovery/file_discovery.md) Supports remote files (ftp, sftp, http(s)) in addition to local file system files. File loader is now properly instrumented using Prometheus metrics. - [Consul Loader](user_guide/targets/target_discovery/consul_discovery.md) Consul Loader is now properly instrumented using Prometheus metrics. - [Docker Loader](user_guide/targets/target_discovery/docker_discovery.md) Docker Loader is now properly instrumented using Prometheus metrics. - gRPC gNMIc now adds its version as part of the user-agent HTTP header. ### v0.18.0 - August 17th 2021 - [gNMI Server](user_guide/gnmi_server.md): Add support for a global gNMI server. It supports all types of subscriptions, ran against a local cache build out the configured subscriptions. It support Get and Set RPCs as well, those are run against the configured targets. The gNMI server supports Consul based service registration. - Outputs: Add support for [gNMI server](user_guide/outputs/gnmi_output.md) output type - [Target configuration](user_guide/targets/targets.md): Support multiple IP addresses per target, all addresses are tried simultaneously. The first successful gRPC connection is used. - [Prometheus Output](user_guide/outputs/prometheus_output.md): Add the option of generating Prometheus metrics on-scrape, instead of on-reception. The gNMI notifications are stored in a local cache and used to generate metrics when a Prometheus server sends a scrape request. - Event Processors: Add [`group-by`](user_guide/event_processors/event_group_by.md) processor, it groups events together based on a given criteria. The events can belong to different gNMI notifications or even to different subscriptions. - Event Processor Convert: Add support for boolean conversion - [Deployment Examples](deployments/deployments_intro.md): Add [containerlab](https://containerlab.srlinux.dev) based deployment examples. These deployment come with a router fabric built using Nokia's [SRL](https://learn.srlinux.dev) - [API server](user_guide/api/api_intro.md): Add Secure API server configuration options - Target Loaders: [Consul loader](user_guide/targets/target_discovery/consul_discovery.md#services-watch) update: Add support for gNMI target discovery from Consul services. - Get Request: Add printing of Target as part of Path Prefix - Set Request: Add printing of Target as part of Path Prefix ### v0.17.0 - July 14th 2021 - Event Trigger: Enhance `event-trigger` to run multiple actions sequentially when an event occurs. The output of an action can be used in the following ones. - Kafka output: Add `SASL_SSL` and `SSL` security protocols to kafka output. - gRPC authentication: Add support for token based gRPC authentication. ### v0.16.2 - July 13th 2021 - Fix nil pointer dereference in case a subscription has `suppress-redundant` but no `heartbeat-interval`. ### v0.16.1 - July 12th 2021 - Bump github.com/openconfig/goyang version to v0.2.7 ### v0.16.0 - June 14th 2021 - Target Discovery: Add Docker Engine target loader, `gnmic` can dynamically discover gNMI targets running as docker containers. - Event Trigger: gNMI action Enhance `gNMI action` to take external variables as input, in addition to the received gNMI update. ### v0.15.0 - June 7th 2021 - Subscription: Add field `set-target` under subscription config, a boolean that enables setting the target name as a gNMI prefix target. - Outputs: Add `add-target` and `target-template` fields under all outputs, Enables adding the target value as a tag/label based on the subscription and target metadata ### v0.14.3 - June 6th 2021 - Set command: Fix `ascii` values encoding if used with `--request-file` flag. ### v0.14.2 - June 3rd 2021 - Fix `event-convert` processor when the conversion is between integer types. - Add an implicit conversion of uint to int if the influxdb output version is 1.8.x. This is a workaround for the limited support of influx APIv2 by influxDB1.8 ### v0.14.1 - May 31st 2021 - Fix OverrideTS processor - Add `override-timestamps` option under outputs, to override the message timestamps regardless of the message output format ### v0.14.0 - May 28th 2021 - New Output format `flat` - This format prints the Get and Subscribe RPCs as a list of `xpath: value`, where the `xpath` points to a leaf value. - New `gnmic diff` command: - This command prints the difference in responses between a reference target `--ref` and one or more targets to be compared to the reference `--compare`. - The output is printed as `flat` format results. ### v0.13.0 - May 10th 2021 - New `gnmic generate` Command: - Given a set of yang models and an xpath, `gnmic generate` generates a JSON/YAML representation of the YANG object the given path points to. - Given a set of yang models and an set of xpaths (with `--update` or `--replace`), `gnmic generate set-request` generates a set request file that can be filled with the desired values and used with `gnmic set --request-file` - The sub-command `gnmic generate path` is an alias to `gnmic path` - Path Command: - add flag `--desc` which, if present, prints the YANG leaf description together with the generated paths. - add flag `--config-only` which, if present, only generates paths pointing to YANG leaves representing config data. - add flag `--state-only` which, if present, only generates paths pointing to a YANG leaf representing state data. ### v0.12.2 - April 24th 2021 - Fix a bug that cause gNMIc to crash if certain processors are used. ### v0.12.1 - April 21st 2021 - Fix parsing of stringArray flags containing a space. ### v0.12.0 - April 20th 2021 - Outputs: - InfluxDB and Prometheus outputs: Convert gNMI Decimal64 values to Float64. - Set Command: - Add the ability to run a Set command using a single file, including `replaces`, `updates` and `deletes`. - The request file `--request-file` is either a static file or a Golang Text Template rendered separately for each target. - The template input is read from a file referenced by the flag `--request-vars`. ### v0.11.0 - April 15th 2021 - Processors: - Add `event-allow` processor, basically an allow ACL based on `jq` condition or regular expressions. - Add `event-extract-tags` processor, it adds tags based on regex named groups from tag names, tag values, value names, or values. - Add `gnmi-action` to `event-trigger` processor, the action runs a gNMI Set or Get if the trigger condition is met. - Set Command: - Improve usability by supporting reading values (--update-file and --replace-file) from standard input. ### v0.10.0 - April 8th 2021 - New command: - `getset` command: This command conditionally executes both a `Get` and a `Set` RPC, the `GetResponse` is used to evaluate a condition which if met triggers the execution of the `Set` RPC. - Processors: - Some processors' apply condition can be expressed using `jq` instead of regular expressions. ### v0.9.1 - March 23rd 2021 - Processors: - Add `event-trigger` processor: This processor is used to trigger a predefined action if a condition is met. - New processor `event-jq` which applies a transformation on the messages expressed as a jq expression. - Shell autocompletion: - Shell (bash, zsh and fish) autocompletion scripts can be generated using `gnmic completion [bash|zsh|fish]`. - gRPC gzip compression: - `gnmic` supports gzip compression on gRPC connections. ### v0.9.0 - March 11th 2021 - Clustered Prometheus output: - When deployed as a cluster, it is possible to register only one of the prometheus outputs in Consul. This is handy in the case of a cluster with data replication. - Proto file loading at runtime (Nokia SROS): - `gnmic` supports loading SROS proto files at runtime to decode gNMI updates with `proto` encoding - Kafka Output: - Kafka SASL support: PLAIN, SCRAM SHA256/SHA512 OAuth mechanisms are supported. - Configuration: - `gnmic` supports configuration using environment variables. - Processors: - add `event-merge` processor. - Target Loaders: - `gnmic` supports target loaders at runtime, new targets can be added to the configuration from a file that `gnmic` watches or from `Consul` ### v0.8.0 - March 2nd 2021 - Inputs: - Processors can now be applied by the input plugins. - Prometheus output: - The Prometheus output can now register as a service in Consul, a Prometheus client can discover the output using consul service discovery. - Clustering: - `gnmic` can now run as a cluster, this requires a running Consul instance that will be used by the `gnmic` instance for leader election and target load sharing. - Configuration file: - The default configuration file placement now follows [XDG](https://wiki.archlinux.org/index.php/XDG_Base_Directory) recommendations - CLI exit status: - Failure of most commands is properly reflected in the cli exit status. - Configuration: - Configuration fields that are OS paths are expanded by `gnmic` - Deployment examples: - A set of deployment examples is added to the repo and the docs. ### v0.7.0 - January 28th 2021 - Prometheus output metrics customization: - `metric-prefix` and `append-subscription-name` can be used to change the default metric prefix and append the subscription name to the metric name. - `export-timestamps`: enables/disables the export of timestamps together with the metric. - `strings-as-labels`: enables/disables automatically adding paths with a value of type string as a metric label. - NATS output: - allow multiple NATS workers under NATS output via field `num-workers`. - add NATS prometheus internal metrics. - STAN output: - allow multiple STAN workers under STAN output via field `num-workers`. - add NATS prometheus internal metrics. - File output: - add File prometheus metrics. - Inputs: - support ingesting gNMI data from NATS, STAN or a Kafka message bus. ### v0.6.0 - December 14th 2020 - Processors: - Added processors to `gnmic`, a set of basic processors can be used to manipulate gNMI data flowing through `gnmic`. These processors are applied by the output plugins - Upgrade command: `gnmic` can be upgraded using `gnmic version upgrade` command. ### v0.5.2 - December 1st 2020 - Outputs: - Improve outputs logging - Add Prometheus metrics to Kafka output ### v0.5.1 - November 28th 2020 - Prompt Mode: - Fix subscribe RPC behavior - QoS: - Do not populate QoS field if not set via config file or flag. Outputs: - add configurable number of workers to some outputs. ### v0.5.0 - November 25th 2020 - Prompt Mode: - Add prompt sub commands. - XPATH parsing: - Add custom xpath parsingto gnmi.Path to allow for paths including column `:`. - TLS: - Allow configurable TLS versions per target, the minimum, the maximum and the preferred TLS versions ca be configured. ### v0.4.3 - November 10th 2020 - Missing path: - Initialize the path field if not present in SubscribeResponse ### v0.4.2 - November 5th 2020 - YANG: - Prompt command flags `--file` and `--dir` support globs. - Subscribe: - added flags `--output` that allows to choose a single output for `subscribe` updates - Prompt: - Max suggestions is automatically adjusted based on the terminal height. - Add suggestions for address and subscriptions. ### v0.4.1 - October 22nd 2020 - Prompt: - Add suggestions of xpath with origin, `--suggest-with-origin`. ### v0.4.0 - October 21st 2020 - New Command: - Add new command `prompt` - Prompt: - Add ctrl+z key bind to delete a single path element. - Add YANG info to xpath suggestions. - Add GoLeft, GoRight key binds. - Sort xpaths and prefixes suggestions. - xpaths suggestions are properly generated if a prefix is present. - flag `--suggest-all-flags` allows adding global flags suggestion in prompt mode. - Prometheus output: - Add support for Prometheus output plugin. ### v0.3.0 - October 1st 2020 - InfluxDB output: - Add support for influxDB output plugin. ### v0.2.3 - September 18th 2020 - Retry - Add basic RPC retry mechanism. - ONCE mode subscription: - Handle targets that send an EOF error instead of a SyncResponse to signify the end of ONCE subscriptions. - Docker image: - Docker images added to ghcr.io as well as docker hub. ### v0.2.2 - September 3rd 2020 - CLI: - Properly handle paths that include quotes. - Unix Socket: - Allow send/rcv of gNMI data to/from a unix socket. - Outputs: - Add TCP output plugin. ### v0.2.1 - August 11th 2020 - Releases: - Add .deb. and .rpm packages to releases. - Outputs: - Add UDP output plugin. ### v0.2.0 - August 7th 2020 - Releases: - Add ARM releases. - Push docker image to docker hub. ### v0.1.1 - July 23rd 2020 - Set Cmd: - Support `json_ietf` encoding when the value is specified from a file. ### v0.1.0 - July 16th 2020 - Outputs: - Allow NATS/STAN output subject customization. ### v0.0.7 - July 16th 2020 - gNMI Target: - Add support for gNMI Target field. - gNMI Origin: - Add support for gNMI Origin field. - Prometheus internal metrics: - Add support for `gnmic` internal metrics via a Prometheus server. - Outputs: - Add support for multiple output plugins (file, NATS, STAN, Kafka) - Targets: - Support target specific configuration. - Poll Subscription: - Allow selecting polled targets and subscription using a CLI select menu. - gNMI Models: - Support multiple Models in Get and Subscribe RPCs. ### v0.0.6 - June 2nd 2020 - Nokia Dialout: - Add Support for Nokia Dialout telemetry. - Printing: - Convert timestamps to Time. ### v0.0.5 - May 18th 2020 - Formatting: - Add `textproto` format. ### v0.0.4 - May 11th 2020 - Logging: - Support logging to file instead of Stderr. - Set Command: - support Set values from YAML file. ### v0.0.3 - April 23rd 2020 - Proxy: - Allow usage of ENV proxy values for gRPC connections. - Installation: - Add installation script. ### v0.0.2 - April 13th 2020 - Terminal printing clean up. - Path Command: Add search option. ### v0.0.1 - March 24th 2020 - Capabilities RPC Command. - Get RPC Command. - Subscribe RPC Command. - Set RPC Command. - TLS support. - Version Command. - Path Commnd. ### initial Commit - February 20th 2020 ================================================ FILE: docs/cmd/capabilities.md ================================================ ## Description The `[cap | capabilities]` command represents the [gNMI Capabilities RPC](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L51). It is used to send a [Capability Request](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L431) to the specified target(s) and expects one [Capability Response](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L440) per target. [Capabilities](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#32-capability-discovery) allows the client to retrieve the set of capabilities that is supported by the target: * gNMI version * available data models * supported encodings * gNMI extensions This allows the client to, for example, validate the service version that is implemented and retrieve the set of models that the target supports. The models can then be specified in subsequent Get/Subscribe RPCs to precisely tell the target which models to use. ### Usage `gnmic [global-flags] capabilities [local-flags]` ### Examples #### single host ```text gnmic -a --username --password \ --insecure capabilities gNMI_Version: 0.7.0 supported models: - nokia-conf, Nokia, 19.10.R2 - nokia-state, Nokia, 19.10.R2 - nokia-li-state, Nokia, 19.10.R2 - nokia-li-conf, Nokia, 19.10.R2 << SNIPPED >> supported encodings: - JSON - BYTES ``` #### multiple hosts ```bash gnmic -a , -u -p \ --insecure cap ``` ================================================ FILE: docs/cmd/collector.md ================================================ ### Description The `[collect | collector | coll | c]` command starts gNMIc as a long-running telemetry collector service. Unlike the `subscribe` command which is designed for interactive use, the collector command is optimized for production deployments with dynamic configuration capabilities via REST API. The collector provides: - **Dynamic configuration** - Add, modify, or remove targets, subscriptions, outputs, inputs, and processors at runtime via REST API - **Clustering support** - Multiple collector instances can form a cluster with automatic target distribution and failover - **Embedded gNMI server** - Expose collected telemetry to downstream gNMI clients - **Tunnel target support** - Accept connections from gNMI tunnel targets ### Usage `gnmic [global-flags] collect [local-flags]` ### Local Flags #### pyroscope-server-address The `[--pyroscope-server-address]` flag sets the Pyroscope server address for continuous profiling. When set, the collector will send profiling data to the specified Pyroscope server. #### pyroscope-application-name The `[--pyroscope-application-name]` flag sets the application name used in Pyroscope. Defaults to `gnmic-collector`. ### Subcommands The collector command provides subcommands to interact with a running collector instance via its REST API: | Subcommand | Aliases | Description | |------------|---------|-------------| | `targets` | `target`, `tg` | Manage targets | | `subscriptions` | `subscription`, `sub` | Manage subscriptions | | `outputs` | `output`, `out` | Manage outputs | | `inputs` | `input`, `in` | Manage inputs | | `processors` | `processor`, `proc` | Manage processors | Each subcommand supports the following operations: | Operation | Aliases | Description | |-----------|---------|-------------| | `list` | `ls` | List all resources | | `get` | `g`, `show`, `sh` | Get a specific resource | | `set` | `create`, `cr` | Create or update a resource | | `delete` | `d`, `del`, `rm` | Delete a resource | ### Configuration The collector is configured using the standard gNMIc configuration file. The key sections are: ```yaml # API server configuration (required for collector) api-server: address: :7890 timeout: 10s tls: ca-file: cert-file: key-file: enable-metrics: false debug: false # Targets to collect from targets: router1: address: 10.0.0.1:57400 username: admin password: admin skip-verify: true # Subscriptions define what data to collect subscriptions: interfaces: paths: - /interfaces/interface/state/counters mode: stream stream-mode: sample sample-interval: 10s # Outputs define where to send collected data outputs: prometheus: type: prometheus listen: :9804 path: /metrics # Inputs for receiving data from message queues inputs: nats-input: type: nats address: nats://localhost:4222 subject: telemetry.> # Event processors for data transformation processors: add-hostname: event-add-tag: tags: - tag-name: hostname value: ${HOST} # Clustering configuration (optional) clustering: cluster-name: gnmic-cluster instance-name: gnmic-1 locker: type: consul address: consul:8500 # gNMI server configuration (optional) gnmi-server: address: :57401 skip-verify: true ``` ### Examples #### 1. Start a basic collector ```bash gnmic --config collector.yaml collect ``` #### 2. Start with Pyroscope profiling ```bash gnmic --config collector.yaml collect \ --pyroscope-server-address http://pyroscope:4040 \ --pyroscope-application-name my-collector ``` #### 3. List targets from a running collector ```bash gnmic --config collector.yaml collect targets list ``` Output: ``` NAME ADDRESS USERNAME STATE SUBSCRIPTIONS OUTPUTS INSECURE SKIP VERIFY router1 10.0.0.1:57400 admin running 2 1 false true router2 10.0.0.2:57400 admin running 2 1 false true ``` #### 4. Get details of a specific target ```bash gnmic --config collector.yaml collect targets get --name router1 ``` #### 5. Create a new target ```bash gnmic --config collector.yaml collect targets set --input target.yaml ``` Where `target.yaml` contains: ```yaml name: router3 address: 10.0.0.3:57400 username: admin password: admin skip-verify: true subscriptions: - interfaces outputs: - prometheus ``` #### 6. Delete a target ```bash gnmic --config collector.yaml collect targets delete --name router3 ``` #### 7. List subscriptions ```bash gnmic --config collector.yaml collect subscriptions list ``` Output: ``` NAME PREFIX PATHS ENCODING MODE SAMPLE INTERVAL TARGETS OUTPUTS interfaces - /interfaces/interface/state/counters json stream/sample 10s 2/2 1 ``` #### 8. List outputs ```bash gnmic --config collector.yaml collect outputs list ``` Output: ``` NAME TYPE FORMAT EVENT PROCESSORS prometheus prometheus - 1 ``` #### 9. List processors with details ```bash gnmic --config collector.yaml collect processors list --details ``` ### See Also - [Collector Introduction](../user_guide/collector/collector_intro.md) - Overview and architecture - [Collector Configuration](../user_guide/collector/collector_configuration.md) - Detailed configuration reference - [Collector REST API](../user_guide/collector/collector_api.md) - API endpoints reference ================================================ FILE: docs/cmd/diff/diff.md ================================================ ### Description The `diff` command is similar to a `get` or `subscribe` (mode ONCE) commands ran against at least 2 targets, a reference and one or more compared targets. The command will compare the returned responses from the compared targets to the ones returned from the reference target and only print the difference between them. The output is printed as a list "flattened" gNMI updates, each line containing an XPath pointing to a leaf followed by its value. Each line is preceded with either signs `+` or `-`: - `+` means the leaf and its value are present in the compared target but not in the reference target. - `-` means the leaf and its value are present in the reference target but not in the compared target. e.g: ```text + network-instance[name=default]/interface[name=ethernet-1/36.0]: {} - network-instance[name=default]/protocols/bgp/autonomous-system: 101 ``` The output above indicates: - The compared target has interface `ethernet-1/36.0` added to network instance `default` while the reference doesn't. - The compared target is missing the autonomous-system `101` configuration under network-instance `default` protocols/bgp compared to the reference. The data to be compared is specified with the flag `--path`, which can be set multiple times to compare multiple data sets. By default, the data it is retrieved using a `Get RPC`, if the flag `--sub` is present, a `Subscribe RPC` with mode ONCE is used instead. Each of the `get` and `subscribe` methods has pros and cons, with the `get` method you can choose to compare `CONFIG` or `STATE` only, via the flag `--type`. The `subscribe` method allows to stream the response(s) in case a larger data set needs to be compared. In addition to that, some routers support more encoding options when using the `subscribe RPC` Multiple targets can be compared to the reference at once, the printed output of each difference will start with the line `"$reference" vs "$compared"` Aliases: `compare` ### Usage `gnmic [global-flags] diff [local-flags]` ### Flags #### ref The `--ref` flag is a mandatory flag that specifies the target to used as reference to compare other targets to. #### compare The `--compare` flag is a mandatory flag that specifies the targets to compare to the reference target. #### prefix As per [path prefixes](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#241-path-prefixes), the prefix `[--prefix]` flag represents a common prefix that is applied to all paths specified using the local `--path` flag. Defaults to `""`. #### path The mandatory path flag `[--path]` is used to specify the [path(s)](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths) the client wants to receive a snapshot of. Multiple paths can be specified by using multiple `--path` flags: ```bash gnmic --insecure \ --ref router1 --compare router2,router3 diff --path "/state/ports[port-id=*]" \ --path "/state/router[router-name=*]/interface[interface-name=*]" ``` If a user needs to provide [origin](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths) information to the Path message, the following pattern should be used for the path string: `"origin:path"`: #### model The optional model flag `[--model]` is used to specify the schema definition modules that the target should use when returning a GetResponse. The model name should match the names returned in Capabilities RPC. Currently only single model name is supported. #### target With the optional `[--target]` flag it is possible to supply the [path target](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#2221-path-target) information in the prefix field of the GetRequest message. #### type The type flag `[--type]` is used to specify the [data type](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L399) requested from the server. One of: ALL, CONFIG, STATE, OPERATIONAL (defaults to "ALL") #### sub When the flag `--sub` is present, `gnmic` will use a `Subscribe RPC` with mode ONCE, instead of a `Get RPC` to retrieve the data to be compared. ### Examples ```bash gnmic diff -t config --skip-verify -e ascii \ --ref clab-te-leaf1 \ --compare clab-te-leaf2 \ --path /network-instance ``` ```bash "clab-te-leaf1:57400" vs "clab-te-leaf2:57400" + network-instance[name=default]/interface[name=ethernet-1/36.0] : {} - network-instance[name=default]/protocols/bgp/autonomous-system : 101 + network-instance[name=default]/protocols/bgp/autonomous-system : 102 - network-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:11:1] : {} - network-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:11:1]/admin-state: enable - network-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:11:1]/peer-as : 201 - network-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:11:1]/peer-group : eBGPv6 - network-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:12:1] : {} - network-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:12:1]/admin-state: enable - network-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:12:1]/peer-as : 202 - network-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:12:1]/peer-group : eBGPv6 + network-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:21:1] : {} + network-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:21:1]/admin-state: enable + network-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:21:1]/peer-as : 201 + network-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:21:1]/peer-group : eBGPv6 + network-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:22:1] : {} + network-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:22:1]/admin-state: enable + network-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:22:1]/peer-as : 202 + network-instance[name=default]/protocols/bgp/neighbor[peer-address=2002::192:168:22:1]/peer-group : eBGPv6 + network-instance[name=default]/protocols/bgp/router-id : 10.0.1.2 - network-instance[name=default]/protocols/bgp/router-id : 10.0.1.1 - network-instance[name=myins] : {} - network-instance[name=myins]/admin-state : enable - network-instance[name=myins]/description : desc1 - network-instance[name=myins]/interface[name=ethernet-1/36.0] : {} - network-instance[name=myins]/type : ip-vrf ``` ================================================ FILE: docs/cmd/diff/diff_set_to_notifs.md ================================================ ### Description The `diff set-to-notifs` command is used to verify whether a set of notifications from a `GetResponse` or a stream of `SubscribeResponse` messages comply with a `SetRequest` messages in textproto format. The envisioned use case is to check whether a stored snapshot of device state matches that of the intended state as specified by a `SetRequest`. The output is printed as a list of "flattened" gNMI updates, each line containing an XPath pointing to a leaf followed by its value. Each line is preceded with either signs `+` or `-`: - `+` means the leaf and its value are present in the new SetRequest but not in the reference SetRequest. - `-` means the leaf and its value are present in the reference SetRequest but not in the new SetRequest. e.g: ```text SetToNotifsDiff(-want/SetRequest, +got/Notifications): - /lacp/interfaces/interface[name=Port-Channel9]/config/interval: "FAST" - /lacp/interfaces/interface[name=Port-Channel9]/config/name: "Port-Channel9" - /lacp/interfaces/interface[name=Port-Channel9]/name: "Port-Channel9" - /network-instances/network-instance[name=VrfBlue]/config/name: "VrfBlue" - /network-instances/network-instance[name=VrfBlue]/config/type: "openconfig-network-instance-types:L3VRF" - /network-instances/network-instance[name=VrfBlue]/name: "VrfBlue" m /system/config/hostname: - "violetsareblue" + "rosesarered" ``` The output above indicates: - The set of paths starting with `/lacp/interfaces/interface[name=Port-Channel9]/config/interval: "FAST"` are present in the SetRequest but missing in the response from the device. - The value at path `/system/config/hostname` does not match that of the SetRequest. When `--full` is specified, values common between the SetRequest and the response messages are also shown. ### How to obtain a GetResponse or SubscribeResponse To obtain GetRespnse/SubscribeResponse in textproto format, simply run `gnmic`'s subscribe or get functions and pass in the flag `--format prototext`. Responses retrieved from either GetRequest or SubscribeRequest are supported by this command's `--response` flag. ### Usage `gnmic [global-flags] diff set-to-notifs [local-flags]` ### Flags #### setrequest The `--setrequest` flag is a mandatory flag that specifies the reference gNMI SetRequest textproto file for comparing against the new SetRequest. #### response The `--response` flag is a mandatory flag that specifies the gNMI Notifications textproto file (can contain a GetResponse or SubscribeResponse stream) for comparing against the reference SetRequest. ### Examples ```bash $ gnmic diff set-to-notifs --setrequest cmd/demo/setrequest.textproto --response cmd/demo/subscriberesponses.textproto ``` ================================================ FILE: docs/cmd/diff/diff_setrequest.md ================================================ ### Description The `diff setrequest` command is used to compare the intent between two `SetRequest` messages encoded in textproto format. The output is printed as a list of "flattened" gNMI updates, each line containing an XPath pointing to a leaf followed by its value. Each line is preceded with either signs `+` or `-`: - `+` means the leaf and its value are present in the new SetRequest but not in the reference SetRequest. - `-` means the leaf and its value are present in the reference SetRequest but not in the new SetRequest. e.g: ```text SetRequestIntentDiff(-A, +B): -------- deletes/replaces -------- + /network-instances/network-instance[name=VrfBlue]: deleted or replaced only in B -------- updates -------- m /system/config/hostname: - "violetsareblue" + "rosesarered" ``` The output above indicates: - The new target deletes or replaces the path `/network-instances/network-instance[name=VrfBlue]` while the reference doesn't. - The new target changes the value of `/system/config/hostname` compared to the reference from `"violetsareblue"` to `"rosesarered"`. When `--full` is specified, values common between the two SetRequest are also shown. ### SetRequest Intent It is possible for two SetRequests to be different but which are semantically equivalent -- i.e. they both modify the same leafs in the same ways. In other words, their overall effects are the same. For example, a replace on the leaf `/system/config/hostname` with the value `"foo"` is the same as an update on the same leaf with the same value. A replace on the container `/system/` with the value `{ config: { hostname: "foo" } }` is the same as a delete on that container followed by a replace to the leaf. Overwrites are also possible, although this is currently unsupported. In order to compare equivalent SetRequests correctly, this tool breaks down a SetRequest into its "minimal intent" (deletes followed by updates) prior to the diff computation. This is why the output groups deletes/replaces into the same section. ### Usage `gnmic [global-flags] diff setrequest [local-flags]` ### Flags #### ref The `--ref` flag is a mandatory flag that specifies the reference gNMI SetRequest textproto file for comparing against the new SetRequest. #### new The `--new` flag is a mandatory flag that specifies the new gNMI SetRequest textproto file for comparing against the reference SetRequest. ### Examples ```bash $ gnmic diff setrequest --ref cmd/demo/setrequest.textproto --new cmd/demo/setrequest2.textproto ``` ================================================ FILE: docs/cmd/generate/generate_path.md ================================================ ### Description The path sub command is an alias for the [`gnmic path`](../../cmd/path.md) command. ================================================ FILE: docs/cmd/generate/generate_set_request.md ================================================ ### Description The set-request sub command generates a Set request file given a list of update and/or replace paths. If no paths are supplied, a root (`/`) replace path is used as a default. The generated file can be manually edited and used with `gnmic` set command: `gnmic set --request-file ` Aliases: `sreq`, `srq`, `sr` ### Usage `gnmic [global-flags] generate [generate-flags] set-request [sub-command-flags]` ### Flags #### update The `--update` flag specifies a valid xpath, used to generate an __updates__ section of the [set request file](../set.md#template-based-set-request). Multiple `--update` flags can be supplied. #### replace The `--replace` flag specifies a valid xpath, used to generate a __replaces__ section of the [set request file](../set.md#template-based-set-request). Multiple `--replace` flags can be supplied. ### Examples #### Openconfig YANG repo: [openconfig/public](https://github.com/openconfig/public) Clone the OpenConfig repository: ```bash git clone https://github.com/openconfig/public cd public ``` ```bash gnmic --encoding json_ietf \ generate \ --file release/models \ --dir third_party \ --exclude ietf-interfaces \ set-request \ --replace /interfaces/interface/subinterfaces/subinterface/ipv4/addresses/address ``` The above command generates the below YAML output (JSON if `--json` flag is supplied) ```yaml replaces: - path: /interfaces/interface/subinterfaces/subinterface/ipv4/addresses/address value: - config: ip: "" prefix-length: "" ip: "" vrrp: vrrp-group: - config: accept-mode: "false" advertisement-interval: "100" preempt: "true" preempt-delay: "0" priority: "100" virtual-address: "" virtual-router-id: "" interface-tracking: config: priority-decrement: "0" track-interface: "" virtual-router-id: "" encoding: JSON_IETF ``` The __value__ section can be filled with the desired configuration variables. #### Nokia SR OS ```bash git clone https://github.com/nokia/7x50_YangModels cd 7x50_YangModels git checkout sros_21.2.r2 ``` ```bash gnmic generate \ --file YANG/nokia-combined \ --dir YANG \ set-request \ --replace /configure/service/vprn/bgp/family ``` The above command generates the below YAML output (JSON if `--json` flag is supplied) ```yaml replaces: - path: /configure/service/vprn/bgp/family value: flow-ipv4: "false" flow-ipv6: "false" ipv4: "true" ipv6: "false" label-ipv4: "false" mcast-ipv4: "false" mcast-ipv6: "false" ``` #### Cisco YANG repo: [YangModels/yang](https://github.com/YangModels/yang) Clone the `YangModels/yang` repo and change into the main directory of the repo: ```bash git clone https://github.com/YangModels/yang cd yang/vendor ``` ```bash gnmic --encoding json_ietf \ generate \ --file vendor/cisco/xr/721/Cisco-IOS-XR-um-router-bgp-cfg.yang \ --file vendor/cisco/xr/721/Cisco-IOS-XR-ipv4-bgp-oper.yang \ --dir standard/ietf \ set-request \ --path /active-nodes ``` The above command generates the below YAML output (JSON if `--json` flag is supplied) ```yaml replaces: - path: /active-nodes value: active-node: - node-name: "" selective-vrf-download: role: address-family: ipv4: unicast: "" ipv6: unicast: "" vrf-groups: vrf-group: - vrf-group-name: "" encoding: JSON_IETF ``` #### Juniper YANG repo: [Juniper/yang](https://github.com/Juniper/yang) Clone the Juniper YANG repository and change into the release directory: ```bash git clone https://github.com/Juniper/yang cd yang/20.3/20.3R1 ``` ```bash gnmic --encoding json_ietf \ generate --file junos/conf \ --dir common set-request \ --replace /configuration/interfaces/interface/unit/family/inet/address ``` The above command generates the below YAML output (JSON if `--json` flag is supplied) ```yaml replaces: - path: /configuration/interfaces/interface/unit/family/inet/address value: - apply-groups: "" apply-groups-except: "" apply-macro: - data: - name: "" value: "" name: "" arp: - case_1: "" case_2: "" l2-interface: "" name: "" publish: "" broadcast: "" destination: "" destination-profile: "" master-only: "" multipoint-destination: - apply-groups: "" apply-groups-except: "" apply-macro: - data: - name: "" value: "" name: "" case_1: "" case_2: "" epd-threshold: apply-groups: "" apply-groups-except: "" apply-macro: - data: - name: "" value: "" name: "" epd-threshold-plp0: "" plp1: "" inverse-arp: "" name: "" oam-liveness: apply-groups: "" apply-groups-except: "" apply-macro: - data: - name: "" value: "" name: "" down-count: "" up-count: "" oam-period: disable: {} oam_period: "" shaping: apply-groups: "" apply-groups-except: "" apply-macro: - data: - name: "" value: "" name: "" cbr: cbr-value: "" cdvt: "" queue-length: "" rtvbr: burst: "" cdvt: "" peak: "" sustained: "" vbr: burst: "" cdvt: "" peak: "" sustained: "" transmit-weight: "" name: "" preferred: "" primary: "" virtual-gateway-address: "" vrrp-group: - advertisements-threshold: "" apply-groups: "" apply-groups-except: "" apply-macro: - data: - name: "" value: "" name: "" authentication-key: "" authentication-type: "" case_1: "" case_2: "" case_3: "" name: "" preferred: "" priority: "" track: apply-groups: "" apply-groups-except: "" apply-macro: - data: - name: "" value: "" name: "" interface: - apply-groups: "" apply-groups-except: "" apply-macro: - data: - name: "" value: "" name: "" bandwidth-threshold: - name: "" priority-cost: "" name: "" priority-cost: "" priority-hold-time: "" route: - priority-cost: "" route_address: "" routing-instance: "" virtual-link-local-address: "" vrrp-inherit-from: active-group: "" active-interface: "" apply-groups: "" apply-groups-except: "" apply-macro: - data: - name: "" value: "" name: "" web-authentication: apply-groups: "" apply-groups-except: "" apply-macro: - data: - name: "" value: "" name: "" http: "" https: "" redirect-to-https: "" encoding: JSON_IETF ``` #### Arista YANG repo: [aristanetworks/yang](https://github.com/aristanetworks/yang) Arista uses a subset of OpenConfig modules and does not provide IETF modules inside their repo. So make sure you have IETF models available so you can reference it, a `openconfig/public` is a good candidate. Clone the Arista YANG repo: ```bash git clone https://github.com/aristanetworks/yang cd yang ``` The above command generates the below YAML output (JSON if `--json` flag is supplied) ```bash gnmic --encoding json_ietf \ generate --file EOS-4.23.2F/openconfig/public/release/models \ --dir ../openconfig/public/third_party/ietf \ --exclude ietf-interfaces \ set-request \ --replace bgp/neighbors/neighbor/config ``` ```yaml replaces: - path: bgp/neighbors/neighbor/config value: auth-password: "" description: "" enabled: "true" local-as: "" neighbor-address: "" peer-as: "" peer-group: "" peer-type: "" remove-private-as: "" route-flap-damping: "false" send-community: NONE ``` ================================================ FILE: docs/cmd/generate.md ================================================ ### Description Most `gNMI` targets use YANG as a modeling language for their datastores. It order to access and manipulate the stored data (`Get`, `Set`, `Subscribe`), a tool should be aware of the underlying YANG model, be able to generate paths pointing to the desired `gNMI` objects as well as building configuration payloads matching data instances on the targets. The `generate` command takes the target's YANG models as input and generates: - Paths in `xpath` or `gNMI` formats. - Configuration payloads that can be used as [update](../cmd/set.md#3-update-with-a-value-from-json-or-yaml-file) or [replace](../cmd/set.md#3-replace-with-a-value-from-json-or-yaml-file) input files for the Set command. - A Set request file that can be used as a [template](../cmd/set.md#template-based-set-request) with the Set command. Aliases: `gen` ### Usage `gnmic [global-flags] generate [local-flags]` or `gnmic [global-flags] generate [local-flags] sub-command [sub-command-flags]` ### Persistent Flags #### output The `--output` flag specifies the file to which the generated output will be written, defaults to `stdout` #### json When used with `generate` command, the `--json` flag, if present changes the output format from YAML to JSON. When used with `generate path` command, it outputs the path, the leaf **type**, its **description**, its **default value** and if it is a **state leaf** or not in an array of JSON objects. ### Local Flags #### path The `--path` flag specifies the path whose payload (JSON/YAML) will be generated. Defaults to `/` #### config-only The `--config-only` flag, if present instruct `gnmic` to generate JSON/YAML payloads from YANG nodes not marked as `config false`. #### camel-case The `--camel-case` flag, if present allows to convert all the keys in the generated JSON/YAML paylod to `CamelCase` #### snake-case The `--snake-case` flag, if present allows to convert all the keys in the generated JSON/YAML paylod to `snake_case` ### Sub Commands #### Path The path sub command is an alias for the [`gnmic path`](../cmd/path.md) command. #### Set-request The [set-request](../cmd/generate/generate_set_request.md) sub command generates a Set request file given a list of update and/or replace paths. ### Examples #### Openconfig YANG repo: [openconfig/public](https://github.com/openconfig/public) Clone the OpenConfig repository: ```bash git clone https://github.com/openconfig/public cd public ``` ```bash gnmic --encoding json_ietf \ generate \ --file release/models \ --dir third_party \ --exclude ietf-interfaces \ --path /interfaces/interface/subinterfaces/subinterface/ipv4/addresses/address ``` ```yaml - config: ip: "" prefix-length: "" ip: "" vrrp: vrrp-group: - config: accept-mode: "false" advertisement-interval: "100" preempt: "true" preempt-delay: "0" priority: "100" virtual-address: "" virtual-router-id: "" interface-tracking: config: priority-decrement: "0" track-interface: "" virtual-router-id: "" ``` ================================================ FILE: docs/cmd/get.md ================================================ ### Description The `get` command represents the gNMI [Get RPC](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L57). It is used to send a [GetRequest](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L395) to the specified target(s) (using the global flag [`--address`](../global_flags.md#address) and expects one [GetResponse](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L420) per target, per path. The [Get RPC](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#33-retrieving-snapshots-of-state-information) is used to retrieve a snapshot of data from the target. It requests that the target snapshots a subset of the data tree as specified by the paths included in the message and serializes this to be returned to the client using the specified encoding. ### Usage `gnmic [global-flags] get [local-flags]` ### Flags #### prefix As per [path prefixes](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#241-path-prefixes), the prefix `[--prefix]` flag represents a common prefix that is applied to all paths specified using the local `--path` flag. Defaults to `""`. #### path The mandatory path flag `[--path]` is used to specify the [path(s)](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths) the client wants to receive a snapshot of. Multiple paths can be specified by using multiple `--path` flags: ```bash gnmic -a --insecure \ get --path "/state/ports[port-id=*]" \ --path "/state/router[router-name=*]/interface[interface-name=*]" ``` If a user needs to provide [origin](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths) information to the Path message, the following pattern should be used for the path string: `"origin:path"`: !!! note The path after the origin value has to start with a `/` ``` gnmic -a --insecure \ get --path "openconfig-interfaces:/interfaces/interface" ``` #### model The optional model flag `[--model]` is used to specify the schema definition modules that the target should use when returning a GetResponse. The model name should match the names returned in Capabilities RPC. Currently only single model name is supported. #### target With the optional `[--target]` flag it is possible to supply the [path target](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#2221-path-target) information in the prefix field of the GetRequest message. #### values-only The flag `[--values-only]` allows to print only the values returned in a GetResponse. This is useful when only the value of a leaf is of interest, like check if a value was set correctly. #### type The type flag `[--type]` is used to specify the [data type](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L399) requested from the server. One of: ALL, CONFIG, STATE, OPERATIONAL (defaults to "ALL") #### processor The `[--processor]` flag allow to list [event processor](../user_guide/event_processors/intro.md) names to be run as a result of receiving the GetReponse messages. The processors are run in the order they are specified (`--processor proc1,proc2` or `--processor proc1 --processor proc2`). #### depth The `[--depth]` flag set the gNMI extension depth value as defined [here](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-depth.md) ### Examples ```bash # simple Get RPC gnmic -a get --path "/state/port[port-id=*]" # Get RPC with multiple paths gnmic -a get --path "/state/port[port-id=*]" \ --path "/state/router[router-name=*]/interface[interface-name=*]" # Get RPC with path prefix gnmic -a get --prefix "/state" \ --path "port[port-id=*]" \ --path "router[router-name=*]/interface[interface-name=*]" ``` ================================================ FILE: docs/cmd/getset.md ================================================ ### Description The `getset` command is a combination of the gNMI [Get RPC](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L57) and the gNMI [Set RPC](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L62). It allows to conditionally execute a [Set RPC](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L62) based on a condition evaluated against a [GetResponse](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L420). The `condition` written as a [`jq expression`](https://stedolan.github.io/jq/), is specified using the flag `--condition`. The [SetRPC](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L62) is executed only if the condition evaluates to `true` ### Usage `gnmic [global-flags] getset [local-flags]` `gnmic [global-flags] gas [local-flags]` `gnmic [global-flags] gs [local-flags]` ### Flags #### prefix As per [path prefixes](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#241-path-prefixes), the prefix `[--prefix]` flag represents a common prefix that is applied to all paths specified using the local `--get`, `--update`, `--replace` and `--delete` flags. Defaults to `""`. #### get The mandatory get flag `[--get]` is used to specify the single [path](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths) used in the Get RPC. #### model The optional model flag `[--model]` is used to specify the schema definition modules that the target should use when returning a GetResponse. The model name should match the names returned in Capabilities RPC. Currently only single model name is supported. #### target With the optional `[--target]` flag it is possible to supply the [path target](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#2221-path-target) information in the prefix field of the GetRequest message. #### type The type flag `[--type]` is used to specify the [data type](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L399) requested from the server. One of: ALL, CONFIG, STATE, OPERATIONAL (defaults to "ALL") #### condition The `[--condition]` is a [`jq expression`](https://stedolan.github.io/jq/) that can be used to determine if the Set Request is executed based on the Get Response values. #### update The `[--update]` specifies a [`jq expression`](https://stedolan.github.io/jq/) used to build the Set Request update path. #### replace The `[--replace]` specifies a [`jq expression`](https://stedolan.github.io/jq/) used to build the Set Request replace path. #### delete The `[--delete]` specifies a [`jq expression`](https://stedolan.github.io/jq/) used to build the Set Request delete path. #### value The `[--value]` specifies a [`jq expression`](https://stedolan.github.io/jq/) used to build the Set Request value. ### Examples The command in the below example does the following: - gets the list of interface indexes to interface name mapping, - checks if the interface index (ifindex) 70 exists, - if it does, the set request changes the interface state to `enable` using the interface name. ```bash gnmic getset -a \ --get /interface/ifindex \ --condition '.[] | .updates[].values[""]["srl_nokia-interfaces:interface"][] | select(.ifindex==70) | (.name != "" or .name !=null)' \ --update '.[] | .updates[].values[""]["srl_nokia-interfaces:interface"][] | select(.ifindex==70) | "interface[name=" + .name + "]/admin-state"' \ --value enable ``` ================================================ FILE: docs/cmd/listen.md ================================================ ### Description `gnmic` can be used in a "dial-out telemetry" mode by means of the `listen` command. In the dial-out mode: * a network element is configured with the telemetry paths * a network element initiates a connection towards the server/collector (`gnmic` acts as a server in that case) !!! info Currently `gnmic` only implements the dial-out support for Nokia[^1] SR OS 20.5.r1+ routers. ### Usage ```bash gnmic listen [global flags] [local flags] ``` ### Flags #### address The address flag `[-a | --address]` tells `gnmic` which address to bind an internal server to in an `address:port` format, e.g.: `0.0.0.0:57400`. #### tls-cert Path to the TLS certificate can be supplied with `--tls-cert` flag. #### tls-key Path to the private key can be supplied with `--tls-key` flag. #### max-concurrent-streams To limit the maximum number of concurrent HTTP2 streams use the `--max-concurrent-streams` flag, the default value is 256. ### prometheus-address The prometheus-address flag `[--prometheus-address]` allows starting a prometheus server that can be scraped by a prometheus client. It exposes metrics like memory, CPU and file descriptor usage. ### Examples #### TLS disabled server To start `gnmic` as a server listening on all interfaces without TLS support is as simple as: ```bash gnmic listen -a 0.0.0.0:57400 ``` ??? info "SR OS configuration for non TLS dialout connections" ``` /configure system telemetry destination-group "dialout" allow-unsecure-connection /configure system telemetry destination-group "dialout" destination 10.2.0.99 port 57400 router-instance "management" /configure system telemetry persistent-subscriptions { } /configure system telemetry persistent-subscriptions subscription "dialout" admin-state enable /configure system telemetry persistent-subscriptions subscription "dialout" sensor-group "port_stats" /configure system telemetry persistent-subscriptions subscription "dialout" mode sample /configure system telemetry persistent-subscriptions subscription "dialout" sample-interval 1000 /configure system telemetry persistent-subscriptions subscription "dialout" destination-group "dialout" /configure system telemetry persistent-subscriptions subscription "dialout" encoding bytes /configure system telemetry sensor-groups { } /configure system telemetry sensor-groups { sensor-group "port_stats" } /configure system telemetry sensor-groups { sensor-group "port_stats" path "/state/port[port-id=1/1/c1/1]/statistics/in-octets" } ``` #### TLS enabled server By using [tls-cert](#tls-cert) and [tls-key](#tls-key) flags it is possible to run `gnmic` with TLS. ```bash gnmic listen -a 0.0.0.0:57400 --tls-cert gnmic.pem --tls-key gnmic-key.pem ``` ??? info "SR OS configuration for a TLS enabled dialout connections" The configuration below does not utilise router-side certificates and uses the certificate provided by the server (gnmic). The router will also not verify the certificate. ``` /configure system telemetry destination-group "dialout" tls-client-profile "client-tls" /configure system telemetry destination-group "dialout" destination 10.2.0.99 port 57400 router-instance "management" /configure system telemetry persistent-subscriptions { } /configure system telemetry persistent-subscriptions subscription "dialout" admin-state enable /configure system telemetry persistent-subscriptions subscription "dialout" sensor-group "port_stats" /configure system telemetry persistent-subscriptions subscription "dialout" mode sample /configure system telemetry persistent-subscriptions subscription "dialout" sample-interval 1000 /configure system telemetry persistent-subscriptions subscription "dialout" destination-group "dialout" /configure system telemetry persistent-subscriptions subscription "dialout" encoding bytes /configure system telemetry sensor-groups { } /configure system telemetry sensor-groups { sensor-group "port_stats" } /configure system telemetry sensor-groups { sensor-group "port_stats" path "/state/port[port-id=1/1/c1/1]/statistics/in-octets" } /configure system security tls client-cipher-list "client-ciphers" { } /configure system security tls client-cipher-list "client-ciphers" cipher 1 name tls-rsa-with-aes128-cbc-sha /configure system security tls client-cipher-list "client-ciphers" cipher 2 name tls-rsa-with-aes128-cbc-sha256 /configure system security tls client-cipher-list "client-ciphers" cipher 3 name tls-rsa-with-aes256-cbc-sha /configure system security tls client-cipher-list "client-ciphers" cipher 4 name tls-rsa-with-aes256-cbc-sha256 /configure system security tls client-tls-profile "client-tls" admin-state enable /configure system security tls client-tls-profile "client-tls" cipher-list "client-ciphers" ``` [^1]: Nokia dial-out proto definition can be found in [karimra/sros-dialout](https://github.com/karimra/sros-dialout/blob/master/NOKIA-dial-out-telemetry.proto) ================================================ FILE: docs/cmd/path.md ================================================ ### Description With `path` command it is possible to generate and search through the XPATH style paths extracted from a YANG file. By extracting the XPATH styled paths from a YANG model it is made possible to utilize CLI search tools like `awk`, `sed` and alike to find the paths satisfying specific matching rules. The embedded search capability allows to perform a quick and simple search through the model's paths using simple inclusion/exclusion operators. ### Flags #### types When `--types` flag is present the extracted paths will also have a corresponding type printed out. #### path-type The `--path-type` flag governs which style is used to display the path information. The default value is `xpath` which will produce the XPATH compatible paths. The other option is `gnmi` which will result in the paths to be formatted using the gNMI Path Conventions. === "XPATH" ```bash /state/sfm[sfm-slot=*]/hardware-data/firmware-revision-status ``` === "gNMI" ```bash elem:{name:"state"} elem:{name:"sfm" key:{key:"sfm-slot" value:"*"}} elem:{name:"hardware-data"} elem:{name:"firmware-revision-status"} ``` #### search With the `--search` flag present an interactive CLI search dialog is displayed that allows to navigate through the paths list and perform a search. ```bash ❯ gnmic path --file _test/nokia-state-combined.yang --search Use the arrow keys to navigate: ↓ ↑ → ← and : toggles search ? select path: /state/aaa/radius/statistics/coa/dropped/bad-authentication /state/aaa/radius/statistics/coa/dropped/missing-auth-policy ▸ /state/aaa/radius/statistics/coa/dropped/invalid /state/aaa/radius/statistics/coa/dropped/missing-resource /state/aaa/radius/statistics/coa/received /state/aaa/radius/statistics/coa/accepted /state/aaa/radius/statistics/coa/rejected /state/aaa/radius/statistics/disconnect-messages/dropped/bad-authentication /state/aaa/radius/statistics/disconnect-messages/dropped/missing-auth-policy ↓ /state/aaa/radius/statistics/disconnect-messages/dropped/invalid ``` #### descr When the `--descr` flag is present, the leaf description is printed after the path, indented with a `\t`. #### config-only When the `--config-only` flag is present, paths are generated only for YANG leaves representing config data. #### state-only When the `--state-only` flag is present, paths are generated only for YANG leaves representing state data. #### with-non-leaves When the `--with-non-leaves` flag is present, paths are generated not only for YANG leaves. ### Examples ```bash # output to stdout the XPATH styled paths # from the nokia-state module of nokia-state-combined.yang file gnmic path --file nokia-state-combined.yang # from the nokia-conf module gnmic path -m nokia-conf --file nokia-conf-combined.yang # with the gNMI styled paths gnmic path --file nokia-state-combined.yang --path-type gnmi # with path types gnmic path --file nokia-state-combined.yang --types # entering the interactive navigation prompt gnmic path --file nokia-state-combined.yang --search ``` [^1]: Nokia combined models can be found in [nokia/7x50_YangModels](https://github.com/nokia/7x50_YangModels/tree/master/latest_sros_20.5/nokia-combined) repo. ================================================ FILE: docs/cmd/processor.md ================================================ ### Description The `[processor | proc]` command allows running a set of event processor offline given an input of event messages. If expects a file input (`--input`) containing a list of event messages and one or more processor(s) name(s) (`--name`) defined in the main config file. This command will read the input file, validate the configured processors, apply them on the input event messages and print out the result. ### Usage `gnmic [global-flags] processor [local-flags]` ### Local Flags The processor command supports the following local flags: #### name The `[--name]` flag sets the list of processors names to apply to the input. #### input The `[--input]` flag is used to specify the path to a file containing a list of event messages (`stdin` can be specified by giving the `-` value). #### delimiter The `[--delimiter]` flag is used to set the delimiter string between event messages in the input file, defaults to `\n`. #### output The `[--output]` flag references an output name configured in the main config file. The command will out format the resulting messages according to the output config. This is mainly for outputs with `type: prometheus` ### Example Config File ```yaml outputs: out1: type: prometheus metric-prefix: "gnmic" strings-as-labels: true processors: proc0: event-strings: value-names: - "^_" transforms: # processor name proc1: # processor type event-strings: value-names: - ".*" transforms: # strings function name - path-base: apply-on: "name" proc2: event-strings: tag-names: - "interface_name" - "subscription-name" - "source" transforms: # strings function name - to-upper: apply-on: "value" - to-upper: apply-on: "name" proc3: # processor type event-drop: condition: ".values | length == 0" ``` input File: ```json [ { "name": "sub1", "timestamp": 1710890476202665500, "tags": { "interface_name": "mgmt0", "source": "clab-traps-srl1", "subscription-name": "sub1" }, "values": { "/interface/statistics/in-packets": 351770 } }, { "name": "sub1", "timestamp": 1710890476202665500, "tags": { "interface_name": "mgmt0", "source": "clab-traps-srl1", "subscription-name": "sub1" }, "values": { "/interface/statistics/in-octets": 35284165 } }, { "name": "sub1", "timestamp": 1710890476202665500, "tags": { "interface_name": "mgmt0", "source": "clab-traps-srl1", "subscription-name": "sub1" }, "values": { "/interface/statistics/in-unicast-packets": 338985 } }, { "name": "sub1", "timestamp": 1710890476202665500, "tags": { "interface_name": "mgmt0", "source": "clab-traps-srl1", "subscription-name": "sub1" }, "values": { "/interface/statistics/in-broadcast-packets": 1218 } }, { "name": "sub1", "timestamp": 1710890476202665500, "tags": { "interface_name": "mgmt0", "source": "clab-traps-srl1", "subscription-name": "sub1" }, "values": { "/interface/statistics/in-multicast-packets": 5062 } }, { "name": "sub1", "timestamp": 1710890476202665500, "tags": { "interface_name": "mgmt0", "source": "clab-traps-srl1", "subscription-name": "sub1" }, "values": { "/interface/statistics/in-discarded-packets": 6377 } }, { "name": "sub1", "timestamp": 1710890476202665500, "tags": { "interface_name": "mgmt0", "source": "clab-traps-srl1", "subscription-name": "sub1" }, "values": { "/interface/statistics/in-error-packets": 128 } }, { "name": "sub1", "timestamp": 1710890476202665500, "tags": { "interface_name": "mgmt0", "source": "clab-traps-srl1", "subscription-name": "sub1" }, "values": { "/interface/statistics/in-fcs-error-packets": 0 } }, { "name": "sub1", "timestamp": 1710890476202665500, "tags": { "interface_name": "mgmt0", "source": "clab-traps-srl1", "subscription-name": "sub1" }, "values": { "/interface/statistics/out-packets": 568218 } }, { "name": "sub1", "timestamp": 1710890476202665500, "tags": { "interface_name": "mgmt0", "source": "clab-traps-srl1", "subscription-name": "sub1" }, "values": { "/interface/statistics/out-octets": 219527024 } }, { "name": "sub1", "timestamp": 1710890476202665500, "tags": { "interface_name": "mgmt0", "source": "clab-traps-srl1", "subscription-name": "sub1" }, "values": { "/interface/statistics/out-mirror-octets": 0 } }, { "name": "sub1", "timestamp": 1710890476202665500, "tags": { "interface_name": "mgmt0", "source": "clab-traps-srl1", "subscription-name": "sub1" }, "values": { "/interface/statistics/out-unicast-packets": 567532 } }, { "name": "sub1", "timestamp": 1710890476202665500, "tags": { "interface_name": "mgmt0", "source": "clab-traps-srl1", "subscription-name": "sub1" }, "values": { "/interface/statistics/out-broadcast-packets": 6 } }, { "name": "sub1", "timestamp": 1710890476202665500, "tags": { "interface_name": "mgmt0", "source": "clab-traps-srl1", "subscription-name": "sub1" }, "values": { "/interface/statistics/out-multicast-packets": 680 } }, { "name": "sub1", "timestamp": 1710890476202665500, "tags": { "interface_name": "mgmt0", "source": "clab-traps-srl1", "subscription-name": "sub1" }, "values": { "/interface/statistics/out-discarded-packets": 0 } }, { "name": "sub1", "timestamp": 1710890476202665500, "tags": { "interface_name": "mgmt0", "source": "clab-traps-srl1", "subscription-name": "sub1" }, "values": { "/interface/statistics/out-error-packets": 0 } }, { "name": "sub1", "timestamp": 1710890476202665500, "tags": { "interface_name": "mgmt0", "source": "clab-traps-srl1", "subscription-name": "sub1" }, "values": { "/interface/statistics/out-mirror-packets": 0 } }, { "name": "sub1", "timestamp": 1710890476202665500, "tags": { "interface_name": "mgmt0", "source": "clab-traps-srl1", "subscription-name": "sub1" }, "values": { "/interface/statistics/carrier-transitions": 1 } } ] ``` Command: ```shell gnmic processor --input /path/to/event_msg.txt --delimiter "\n###" --name proc1,proc2,proc3 --output out1 ``` Output: ```text # HELP gnmic_in_packets gNMIc generated metric # TYPE gnmic_in_packets untyped gnmic_in_packets{interface_name="mgmt0",source="clab-traps-srl1",subscription_name="sub1"} 351770 # HELP gnmic_in_octets gNMIc generated metric # TYPE gnmic_in_octets untyped gnmic_in_octets{subscription_name="sub1",interface_name="mgmt0",source="clab-traps-srl1"} 3.5284165e+07 # HELP gnmic_in_unicast_packets gNMIc generated metric # TYPE gnmic_in_unicast_packets untyped gnmic_in_unicast_packets{subscription_name="sub1",interface_name="mgmt0",source="clab-traps-srl1"} 338985 # HELP gnmic_in_broadcast_packets gNMIc generated metric # TYPE gnmic_in_broadcast_packets untyped gnmic_in_broadcast_packets{interface_name="mgmt0",source="clab-traps-srl1",subscription_name="sub1"} 1218 # HELP gnmic_in_multicast_packets gNMIc generated metric # TYPE gnmic_in_multicast_packets untyped gnmic_in_multicast_packets{interface_name="mgmt0",source="clab-traps-srl1",subscription_name="sub1"} 5062 # HELP gnmic_in_discarded_packets gNMIc generated metric # TYPE gnmic_in_discarded_packets untyped gnmic_in_discarded_packets{interface_name="mgmt0",source="clab-traps-srl1",subscription_name="sub1"} 6377 # HELP gnmic_in_error_packets gNMIc generated metric # TYPE gnmic_in_error_packets untyped gnmic_in_error_packets{interface_name="mgmt0",source="clab-traps-srl1",subscription_name="sub1"} 128 # HELP gnmic_in_fcs_error_packets gNMIc generated metric # TYPE gnmic_in_fcs_error_packets untyped gnmic_in_fcs_error_packets{interface_name="mgmt0",source="clab-traps-srl1",subscription_name="sub1"} 0 # HELP gnmic_out_packets gNMIc generated metric # TYPE gnmic_out_packets untyped gnmic_out_packets{interface_name="mgmt0",source="clab-traps-srl1",subscription_name="sub1"} 568218 # HELP gnmic_out_octets gNMIc generated metric # TYPE gnmic_out_octets untyped gnmic_out_octets{interface_name="mgmt0",source="clab-traps-srl1",subscription_name="sub1"} 2.19527024e+08 # HELP gnmic_out_mirror_octets gNMIc generated metric # TYPE gnmic_out_mirror_octets untyped gnmic_out_mirror_octets{interface_name="mgmt0",source="clab-traps-srl1",subscription_name="sub1"} 0 # HELP gnmic_out_unicast_packets gNMIc generated metric # TYPE gnmic_out_unicast_packets untyped gnmic_out_unicast_packets{subscription_name="sub1",interface_name="mgmt0",source="clab-traps-srl1"} 567532 # HELP gnmic_out_broadcast_packets gNMIc generated metric # TYPE gnmic_out_broadcast_packets untyped gnmic_out_broadcast_packets{interface_name="mgmt0",source="clab-traps-srl1",subscription_name="sub1"} 6 # HELP gnmic_out_multicast_packets gNMIc generated metric # TYPE gnmic_out_multicast_packets untyped gnmic_out_multicast_packets{source="clab-traps-srl1",subscription_name="sub1",interface_name="mgmt0"} 680 # HELP gnmic_out_discarded_packets gNMIc generated metric # TYPE gnmic_out_discarded_packets untyped gnmic_out_discarded_packets{interface_name="mgmt0",source="clab-traps-srl1",subscription_name="sub1"} 0 # HELP gnmic_out_error_packets gNMIc generated metric # TYPE gnmic_out_error_packets untyped gnmic_out_error_packets{interface_name="mgmt0",source="clab-traps-srl1",subscription_name="sub1"} 0 # HELP gnmic_out_mirror_packets gNMIc generated metric # TYPE gnmic_out_mirror_packets untyped gnmic_out_mirror_packets{interface_name="mgmt0",source="clab-traps-srl1",subscription_name="sub1"} 0 # HELP gnmic_carrier_transitions gNMIc generated metric # TYPE gnmic_carrier_transitions untyped gnmic_carrier_transitions{subscription_name="sub1",interface_name="mgmt0",source="clab-traps-srl1"} 1 ``` ================================================ FILE: docs/cmd/prompt.md ================================================ ## Description The `prompt` command starts `gnmic` in an interactive prompt mode with the following auto-completion features: * All `gnmic` [commands names and their flags are suggested](../user_guide/prompt_suggestions.md#commands-and-flags-suggestions). * Values for the flags that rely on YANG-defined data (like `--path`, `--prefix`, `--model`,...) will be dynamically suggested, we call this feature [YANG-completions](../user_guide/prompt_suggestions.md#yang-completions). The auto-completions are generated from the YANG modules d with the `--file` and `--dir` flags. * Flags with the fixed set of values (`--format`, `--encoding`, ...) will get their [values suggested](../user_guide/prompt_suggestions.md#enumeration-suggestions). * Flags that require a [file path value will auto-suggest](../user_guide/prompt_suggestions.md#file-path-completions) the available files as the user types. ### Usage `gnmic [global-flags] prompt [local-flags]` ### Flags #### description-with-prefix When set, the description of the path elements in the suggestion box will contain module's prefix. #### description-with-types When set, the description of the path elements in the suggestion box will contain element's type information. #### max-suggestions The `--max-suggestions` flag sets the number of lines that the suggestion box will display without scrolling. Defaults to 10. Note, the terminal height might limit the number of lines in the suggestions box. #### suggest-all-flags The `--suggest-all-flags` makes `gnmic` prompt suggest both global and local flags for a sub-command. The default behavior (when this flag is not set) is to suggest __only__ local flags for any typed sub-command. #### suggest-with-origin The `--suggest-with-origin` flag prepends the suggested path with the module name to which this path belongs. The path becomes rendered as `:/`. The module name will be used as the [origin](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths) of the gNMI path. #### suggestions-bg-color The `--suggestions-bg-color` flag sets the background color of the left part of the suggestion box. Defaults to dark blue. #### description-bg-color The `--description-bg-color` flag sets the background color of the right part of the suggestion box. Defaults to dark gray. #### prefix-color The `--prefix-color` flag sets the gnmic prompt prefix color `gnmic> `. Defaults to dark blue. ### Examples The detailed explanation of the prompt command the the YANG-completions is provided on the [Prompt mode and auto-suggestions](../user_guide/prompt_suggestions.md) page. ================================================ FILE: docs/cmd/proxy.md ================================================ ### Description The `[proxy]` command start a gNMI proxy server. That relays gNMI messages to know targets (either configured or discovered). `gNMIc` proxy relays `Get`, `Set` and `Subscribe` RPCs but not `Capabilities`. To designate the target of an RPC, the `Prefix.Target` field within the RPC request message should be utilized. This field is versatile, accepting a single target, a comma-separated list of targets, or the wildcard character `*` for broader targeting. Here are the key points regarding target specification: - The target can be set to a target name or a comma-separated list of targets. - Setting the target to `*` implies the selection of all known targets. - If the Prefix.Target field is not explicitly set, gNMIc defaults to treating it as if `*` were specified, thus applying the action to all known targets. gNMIc optimizes resource usage by reusing existing gNMI client instances whenever possible. If an appropriate gNMI client does not already exist, gNMIc will create a new instance as required. ### Usage `gnmic [global-flags] proxy` ### Configuration The Proxy behavior is controlled using the `gnmi-server` section of the main config file: ```yaml gnmi-server: # the address the gNMI server will listen to address: :57400 # tls config tls: # string, path to the CA certificate file, # this certificate is used to verify the clients certificates. ca-file: # string, server certificate file. cert-file: # string, server key file. key-file: # string, one of `"", "request", "require", "verify-if-given", or "require-verify" # - request: The server requests a certificate from the client but does not # require the client to send a certificate. # If the client sends a certificate, it is not required to be valid. # - require: The server requires the client to send a certificate and does not # fail if the client certificate is not valid. # - verify-if-given: The server requests a certificate, # does not fail if no certificate is sent. # If a certificate is sent it is required to be valid. # - require-verify: The server requires the client to send a valid certificate. # # if no ca-file is present, `client-auth` defaults to ""` # if a ca-file is set, `client-auth` defaults to "require-verify"` client-auth: "" max-subscriptions: 64 # maximum number of active Get/Set RPCs max-unary-rpc: 64 # defines the maximum msg size (in bytes) the server can receive, # defaults to 4MB max-recv-msg-size: # defines the maximum msg size (in bytes) the server can send, # default to MaxInt32 (2147483647 bytes or 2.147483647 Gb) max-send-msg-size: # defines the maximum number of streams per streaming RPC. max-concurrent-streams: # defines the TCP keepalive tiem and interval for client connections, # if unset it is enabled based on the OS. If negative it is disabled. tcp-keepalive: # set keepalive and max-age parameters on the server-side. keepalive: # MaxConnectionIdle is a duration for the amount of time after which an # idle connection would be closed by sending a GoAway. Idleness duration is # defined since the most recent time the number of outstanding RPCs became # zero or the connection establishment. # The current default value is infinity. max-connection-idle: # MaxConnectionAge is a duration for the maximum amount of time a # connection may exist before it will be closed by sending a GoAway. A # random jitter of +/-10% will be added to MaxConnectionAge to spread out # connection storms. # The current default value is infinity. max-connection-age: # MaxConnectionAgeGrace is an additive period after MaxConnectionAge after # which the connection will be forcibly closed. # The current default value is infinity. max-connection-age-grace: # After a duration of this time if the server doesn't see any activity it # pings the client to see if the transport is still alive. # If set below 1s, a minimum value of 1s will be used instead. # The current default value is 2 hours. time: 120m # After having pinged for keepalive check, the server waits for a duration # of Timeout and if no activity is seen even after that the connection is # closed. # The current default value is 20 seconds. timeout: 20s # defines the minimum allowed sample interval, this value is used when the received sample-interval # is greater than zero but lower than this minimum value. min-sample-interval: 1ms # defines the default sample interval, # this value is used when the received sample-interval is zero within a stream/sample subscription. default-sample-interval: 1s # defines the minimum heartbeat-interval # this value is used when the received heartbeat-interval is greater than zero but # lower than this minimum value min-heartbeat-interval: 1s # enables the collection of Prometheus gRPC server metrics enable-metrics: false # enable additional debug logs debug: false # Enables Consul service registration service-registration: # Consul server address, default to localhost:8500 address: # Consul Data center, defaults to dc1 datacenter: # Consul username, to be used as part of HTTP basicAuth username: # Consul password, to be used as part of HTTP basicAuth password: # Consul Token, is used to provide a per-request ACL token # which overrides the agent's default token token: # gnmi server service check interval, only TTL Consul check is enabled # defaults to 5s check-interval: # Maximum number of failed checks before the service is deleted by Consul # defaults to 3 max-fail: # Consul service name name: # List of tags to be added to the service registration, # if available, the instance-name and cluster-name will be added as tags, # in the format: gnmic-instance=$instance-name and gnmic-cluster=$cluster-name tags: ``` ### Example #### simple proxy This config start gNMIc as a gNMI proxy serving 2 targets `router1` and `router2` ```yaml gnmi-server: address: :57401 targets: router1: skip-verify: true router2: skip-verify: true ``` ```shell gnmic --config gnmic.yaml proxy ``` #### proxy with target discovery ```yaml gnmi-server: address: :57401 loader: type: file path: targets.yaml ``` ```shell gnmic --config gnmic.yaml proxy ``` #### proxy with service registration ```yaml gnmi-server: address: gnmi-proxy-address:57401 service-registration: name: proxy address: consul-server:8500 loader: type: file path: targets.yaml ``` ```shell gnmic --config gnmic.yaml proxy ``` ================================================ FILE: docs/cmd/set.md ================================================ ## Description The `set` command represents the [gNMI Set RPC](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L62). It is used to send a [Set Request](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L339) to the specified target(s) and expects one [Set Response](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L356) per target. Set RPC allows the client to [modify the state](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#34-modifying-state) of data on the target. The data specified referenced by a path can be [updated, replaced or deleted](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#343-transactions). !!! note It is possible to combine `update`, `replace` and `delete` in a single `gnmic set` command. ## Usage `gnmic [global-flags] set [local-flags]` The Set Request can be any of (or a combination of) update, replace or/and delete operations. ## Flags ### prefix The `--prefix` flag sets a common [prefix](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#241-path-prefixes) to all paths specified using the local `--path` flag. Defaults to `""`. If a user needs to provide [origin](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths) information to the Path message, the following pattern should be used for the path string: `"origin:path"`: !!! note The path after the origin value has to start with a `/` ```bash gnmic set --update "openconfig-interfaces:/interfaces/interface::::::" ``` ### target With the optional `[--target]` flag it is possible to supply the [path target](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#2221-path-target) information in the prefix field of a SetRequest message. ### dry-run The `--dry-run` flag allow to run a Set request without sending it to the targets. This is useful while developing templated Set requests. ### delete The `--delete` flag allows creating a [SetRequest.Delete](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L337) as part of teh SetRequest message. ### replace The `--replace` flag allows creating a [SetRequest.Replace](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L338) as part of a SetRequest message. It is expected to be in the format `$path:::$type:::$value`, where `$path` is the gNMI path of the object to replace, `$type` is the type of the value and `$value` is the replacement value. ### update The `--update` flag allows creating a [SetRequest.Update](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L339) as part of a SetRequest message. It is expected to be in the format `$path:::$type:::$value`, where `$path` is the gNMI path of the object to update, `$type` is the type of the value and `$value` is the update value. ### replace-path and replace-value The `--replace-path` and `--replace-value` flags are equivalent to the `--replace` flag, where the path and value are split and the type is deduced from the `[-e | --encoding]` global flag. ### update-path and update-value The `--update-path` and `--update-value` flags are equivalent to the `--update` flag, where the path and value are split and the type is deduced from the `[-e | --encoding]` global flag. ### replace-path and replace-file The `--replace-path` and `--replace-file` flags are equivalent to the `--replace` flag, where the path and value are split and the type is deduced from the `[-e | --encoding]` global flag. ### update-path and update-file The `--update-path` and `--update-file` flags are equivalent to the `--update` flag, where the path and value are split and the type is deduced from the `[-e | --encoding]` global flag. ### replace-cli The `--replace-cli` flag allows setting a [SetRequest.Replace](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L338) as part of a SetRequest message. It expects a single CLI command which will form the value path of the Replace, the path will be set to the CLI origin `cli`. ### replace-cli-file The `--replace-cli` flag allows setting a [SetRequest.Replace](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L338) as part of a SetRequest message. It expects a file containing one or multiple CLI commands which will form the value path of the Replace, the path will be set to the CLI origin `cli`. ### update-cli The `--update-cli` flag allows setting a [SetRequest.Update](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L339) as part of a SetRequest message. It expects a single CLI command which will form the value path of the Replace, the path will be set to the CLI origin `cli`. ### update-cli-file The `--update-cli` flag allows setting a [SetRequest.Update](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L339) as part of a SetRequest message. It expects a file containing one or multiple CLI commands which will form the value path of the Replace, the path will be set to the CLI origin `cli`. ### request-file and request-vars See [this section](#templated-set-request-file) below. ### commit-id The `--commit-id` flag sets the commit ID when the client needs to perform a commit confirmed set request as per: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-commit-confirmed.md ### commit-request The `--commit-request` flag is used together with the `--commit-id` flag to set the commit action to `Request`, essentially starting a commit request. ### commit-confirm The `--commit-confirm` flag is used together with the `--commit-id` flag to confirm an already started commit confirmed transaction. ### commit-cancel The `--commit-cancel` flag is used together with the `--commit-id` flag to cancel an already started commit confirmed transaction. ### rollback-duration The `--rollback-duration` flag is used together with the `--commit-id` flag to set the rollback duration of a commit confirmed transaction either at creation time or before the previous commit rollback expires. ## Update Request There are several ways to perform an update operation with gNMI Set RPC: #### 1. in-line update, implicit type Using both `--update-path` and `--update-value` flags, a user can update a value for a given path. ```bash gnmic set --update-path /configure/system/name --update-value router1 gnmic set --update-path /configure/router[router-name=Base]/interface[interface-name=system]/admin-state \ --update-value enable ``` The above 2 updates can be combined in the same CLI command: ```bash gnmic set --update-path /configure/system/name \ --update-value router1 \ --update-path /configure/router[router-name=Base]/interface[interface-name=system]/admin-state \ --update-value enable ``` #### 2. in-line update, explicit type Using the update flag `--update`, one can specify the path, value type and value in a single parameter using a delimiter `--delimiter`. Delimiter string defaults to `":::"`. Supported types: json, json_ietf, string, int, uint, bool, decimal, float, bytes, ascii. ```bash # path:::value-type:::value gnmic set --update /configure/system/name:::json:::router1 gnmic set --update /configure/router[router-name=Base]/interface[interface-name=system]/admin-state:::json:::enable gnmic set --update /configure/router[router-name=Base]/interface[interface-name=system]:::json:::'{"admin-state":"enable"}' ``` #### 3. update with a value from JSON or YAML file It is also possible to specify the values from a local JSON or YAML file using `--update-file` flag for the value and `--update-path` for the path. In which case the value encoding will be determined by the global flag `[ -e | --encoding ]`, both `JSON` and `JSON_IETF` are supported The file's format is identified by its extension, json: `.json` and yaml `.yaml` or `.yml`. === "interface.json" ```bash { "admin-state": "enable", "ipv4": { "primary": { "address": "1.1.1.1", "prefix-length": 32 } } } ``` ``` bash gnmic set --update-path /configure/router[router-name=Base]/interface[interface-name=system] \ --update-file interface.json ``` === "interface.yml" ```bash "admin-state": enable "ipv4": "primary": "address": 1.1.1.1 "prefix-length": 32 ``` ``` bash gnmic set --update-path /configure/router[router-name=Base]/interface[interface-name=system] \ --update-file interface.yml ``` ## Replace Request There are 3 main ways to specify a replace operation: #### 1. in-line replace, implicit type Using both `--replace-path` and `--replace-value` flags, a user can replace a value for a given path. The type of the value is implicitly set to `JSON`: ```bash gnmic set --replace-path /configure/system/name --replace-value router1 ``` ```bash gnmic set --replace-path /configure/router[router-name=Base]/interface[interface-name=system]/admin-state \ --replace-value enable ``` The above 2 commands can be packed in the same CLI command: ```bash gnmic set --replace-path /configure/system/name \ --replace-value router1 \ --replace-path /configure/router[router-name=Base]/interface[interface-name=system]/admin-state \ --replace-value enable ``` #### 2. in-line replace, explicit type Using the replace flag `--replace`, you can specify the path, value type and value in a single parameter using a delimiter `--delimiter`. Delimiter string defaults to `":::"`. Supported types: json, json_ietf, string, int, uint, bool, decimal, float, bytes, ascii. ```bash gnmic set --replace /configure/system/name:::json:::router1 ``` ```bash gnmic set --replace /configure/router[router-name=Base]/interface[interface-name=system]/admin-state:::json:::enable ``` #### 3. replace with a value from JSON or YAML file It is also possible to specify the values from a local JSON or YAML file using flag `--replace-file` for the value and `--replace-path` for the path. In which case the value encoding will be determined by the global flag `[ -e | --encoding ]`, both `JSON` and `JSON_IETF` are supported The file is identified by its extension, json: `.json` and yaml `.yaml` or `.yml`. === "interface.json" ```bash { "admin-state": "enable", "ipv4": { "primary": { "address": "1.1.1.1", "prefix-length": 32 } } } ``` === "interface.yml" ```bash "admin-state": enable "ipv4": "primary": "address": 1.1.1.1 "prefix-length": 32 ``` Then refer to the file with `--replace-file` flag ``` bash gnmic set --replace-path /configure/router[router-name=Base]/interface[interface-name=system] \ --replace-file interface.json ``` ## Delete Request A deletion operation within the Set RPC is specified using the delete flag `--delete`. It takes an XPATH pointing to the config node to be deleted: ```bash gnmic set --delete "/configure/router[router-name=Base]/interface[interface-name=dummy_interface]" ``` ## Templated Set Request file A Set Request can also be built based on one or multiple templates and (optionally) a set of variables. The variables allow to generate a Set Request file on per target basis. If no variable file is found, the execution continues and the template is assumed to be a static string. Each template specified with the flag `--request-file` is rendered against the variables defined in the file set with `--request-vars`. Each template results in a single gNMI Set Request. ```bash gnmic set --request-file --request-file --request-vars ``` ### Template Format The rendered template data can be a `JSON` or `YAML` valid string. It has 3 sections, `updates`, `replaces` and `deletes`. In each of the `updates` and `replaces`, a `path`, a `value` and an `encoding` can be configured. If not specified, `path` defaults to `/`, while `encoding` defaults to the value set with `--encoding` flag. `updates` and `replaces` result in a set of gNMI Set Updates in the Set RPC, `deletes` result in a set of gNMI paths to be deleted. The `value` can be any arbitrary data format that the target accepts, it will be encoded based on the value of "encoding". === "JSON" ```json { "updates": [ { "path": "/interface[name=ethernet-1/1]", "value": { "admin-state": "enable", "description": "to_spine1" }, "encoding": "json_ietf" }, { "path": "/interface[name=ethernet-1/2]", "value": { "admin-state": "enable", "description": "to_spine2" }, "encoding": "json_ietf" } ], "replaces": [ { "path": "/interface[name=ethernet-1/3]", "value": { "admin-state": "enable", "description": "to_spine3" } }, { "path": "/interface[name=ethernet-1/4]", "value": { "admin-state": "enable", "description": "to_spine4" } } ], "deletes" : [ "/interface[name=ethernet-1/5]", "/interface[name=ethernet-1/6]" ] } ``` === "YAML" ```yaml updates: - path: "/interface[name=ethernet-1/1]" value: admin-state: enable description: "to_spine1" encoding: "json_ietf" - path: "/interface[name=ethernet-1/2]" value: admin-state: enable description: "to_spine2" encoding: "json_ietf" replaces: - path: "/interface[name=ethernet-1/3]" value: admin-state: enable description: "to_spine3" - path: "/interface[name=ethernet-1/4]" value: admin-state: enable description: "to_spine4" deletes: - "/interface[name=ethernet-1/5]" - "/interface[name=ethernet-1/6]" ``` ### Per Target Template Variables The file `--request-file` can be written as a [Go Text template](https://golang.org/pkg/text/template/). The parsed template is loaded with additional functions from [gomplate](https://docs.gomplate.ca/). `gnmic` generates one gNMI Set request per target. The template will be rendered using variables read from the file `--request-vars`. Just like the template file, the variables file can either be a `JSON` or `YAML` formatted file. If the flag `--request-vars` is not set, `gnmic` looks for a file with the same path, name and **extension** as the `request-file`, appended with `_vars`. Within the template, the variables defined in the `--request-vars` file are accessible using the `.Vars` notation, while the target name is accessible using the `.TargetName` notation. Example request template: ```yaml replaces: {{ $target := index .Vars .TargetName }} {{- range $interface := index $target "interfaces" }} - path: "/interface[name={{ index $interface "name" }}]" encoding: "json_ietf" value: admin-state: {{ index $interface "admin-state" | default "disable" }} description: {{ index $interface "description" | default "" }} {{- range $index, $subinterface := index $interface "subinterfaces" }} subinterface: - index: {{ $index }} admin-state: {{ index $subinterface "admin-state" | default "disable" }} {{- if has $subinterface "ipv4-address" }} ipv4: address: - ip-prefix: {{ index $subinterface "ipv4-address" | toString }} {{- end }} {{- if has $subinterface "ipv6-address" }} ipv6: address: - ip-prefix: {{ index $subinterface "ipv6-address" | toString }} {{- end }} {{- end }} {{- end }} ``` The below variables file defines the input for 3 leafs: ```yaml leaf1:57400: interfaces: - name: ethernet-1/1 admin-state: "enable" description: "leaf1_to_spine1" subinterfaces: - admin-state: enable ipv4-address: 192.168.78.1/30 - name: ethernet-1/2 admin-state: "enable" description: "leaf1_to_spine2" subinterfaces: - admin-state: enable ipv4-address: 192.168.79.1/30 leaf2:57400: interfaces: - name: ethernet-1/1 admin-state: "enable" description: "leaf2_to_spine1" subinterfaces: - admin-state: enable ipv4-address: 192.168.88.1/30 - name: ethernet-1/2 admin-state: "enable" description: "leaf2_to_spine2" subinterfaces: - admin-state: enable ipv4-address: 192.168.89.1/30 leaf3:57400: interfaces: - name: ethernet-1/1 admin-state: "enable" description: "leaf3_to_spine1" subinterfaces: - admin-state: enable ipv4-address: 192.168.98.1/30 - name: ethernet-1/2 admin-state: "enable" description: "leaf3_to_spine2" subinterfaces: - admin-state: enable ipv4-address: 192.168.99.1/30 ``` Result Request file per target: === "leaf1" ```yaml updates: - path: /interface[name=ethernet-1/1] encoding: "json_ietf" value: admin-state: enable description: leaf1_to_spine1 subinterface: - index: 0 admin-state: enable ipv4: address: - ip-prefix: 192.168.78.1/30 - path: /interface[name=ethernet-1/2] encoding: "json_ietf" value: admin-state: enable description: leaf1_to_spine2 subinterface: - index: 0 admin-state: enable ipv4: address: - ip-prefix: 192.168.79.1/30 ``` === "leaf2" ```yaml updates: - path: /interface[name=ethernet-1/1] encoding: "json_ietf" value: admin-state: enable description: leaf2_to_spine1 subinterface: - index: 0 admin-state: enable ipv4: address: - ip-prefix: 192.168.88.1/30 - path: /interface[name=ethernet-1/2] encoding: "json_ietf" value: admin-state: enable description: leaf2_to_spine2 subinterface: - index: 0 admin-state: enable ipv4: address: - ip-prefix: 192.168.89.1/30 ``` === "leaf3" ```yaml updates: - path: /interface[name=ethernet-1/1] encoding: "json_ietf" value: admin-state: enable description: leaf3_to_spine1 subinterface: - index: 0 admin-state: enable ipv4: address: - ip-prefix: 192.168.98.1/30 - path: /interface[name=ethernet-1/2] encoding: "json_ietf" value: admin-state: enable description: leaf3_to_spine2 subinterface: - index: 0 admin-state: enable ipv4: address: - ip-prefix: 192.168.99.1/30 ``` ## Examples ### 1. update #### in-line value ```bash gnmic -a set --update-path /configure/system/name \ --update-value ``` #### value from JSON file ```bash cat jsonFile.json {"name": "router1"} gnmic -a set --update-path /configure/system \ --update-file ``` ```bash echo '{"name": "router1"}' | gnmic -a set \ --update-path /configure/system \ --update-file - ``` #### specify value type ```bash gnmic -a set --update /configure/system/name:::json:::router1 gnmic -a set --update /configure/system/name@json@router1 \ --delimiter @ ``` ### 2. replace ```bash cat interface.json {"address": "1.1.1.1", "prefix-length": 32} gnmic -a --insecure \ set --replace-path /configure/router[router-name=Base]/interface[interface-name=interface1]/ipv4/primary \ --replace-file interface.json ``` ```bash echo '{"address": "1.1.1.1", "prefix-length": 32}' | gnmic -a --insecure \ set --replace-path /configure/router[router-name=Base]/interface[interface-name=interface1]/ipv4/primary \ --replace-file - ``` ### 3. delete ```bash gnmic -a --insecure set --delete /configure/router[router-name=Base]/interface[interface-name=interface1] ``` ================================================ FILE: docs/cmd/subscribe.md ================================================ ### Description The `[subscribe | sub]` command represents the [gNMI Subscribe RPC](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L68). It is used to send a [Subscribe Request](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L208) to the specified target(s) and expects one or multiple [Subscribe Response](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L232) ### Usage `gnmic [global-flags] subscribe [local-flags]` ### Local Flags The subscribe command supports the following local flags: #### prefix The `[--prefix]` flag sets a common [prefix](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#241-path-prefixes) to all paths specified using the local `--path` flag. Defaults to `""`. #### path The path flag `[--path]` is used to specify the [path(s)](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths) to which the client wants to subscribe. Multiple paths can be specified by using repeated `--path` flags: ```bash gnmic sub --path "/state/ports[port-id=*]" \ --path "/state/router[router-name=*]/interface[interface-name=*]" ``` If a user needs to provide [origin](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths) information to the Path message, the following pattern should be used for the path string: `"origin:path"`: !!! note The path after the origin value has to start with a `/` ```bash gnmic sub --path "openconfig-interfaces:/interfaces/interface" ``` #### target With the optional `[--target]` flag it is possible to supply the [path target](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#2221-path-target) information in the prefix field of the SubscriptionList message. #### set-target The `[--set-target]` flag is used to set the SubscribeRequest Prefix target value to the configured target name stripped of the port number. #### model The `[--model]` flag is used to specify the schema definition modules that the target should use when extracting the data to stream back. #### qos The `[--qos]` flag specifies the packet marking that is to be used for the responses to the subscription request. Default marking is set to `20`. If qos marking is not supported by a target the marking can be disabled by setting the value to `0`. #### mode The `[--mode]` mode flag specifies the mode of subscription to be created. This may be one of: [ONCE](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#35151-once-subscriptions), [STREAM](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#35152-stream-subscriptions) or [POLL](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#35153-poll-subscriptions). It is case insensitive and defaults to `STREAM`. #### stream subscription mode The `[--stream-mode]` flag is used to specify the stream subscription mode. This may be one of: [ON_CHANGE, SAMPLE or TARGET_DEFINED](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#35152-stream-subscriptions) This flag applies only if `--mode` is set to `STREAM`. It is case insensitive and defaults to `SAMPLE`. #### sample interval The `[--sample-interval]` flag is used to specify the sample interval to be used by the target to send samples to the client. This flag applies only in case `--mode` is set to `STREAM` and `--stream-mode` is set to `SAMPLE`. Valid formats: `1s, 1m30s, 1h`. Defaults to `0s` which is the lowest interval supported by a target. #### heartbeat interval The `[--heartbeat-interval]` flag is used to specify the server heartbeat interval. The heartbeat interval value can be specified along with `ON_CHANGE` or `SAMPLE` stream subscriptions modes. * `ON_CHANGE`: The value of the data item(s) MUST be re-sent once per heartbeat interval regardless of whether the value has changed or not. * `SAMPLE`: The target MUST generate one telemetry update per heartbeat interval, regardless of whether the `--suppress-redundant` flag is set to true. #### quiet With `[--quiet]` flag set `gnmic` will not output subscription responses to `stdout`. The `--quiet` flag is useful when `gnmic` exports the received data to one of the export providers. #### suppress redundant When the `[--suppress-redundant]` flag is set to true, the target SHOULD NOT generate a telemetry update message unless the value of the path being reported on has changed since the last update was generated. This flag applies only in case `--mode` is set to `STREAM` and `--stream-mode` is set to `SAMPLE`. #### updates only When the `[--updates-only]` flag is set to true, the target MUST not transmit the current state of the paths that the client has subscribed to, but rather should send only updates to them. #### name The `[--name]` flag is used to trigger one or multiple subscriptions already defined in the configuration file see [defining subscriptions](../user_guide/subscriptions.md) #### output The `[--output]` flag is used to select one or multiple output already defined in the configuration file. Outputs defined under target take precedence over this flag, see [defining outputs](../user_guide/outputs/output_intro.md) and [defining targets](../user_guide/multi_targets) #### watch-config The `[--watch-config]` flag is used to enable automatic target loading from the configuration source at runtime. On each configuration change, gnmic reloads the list of targets, subscribes to new targets and/or deletes subscriptions to the deleted ones. Only addition and deletion of targets are currently supported, changes in an existing target config are not possible. #### backoff The `[--backoff]` flag is used to specify a duration between consecutive subscription towards targets. It defaults to `0s` meaning all subscription are started in parallel. If a locker is configured, the backoff timer is set to `100ms` by default. #### lock-retry The `[--lock-retry]` flag is a duration used to set the wait time between consecutive lock attempts. Defaults to `5s`. #### history-snapshot The `[--history-snapshot]` flag sets the snapshot value in the subscribe request [gNMI History extension](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-history.md). The value can be either nanoseconds since Unix epoch or a date in RFC3339 format. #### history-start The `[--history-start]` flag sets the start value in the subscribe request Time Range [gNMI History extension](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-history.md). The value can be either nanoseconds since Unix epoch or a date in RFC3339 format. #### history-end The `[--history-end]` flag sets the end value in the subscribe request Time Range [gNMI History extension](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-history.md). #### depth The `[--depth]` flag set the gNMI extension depth value as defined [here](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-depth.md) ### Examples #### 1. streaming, target-defined, 10s interval ```bash gnmic -a sub --path /state/port[port-id=*]/statistics ``` #### 2. streaming, sample, 30s interval ```bash gnmic -a sub --path "/state/port[port-id=*]/statistics" \ --sample-interval 30s ``` #### 3. streaming, on-change, heartbeat interval 1min ```bash gnmic -a sub --path "/state/port[port-id=*]/statistics" \ --stream-mode on-change \ --heartbeat-interval 1m ``` #### 4. once subscription ```bash gnmic -a sub --path "/state/port[port-id=*]/statistics" \ --mode once ``` ================================================ FILE: docs/deployments/clusters/containerlab/cluster_with_gnmi_server_and_prometheus_output.md ================================================ The purpose of this deployment is to achieve __redundancy__, __high-availability__ and __data aggregation__ via clustering. This deployment example includes: - A 3 instances [gNMIc cluster](../../../user_guide/HA.md), - A standalone `gNMIc` instance. - A [Prometheus](https://prometheus.io/) Server - A [Grafana](https://grafana.com/docs/) Server - A [Consul](https://www.consul.io/docs/intro) Server The leader election and target distribution is done with the help of a [Consul server](https://www.consul.io/docs/intro) All members of the cluster expose a gNMI Server that the single gNMIc instance will use to aggregate the collected data. The aggregation `gNMIc` instance exposes a Prometheus output that is registered in `Consul` and is discoverable by the Prometheus server. The whole lab is pretty much self organising: - The `gNMIc` cluster instances discover the targets dynamically using a [Docker Loader](../../../user_guide/targets/target_discovery/docker_discovery.md) - The `gNMIc` standalone instance, discovers the cluster instance using a [Consul Loader](../../../user_guide/targets/target_discovery/consul_discovery.md) - The Prometheus server discovers gNMIc's Prometheus output using [Consul Service Discovery](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config)
Deployment files: - [containerlab](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/4.gnmi-server/containerlab/gnmi-server.clab.yaml) - [gNMIc cluster config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/4.gnmi-server/containerlab/gnmic.yaml) - [gNMIc aggregator config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/4.gnmi-server/containerlab/gnmic-agg.yaml) - [Prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/4.gnmi-server/containerlab/prometheus/prometheus.yaml) - [Grafana datasource](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/4.gnmi-server/containerlab/grafana/datasources/datasource.yaml) Deploy it with: ```bash git clone https://github.com/openconfig/gnmic.git cd gnmic/examples/deployments/2.clusters/4.gnmi-server/containerlab sudo clab deploy -t gnmi-server.clab.yaml ``` ```text +----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+ | # | Name | Container ID | Image | Kind | Group | State | IPv4 Address | IPv6 Address | +----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+ | 1 | clab-lab24-agg-gnmic | 2e9cc2821b07 | ghcr.io/openconfig/gnmic:latest | linux | | running | 172.20.20.7/24 | 2001:172:20:20::7/64 | | 2 | clab-lab24-consul-agent | c17d31d5f41b | consul:latest | linux | | running | 172.20.20.2/24 | 2001:172:20:20::2/64 | | 3 | clab-lab24-gnmic1 | 3d56e09955f2 | ghcr.io/openconfig/gnmic:latest | linux | | running | 172.20.20.4/24 | 2001:172:20:20::4/64 | | 4 | clab-lab24-gnmic2 | eba24dacea36 | ghcr.io/openconfig/gnmic:latest | linux | | running | 172.20.20.3/24 | 2001:172:20:20::3/64 | | 5 | clab-lab24-gnmic3 | caf473f500f6 | ghcr.io/openconfig/gnmic:latest | linux | | running | 172.20.20.6/24 | 2001:172:20:20::6/64 | | 6 | clab-lab24-grafana | eaa224e62243 | grafana/grafana:latest | linux | | running | 172.20.20.8/24 | 2001:172:20:20::8/64 | | 7 | clab-lab24-leaf1 | 6771dc8d3786 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.10/24 | 2001:172:20:20::a/64 | | 8 | clab-lab24-leaf2 | 5cfb1cf68958 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.14/24 | 2001:172:20:20::e/64 | | 9 | clab-lab24-leaf3 | c438f734e44d | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.19/24 | 2001:172:20:20::13/64 | | 10 | clab-lab24-leaf4 | ae4321825a03 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.17/24 | 2001:172:20:20::11/64 | | 11 | clab-lab24-leaf5 | ee7a520fd844 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.18/24 | 2001:172:20:20::12/64 | | 12 | clab-lab24-leaf6 | 59c3c515ef35 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.9/24 | 2001:172:20:20::9/64 | | 13 | clab-lab24-leaf7 | 111f858b19fd | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.22/24 | 2001:172:20:20::16/64 | | 14 | clab-lab24-leaf8 | 0ecc69891eb4 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.20/24 | 2001:172:20:20::14/64 | | 15 | clab-lab24-prometheus | 357821ec726e | prom/prometheus:latest | linux | | running | 172.20.20.5/24 | 2001:172:20:20::5/64 | | 16 | clab-lab24-spine1 | 0f5f6f6dc5fa | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.13/24 | 2001:172:20:20::d/64 | | 17 | clab-lab24-spine2 | b718503d3b3f | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.15/24 | 2001:172:20:20::f/64 | | 18 | clab-lab24-spine3 | e02f18d0e3ff | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.11/24 | 2001:172:20:20::b/64 | | 19 | clab-lab24-spine4 | 3347cba3f277 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.12/24 | 2001:172:20:20::c/64 | | 20 | clab-lab24-super-spine1 | 4abc7bcaf43c | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.16/24 | 2001:172:20:20::10/64 | | 21 | clab-lab24-super-spine2 | 5b2f5f153d43 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.21/24 | 2001:172:20:20::15/64 | +----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+ ``` Check the [Prometheus Output](../../../user_guide/outputs/prometheus_output.md) and [gNMI Server](../../../user_guide/gnmi_server.md) documentation pages for more configuration options ================================================ FILE: docs/deployments/clusters/containerlab/cluster_with_influxdb_output.md ================================================ The purpose of this deployment is to achieve __redundancy__, __high-availability__ via clustering. This deployment example includes: - A 3 instances [gNMIc cluster](../../../user_guide/HA.md), - A [InfluxDB](https://www.influxdata.com/) Server - A [Grafana](https://grafana.com/docs/) Server - A [Consul](https://www.consul.io/docs/intro) Server The leader election and target distribution is done with the help of a [Consul server](https://www.consul.io/docs/intro)
Deployment files: - [containerlab](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/1.influxdb-output/containerlab/lab21.clab.yaml) - [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/1.influxdb-output/containerlab/gnmic.yaml) - [Grafana datasource](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/1.influxdb-output/containerlab/grafana/datasources/datasource.yaml) Deploy it with: ```bash git clone https://github.com/openconfig/gnmic.git cd gnmic/examples/deployments/2.clusters/1.influxdb-output/containerlab sudo clab deploy -t lab21.clab.yaml ``` ```text +----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+ | # | Name | Container ID | Image | Kind | Group | State | IPv4 Address | IPv6 Address | +----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+ | 1 | clab-lab21-consul-agent | a6f6eb70965f | consul:latest | linux | | running | 172.20.20.7/24 | 2001:172:20:20::7/64 | | 2 | clab-lab21-gnmic1 | 9758b0761431 | ghcr.io/openconfig/gnmic:latest | linux | | running | 172.20.20.5/24 | 2001:172:20:20::5/64 | | 3 | clab-lab21-gnmic2 | 6d6ae91c64bf | ghcr.io/openconfig/gnmic:latest | linux | | running | 172.20.20.2/24 | 2001:172:20:20::2/64 | | 4 | clab-lab21-gnmic3 | 5df100a9fa73 | ghcr.io/openconfig/gnmic:latest | linux | | running | 172.20.20.4/24 | 2001:172:20:20::4/64 | | 5 | clab-lab21-grafana | fe51bda1830c | grafana/grafana:latest | linux | | running | 172.20.20.3/24 | 2001:172:20:20::3/64 | | 6 | clab-lab21-influxdb | 20712484d835 | influxdb:latest | linux | | running | 172.20.20.6/24 | 2001:172:20:20::6/64 | | 7 | clab-lab21-leaf1 | ce084f636942 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.14/24 | 2001:172:20:20::e/64 | | 8 | clab-lab21-leaf2 | 5cbaba4bc9ff | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.11/24 | 2001:172:20:20::b/64 | | 9 | clab-lab21-leaf3 | a5e92ca08c7e | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.8/24 | 2001:172:20:20::8/64 | | 10 | clab-lab21-leaf4 | 1ccfe0082b15 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.12/24 | 2001:172:20:20::c/64 | | 11 | clab-lab21-leaf5 | 7fd4144277a0 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.9/24 | 2001:172:20:20::9/64 | | 12 | clab-lab21-leaf6 | cb4df0d609db | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.13/24 | 2001:172:20:20::d/64 | | 13 | clab-lab21-leaf7 | 8f09b622365f | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.19/24 | 2001:172:20:20::13/64 | | 14 | clab-lab21-leaf8 | 0ab91010b4a7 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.18/24 | 2001:172:20:20::12/64 | | 15 | clab-lab21-spine1 | 86d00f11b944 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.15/24 | 2001:172:20:20::f/64 | | 16 | clab-lab21-spine2 | 90cf49595ad2 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.20/24 | 2001:172:20:20::14/64 | | 17 | clab-lab21-spine3 | 1c694820eb88 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.16/24 | 2001:172:20:20::10/64 | | 18 | clab-lab21-spine4 | 1e3eac3de55f | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.10/24 | 2001:172:20:20::a/64 | | 19 | clab-lab21-super-spine1 | aafc478de31d | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.21/24 | 2001:172:20:20::15/64 | | 20 | clab-lab21-super-spine2 | bb27b743c97f | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.17/24 | 2001:172:20:20::11/64 | +----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+ ``` Check the [InfluxDB Output](../../../user_guide/outputs/influxdb_output.md) documentation page for more configuration options. ================================================ FILE: docs/deployments/clusters/containerlab/cluster_with_nats_input_and_prometheus_output.md ================================================ The purpose of this deployment is to achieve __redundancy__, __high-availability__ as well as __data replication__. The redundancy and high-availability are guaranteed by deploying a `gnmic` cluster. The data replication is achieved using a `NATS` server acting as both a gnmic input and output. This deployment example includes a: - 3 instances [gNMIc cluster](../../../user_guide/HA.md), - A [NATS](https://nats.io/) Server acting as both [input](../../../user_guide/inputs/nats_input.md) and [output](../../../user_guide/outputs/nats_output.md) - A [Prometheus](https://prometheus.io/) Server - A [Grafana](https://grafana.com/docs/) Server - A [Consul](https://www.consul.io/docs/intro) Server The leader election and target distribution is done with the help of a [Consul server](https://www.consul.io/docs/intro) Each `gnmic` instance outputs the streamed gNMI data to NATS, and reads back all the data from the same NATS server (including its own), This effectively guarantees that each instance holds the data streamed by the whole cluster. Like in the previous examples, each `gnmic` instance will also register its Prometheus output service in `Consul`. But before doing so, it will attempt to acquire a key lock `gnmic/$CLUSTER_NAME/prometheus-output`, (`use-lock: true`) ```yaml prom-output: type: prometheus listen: ":9804" service-registration: address: consul-agent:8500 use-lock: true # <=== ``` Since only one instance can hold a lock, only one prometheus output is registered, so only one output is scraped by Prometheus.
Deployment files: - [containerlab](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/3.nats-input-prometheus-output/containerlab/lab23.clab.yaml) - [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/3.nats-input-prometheus-output/containerlab/gnmic.yaml) - [prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/3.nats-input-prometheus-output/containerlab/prometheus/prometheus.yaml) - [Grafana datasource](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/3.nats-input-prometheus-output/containerlab/grafana/datasources/datasource.yaml) Deploy it with: ```bash git clone https://github.com/openconfig/gnmic.git cd gnmic/examples/deployments/2.clusters/3.nats-input-prometheus-output/containerlab sudo clab deploy -t lab23.clab.yaml ``` ```text +----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+ | # | Name | Container ID | Image | Kind | Group | State | IPv4 Address | IPv6 Address | +----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+ | 1 | clab-lab23-consul-agent | cfdaf19e9435 | consul:latest | linux | | running | 172.20.20.8/24 | 2001:172:20:20::8/64 | | 2 | clab-lab23-gnmic1 | 7e2a4060a1ae | ghcr.io/openconfig/gnmic:latest | linux | | running | 172.20.20.3/24 | 2001:172:20:20::3/64 | | 3 | clab-lab23-gnmic2 | 9e27e4620104 | ghcr.io/openconfig/gnmic:latest | linux | | running | 172.20.20.4/24 | 2001:172:20:20::4/64 | | 4 | clab-lab23-gnmic3 | bb7471eb5f49 | ghcr.io/openconfig/gnmic:latest | linux | | running | 172.20.20.5/24 | 2001:172:20:20::5/64 | | 5 | clab-lab23-grafana | 3fbf7755c49e | grafana/grafana:latest | linux | | running | 172.20.20.2/24 | 2001:172:20:20::2/64 | | 6 | clab-lab23-leaf1 | a61624d5312b | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.21/24 | 2001:172:20:20::15/64 | | 7 | clab-lab23-leaf2 | ef86f701b379 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.14/24 | 2001:172:20:20::e/64 | | 8 | clab-lab23-leaf3 | 352433a2ab3b | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.22/24 | 2001:172:20:20::16/64 | | 9 | clab-lab23-leaf4 | 5ddba813d36f | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.19/24 | 2001:172:20:20::13/64 | | 10 | clab-lab23-leaf5 | aad20f4b9969 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.11/24 | 2001:172:20:20::b/64 | | 11 | clab-lab23-leaf6 | 757c76527a75 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.15/24 | 2001:172:20:20::f/64 | | 12 | clab-lab23-leaf7 | d85e94aaa0dd | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.10/24 | 2001:172:20:20::a/64 | | 13 | clab-lab23-leaf8 | ef6210c0e5aa | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.20/24 | 2001:172:20:20::14/64 | | 14 | clab-lab23-nats | f1a1f351bbf8 | nats:latest | linux | | running | 172.20.20.6/24 | 2001:172:20:20::6/64 | | 15 | clab-lab23-prometheus | f7f194a934c5 | prom/prometheus:latest | linux | | running | 172.20.20.7/24 | 2001:172:20:20::7/64 | | 16 | clab-lab23-spine1 | ddbf4e804097 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.16/24 | 2001:172:20:20::10/64 | | 17 | clab-lab23-spine2 | f48323a4de88 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.17/24 | 2001:172:20:20::11/64 | | 18 | clab-lab23-spine3 | 2a65eed26a7e | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.18/24 | 2001:172:20:20::12/64 | | 19 | clab-lab23-spine4 | ea59d0e5d9ed | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.12/24 | 2001:172:20:20::c/64 | | 20 | clab-lab23-super-spine1 | 37af6cd04dd8 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.9/24 | 2001:172:20:20::9/64 | | 21 | clab-lab23-super-spine2 | 3408891a0718 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.13/24 | 2001:172:20:20::d/64 | +----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+ ``` Check the [NATS Output](../../../user_guide/outputs/nats_output.md), [NATS Input](../../../user_guide/inputs/nats_input.md) and [Prometheus Output](../../../user_guide/outputs/influxdb_output.md) documentation pages for more configuration options. ================================================ FILE: docs/deployments/clusters/containerlab/cluster_with_prometheus_output.md ================================================ The purpose of this deployment is to achieve __redundancy__, __high-availability__ via clustering. This deployment example includes: - A 3 instances [gNMIc cluster](../../../user_guide/HA.md), - A [Prometheus](https://prometheus.io/) Server - A [Grafana](https://grafana.com/docs/) Server - A [Consul](https://www.consul.io/docs/intro) Server The leader election and target distribution is done with the help of a [Consul server](https://www.consul.io/docs/intro) `gnmic` will also register its Prometheus output service in `Consul` so that Prometheus can discover which Prometheus servers are available to be scraped.
Deployment files: - [containerlab](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/2.prometheus-output/containerlab/lab22.clab.yaml) - [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/2.prometheus-output/containerlab/gnmic.yaml) - [Prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/2.prometheus-output/containerlab/prometheus/prometheus.yaml) - [Grafana datasource](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/2.prometheus-output/containerlab/grafana/datasources/datasource.yaml) Deploy it with: ```bash git clone https://github.com/openconfig/gnmic.git cd gnmic/examples/deployments/2.clusters/2.prometheus-output/containerlab sudo clab deploy -t lab22.clab.yaml ``` ```text +----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+ | # | Name | Container ID | Image | Kind | Group | State | IPv4 Address | IPv6 Address | +----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+ | 1 | clab-lab22-consul-agent | 542169159f8b | consul:latest | linux | | running | 172.20.20.2/24 | 2001:172:20:20::2/64 | | 2 | clab-lab22-gnmic1 | c04b2b597e7a | ghcr.io/openconfig/gnmic:latest | linux | | running | 172.20.20.4/24 | 2001:172:20:20::4/64 | | 3 | clab-lab22-gnmic2 | 49604280d82d | ghcr.io/openconfig/gnmic:latest | linux | | running | 172.20.20.3/24 | 2001:172:20:20::3/64 | | 4 | clab-lab22-gnmic3 | 49e910460cad | ghcr.io/openconfig/gnmic:latest | linux | | running | 172.20.20.5/24 | 2001:172:20:20::5/64 | | 5 | clab-lab22-grafana | c0a37b012d29 | grafana/grafana:latest | linux | | running | 172.20.20.7/24 | 2001:172:20:20::7/64 | | 6 | clab-lab22-leaf1 | c6429b499c11 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.19/24 | 2001:172:20:20::13/64 | | 7 | clab-lab22-leaf2 | 62f235b39a62 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.17/24 | 2001:172:20:20::11/64 | | 8 | clab-lab22-leaf3 | 78d3b4e62a6b | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.11/24 | 2001:172:20:20::b/64 | | 9 | clab-lab22-leaf4 | 8c5d80b4d916 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.13/24 | 2001:172:20:20::d/64 | | 10 | clab-lab22-leaf5 | 508d4d2389b4 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.16/24 | 2001:172:20:20::10/64 | | 11 | clab-lab22-leaf6 | 14ce19a8c5da | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.9/24 | 2001:172:20:20::9/64 | | 12 | clab-lab22-leaf7 | c4f6e586baa3 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.20/24 | 2001:172:20:20::14/64 | | 13 | clab-lab22-leaf8 | 1e00e6346bf1 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.12/24 | 2001:172:20:20::c/64 | | 14 | clab-lab22-prometheus | 5ed38ce63113 | prom/prometheus:latest | linux | | running | 172.20.20.6/24 | 2001:172:20:20::6/64 | | 15 | clab-lab22-spine1 | 38247b0f81e7 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.10/24 | 2001:172:20:20::a/64 | | 16 | clab-lab22-spine2 | 76bf66748acd | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.21/24 | 2001:172:20:20::15/64 | | 17 | clab-lab22-spine3 | 5c8776e2fc77 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.15/24 | 2001:172:20:20::f/64 | | 18 | clab-lab22-spine4 | de67e5b92f36 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.14/24 | 2001:172:20:20::e/64 | | 19 | clab-lab22-super-spine1 | 00f0aee0265a | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.18/24 | 2001:172:20:20::12/64 | | 20 | clab-lab22-super-spine2 | 418888eb7325 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.8/24 | 2001:172:20:20::8/64 | +----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+-----------------------+ ``` Check the [Prometheus Output](../../../user_guide/outputs/prometheus_output.md) documentation page for more configuration options. ================================================ FILE: docs/deployments/clusters/docker-compose/cluster_with_influxdb_output.md ================================================ The purpose of this deployment is to achieve __redundancy__, __high-availability__ via clustering. This deployment example includes: - A 3 instances [`gnmic` cluster](../../../user_guide/HA.md), - A single [InfluxDB output](../../../user_guide/outputs/influxdb_output.md) The leader election and target distribution is done with the help of a [Consul server](https://www.consul.io/docs/intro)
Deployment files: - [Docker Compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/1.influxdb-output/docker-compose/docker-compose.yaml) - [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/1.influxdb-output/docker-compose/gnmic.yaml) Download the files, update the `gnmic` config files with the desired subscriptions and targets. Deploy it with: ```bash sudo docker-compose up -d ``` Check the [InfluxDB Output](../../../user_guide/outputs/influxdb_output.md) documentation page for more configuration options. ================================================ FILE: docs/deployments/clusters/docker-compose/cluster_with_nats_input_and_prometheus_output.md ================================================ The purpose of this deployment is to achieve __redundancy__, __high-availability__ as well as __data replication__. The redundancy and high-availability are guaranteed by deploying a `gnmic` cluster. The data replication is achieved using a `NATS` server acting as both a gnmic input and output. This deployment example includes a: - 3 instances [`gnmic` cluster](../../../user_guide/HA.md), - A NATS [input](../../../user_guide/inputs/nats_input.md) and [output](../../../user_guide/outputs/nats_output.md) - A [Prometheus output](../../../user_guide/outputs/prometheus_output.md) The leader election and target distribution is done with the help of a [Consul server](https://www.consul.io/docs/intro) Each `gnmic` instance outputs the streamed gNMI data to NATS, and reads back all the data from the same NATS server (including its own), This effectively guarantees that each instance holds the data streamed by the whole cluster. Like in the previous examples, each `gnmic` instance will also register its Prometheus output service in `Consul`. But before doing so, it will attempt to acquire a key lock `gnmic/$CLUSTER_NAME/prometheus-output`, (`use-lock: true`) ```yaml prom-output: type: prometheus listen: ":9804" service-registration: address: consul-agent:8500 use-lock: true # <=== ``` Since only one instance can hold a lock, only one prometheus output is registered, so only one output is scraped by Prometheus.
Deployment files: - [Docker Compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/3.nats-input-prometheus-output/docker-compose/docker-compose.yaml) - [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/3.nats-input-prometheus-output/docker-compose/gnmic.yaml) - [Prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/3.nats-input-prometheus-output/docker-compose/prometheus/prometheus.yaml) Download the files, update the `gnmic` config files with the desired subscriptions and targets. !!! note The targets outputs list should include the nats output name Deploy it with: ```bash sudo docker-compose up -d ``` Check the [NATS Output](../../../user_guide/outputs/nats_output.md), [NATS Input](../../../user_guide/inputs/nats_input.md) and [Prometheus Output](../../../user_guide/outputs/influxdb_output.md) documentation pages for more configuration options. ================================================ FILE: docs/deployments/clusters/docker-compose/cluster_with_prometheus_output.md ================================================ The purpose of this deployment is to achieve __redundancy__, __high-availability__ via clustering. This deployment example includes: - A 3 instances [`gnmic` cluster](../../../user_guide/HA.md), - A single [Prometheus output](../../../user_guide/outputs/prometheus_output.md) The leader election and target distribution is done with the help of a [Consul server](https://www.consul.io/docs/intro) `gnmic` will also register its Prometheus output service in `Consul` so that Prometheus can discover which Prometheus servers are available to be scraped
Deployment files: - [Docker Compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/2.prometheus-output/docker-compose/docker-compose.yaml) - [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/2.prometheus-output/docker-compose/gnmic.yaml) - [Prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/2.prometheus-output/docker-compose/prometheus/prometheus.yaml) Download the files, update the `gnmic` config files with the desired subscriptions and targets. Deploy it with: ```bash sudo docker-compose up -d ``` Check the [Prometheus Output](../../../user_guide/outputs/prometheus_output.md) documentation page for more configuration options. ================================================ FILE: docs/deployments/clusters/kubernetes/cluster_with_prometheus_output.md ================================================ The purpose of this deployment is to achieve __redundancy__, __high-availability__ using Kubernetes and `gnmic`'s internal clustering mechanism. This deployment example includes: - A 3 instances [`gnmic` cluster](../../../user_guide/HA.md), - A single [Prometheus output](../../../user_guide/outputs/prometheus_output.md) The leader election and target distribution is done with the help of a [Consul server](https://www.consul.io/docs/intro) `gnmic` can be discovered by `Prometheus` using Kubernetes service discovery. Kubernetes uses a [headless service](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services) with a StatefulSet to disable the internal load balancing across multiple pods of the same StatefulSet and allow `Prometheus` to discover all instances of `gnmic`.
[Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator#quickstart) must be installed prior to `gnmic` deployment. (Can also be installed via [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) helm chart or [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus)) Deployment files: - [gnmic](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/2.prometheus-output/kubernetes/gnmic-app) - [consul](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/2.prometheus-output/kubernetes/consul) - [prometheus servicemonitor](https://github.com/openconfig/gnmic/blob/main/examples/deployments/2.clusters/2.prometheus-output/kubernetes/prometheus/servicemonitor.yaml) Download the files, update the `gnmic` ConfigMap with the desired subscriptions and targets and make sure that `prometheus servicemonitor` is in a namespace or has a label that `Prometheus operator` is watching. Deploy it with: ```bash kubectl create ns gnmic kubectl apply -n gnmic -f kubernetes/consul kubectl apply -n gnmic -f kubernetes/gnmic-app # Before deploying the Prometheus ServiceMonitor # Install Prometheus operator or kube-prometheus or kube-prometheus-stack helm chart # Otherwise the command will fail kubectl apply -f kubernetes/prometheus ``` Check the [Prometheus Output](../../../user_guide/outputs/prometheus_output.md) documentation page for more configuration options. ================================================ FILE: docs/deployments/deployments_intro.md ================================================ There are numerous ways `gnmic` can be deployed, each fulfilling a specific use case. Whether it is gNMI telemetry collection and export to a single output, or clustered data pipelines with high availability and redundancy, the below examples should cover the most common use cases. In this section you will find multiple deployment examples, using [docker-compose](https://docs.docker.com/compose/) or [containerlab](https://containerlab.srlinux.dev/). Each deployment comes with: - a `docker-compose` or `clab` file - one or multiple `gnmic` configuration file(s) - extra configuration files if required by the use case (e.g: prometheus, grafana,...) The [containerlab](https://containerlab.srlinux.dev/) examples come with a fabric deployed using Nokia's [SR Linux](https://learn.srlinux.dev) If you don't find an example that fits your needs, feel free to open an issue on [github](https://github.com/openconfig/gnmic/issues/new) ### Single Instance These examples showcase single `gnmic` instance deployments with the most commonly used outputs - NATS output: [clab](single-instance/containerlab/nats-output.md), [docker-compose](single-instance/docker-compose/nats-output.md) - Kafka output: [clab](single-instance/containerlab/kafka-output.md), [docker-compose](single-instance/docker-compose/kafka-output.md) - InfluxDB output: [clab](single-instance/containerlab/influxdb-output.md), [docker-compose](single-instance/docker-compose/influxdb-output.md) - Prometheus output: [clab](single-instance/containerlab/prometheus-output.md), [docker-compose](single-instance/docker-compose/prometheus-output.md) - Multiple outputs: [clab](single-instance/containerlab/multiple-outputs.md), [docker-compose](single-instance/docker-compose/multiple-outputs.md) ### Clusters `gnmic` can also be deployed in [clustered mode](../user_guide/HA.md) to either load share the targets connections between multiple instances and offer connection resiliency, and/or replicate the collected data among all the cluster members - InfluxDB output: [clab](clusters/containerlab/cluster_with_influxdb_output.md), [docker-compose](clusters/docker-compose/cluster_with_influxdb_output.md) - Prometheus output: [clab](clusters/containerlab/cluster_with_prometheus_output.md), [docker-compose](clusters/docker-compose/cluster_with_prometheus_output.md) - Prometheus output with data replication: [clab](clusters/containerlab/cluster_with_nats_input_and_prometheus_output.md), [docker-compose](clusters/docker-compose/cluster_with_nats_input_and_prometheus_output.md) ### Pipelines Building data pipelines using `gnmic` is achieved using the [outputs](../user_guide/outputs/output_intro.md) and [inputs](../user_guide/inputs/input_intro.md) plugins. You will be able to process the data in a serial fashion, split it for parallel processing or mirror it to create a forked pipeline. - NATS to Prometheus: [docker-compose](pipelines/docker-compose/nats_prometheus.md) - NATS to InfluxDB: [docker-compose](pipelines/docker-compose/nats_influxdb.md) - Clustered pipeline: [docker-compose](pipelines/docker-compose/gnmic_cluster_nats_prometheus.md) - Forked pipeline: [docker-compose](pipelines/docker-compose/forked_pipeline.md) ================================================ FILE: docs/deployments/pipelines/docker-compose/forked_pipeline.md ================================================ The purpose of this deployment is to create a forked data pipeline using `NATS` , `Influxdb` and `Prometheus` The example includes 3 `gnmic` instances. - The first, called `collector`, is responsible for streaming the gNMI data from the targets and output it to a `NATS` server. - The second and third, called `relay1` and `relay2`, reads the data from `NATS` and writes it to either `InfluxDB` or `Prometheus` This deployment enables a few use cases: - Apply different [processors](../../../user_guide/event_processors/intro.md) by the collector and relay. - Scale the collector and relay separately, see this [example](gnmic_cluster_nats_prometheus.md) for a scaled-out version. - Fork the data into a separate pipeline for a different use case.
Deployment files: - [docker compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/docker-compose/3.pipelines/4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/docker-compose.yaml) - [gnmic collector config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/docker-compose/3.pipelines/4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/gnmic-collector.yaml) - [gnmic relay1 config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/docker-compose/3.pipelines/4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/gnmic-relay1.yaml) - [gnmic relay2 config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/docker-compose/3.pipelines/4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/gnmic-relay2.yaml) - [prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/docker-compose/3.pipelines//4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/prometheus/prometheus.yaml) Download the files, update the `gnmic` collector config files with the desired subscriptions and targets. Deploy it with: ```bash sudo docker-compose up -d ``` Check the [Prometheus Output](../../../user_guide/outputs/prometheus_output.md) and [NATS Input](../../../user_guide/inputs/nats_input.md) documentation page for more configuration options ================================================ FILE: docs/deployments/pipelines/docker-compose/gnmic_cluster_nats_prometheus.md ================================================ The purpose of this deployment is to create a clustered data pipeline using `NATS` and `Prometheus`. Achieving __redundancy__, __high-availability__ and __data replication__, all in clustered data pipeline. The example is divided in 2 parts: - Clustered collectors and single relay - Clustered collectors and clustered relays These 2 examples are essentially scaled-out versions of this [example](nats_prometheus.md) ### Clustered collectors and single relay
Deployment files: - [docker compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/docker-compose/docker-compose.yaml) - [gnmic collector config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/docker-compose/gnmic-collector.yaml) - [gnmic relay config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/docker-compose/gnmic-relay.yaml) - [prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/docker-compose/prometheus/prometheus.yaml) Download the files, update the `gnmic` collectors config files with the desired subscriptions and targets. Deploy it with: ```bash sudo docker-compose up -d ``` Check the [Prometheus Output](../../../user_guide/outputs/prometheus_output.md) and [NATS Input](../../../user_guide/inputs/nats_input.md) documentation page for more configuration options ### Clustered collectors and clustered relays
Deployment files: - [docker compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/docker-compose/docker-compose.yaml) - [gnmic collector config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/docker-compose/gnmic-collector.yaml) - [gnmic relay config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/docker-compose/gnmic-relay.yaml) - [prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/docker-compose/prometheus/prometheus.yaml) Download the files, update the `gnmic` collectors config files with the desired subscriptions and targets. Deploy it with: ```bash sudo docker-compose up -d ``` Check the [Prometheus Output](../../../user_guide/outputs/prometheus_output.md) and [NATS Input](../../../user_guide/inputs/nats_input.md) documentation page for more configuration options ================================================ FILE: docs/deployments/pipelines/docker-compose/nats_influxdb.md ================================================ The purpose of this deployment is to create data pipeline using `NATS` and `InfluxDB` The example includes 2 `gnmic` instances. - The first, called `collector`, is responsible for streaming the gNMI data from the targets and output it to a `NATS` server. - The second, called `relay`, reads the data from `NATS` and writes it to `InfluxDB` This deployment enables a few use cases: - Apply different [processors](../../../user_guide/event_processors/intro.md) by the collector and relay. - Scale the collector and relay separately, see this [example](gnmic_cluster_nats_prometheus.md) for a scaled-out version. - Fork the data into a separate pipeline for a different use case.
Deployment files: - [docker compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/2.gnmic-nats-gnmic-influxdb/docker-compose/docker-compose.yaml) - [gnmic collector config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/2.gnmic-nats-gnmic-influxdb/docker-compose/gnmic-collector.yaml) - [gnmic relay config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/2.gnmic-nats-gnmic-influxdb/docker-compose/gnmic-relay.yaml) Download the files, update the `gnmic` collector config files with the desired subscriptions and targets. Deploy it with: ```bash sudo docker-compose up -d ``` Check the [InfluxDB Output](../../../user_guide/outputs/influxdb_output.md) and [NATS Input](../../../user_guide/inputs/nats_input.md) documentation page for more configuration options ================================================ FILE: docs/deployments/pipelines/docker-compose/nats_prometheus.md ================================================ The purpose of this deployment is to create data pipeline using `NATS` and `Prometheus` The example includes 2 `gnmic` instances. - The first, called `collector`, is responsible for streaming the gNMI data from the targets and output it to a `NATS` server. - The second, called `relay`, reads the data from `NATS` and writes it to `Prometheus` This deployment enables a few use cases: - Apply different [processors](../../../user_guide/event_processors/intro.md) by the collector and relay. - Scale the collector and relay separately, see this [example](gnmic_cluster_nats_prometheus.md) for a scaled-out version. - Fork the data into a separate pipeline for a different use case.
Deployment files: - [docker compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/docker-compose/docker-compose.yaml) - [gnmic collector config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/docker-compose/gnmic-collector.yaml) - [gnmic relay config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/docker-compose/gnmic-relay.yaml) Download the files, update the `gnmic` collector config files with the desired subscriptions and targets. Deploy it with: ```bash sudo docker-compose up -d ``` Check the [Prometheus Output](../../../user_guide/outputs/prometheus_output.md) and [NATS Input](../../../user_guide/inputs/nats_input.md) documentation page for more configuration options ================================================ FILE: docs/deployments/single-instance/containerlab/influxdb-output.md ================================================ The purpose of this deployment is to collect gNMI data and write it to an `InfluxDB` instance. This deployment example includes a single `gnmic` instance, a single [InfluxDB](https://www.influxdata.com/) server acting as an [InfluxDB output](../../../user_guide/outputs/influxdb_output.md) and a [Grafana](https://grafana.com/docs/) server
Deployment files: - [containerlab](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/3.influxdb-output/containerlab/influxdb.clab.yaml) - [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/3.influxdb-output/containerlab/gnmic.yaml) - [Grafana datasource](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/3.influxdb-output/containerlab/grafana/datasources/datasource.yaml) The deployed SR Linux nodes are discovered using Docker API and are loaded as gNMI targets. Edit the subscriptions section if needed. Deploy it with: ```bash git clone https://github.com/openconfig/gnmic.git cd gnmic/examples/deployments/1.single-instance/3.influxdb-output/containerlab sudo clab deploy -t influxdb.clab.yaml ``` ```text +---+---------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+ | # | Name | Container ID | Image | Kind | Group | State | IPv4 Address | IPv6 Address | +---+---------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+ | 1 | clab-lab13-gnmic | 1ee4c75ff443 | ghcr.io/openconfig/gnmic:latest | linux | | running | 172.20.20.3/24 | 2001:172:20:20::3/64 | | 2 | clab-lab13-grafana | a932207780bb | grafana/grafana:latest | linux | | running | 172.20.20.2/24 | 2001:172:20:20::2/64 | | 3 | clab-lab13-influxdb | 0768ba6ca10b | influxdb:latest | linux | | running | 172.20.20.4/24 | 2001:172:20:20::4/64 | | 4 | clab-lab13-leaf1 | e0e2045fca7f | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.7/24 | 2001:172:20:20::7/64 | | 5 | clab-lab13-leaf2 | 75b8978e734c | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.6/24 | 2001:172:20:20::6/64 | | 6 | clab-lab13-leaf3 | 7b03eed78f5d | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.5/24 | 2001:172:20:20::5/64 | | 7 | clab-lab13-leaf4 | 19007ce81e04 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.8/24 | 2001:172:20:20::8/64 | | 8 | clab-lab13-spine1 | c044fc51196d | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.10/24 | 2001:172:20:20::a/64 | | 9 | clab-lab13-spine2 | bcfa52ad2772 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.9/24 | 2001:172:20:20::9/64 | +---+---------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+ ``` Check the [InfluxDB Output](../../../user_guide/outputs/influxdb_output.md) documentation page for more configuration options. ================================================ FILE: docs/deployments/single-instance/containerlab/kafka-output.md ================================================ The purpose of this deployment is to collect gNMI data and write it to a `Kafka` broker. Multiple 3rd Party systems (acting as a Kafka consumers) can then read the data from the `Kafka` broker for further processing. This deployment example includes a single `gnmic` instance and a single [Kafka output](../../../user_guide/outputs/kafka_output.md)
Deployment files: - [containerlab](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/2.kafka-output/containerlab/kafka.clab.yaml) - [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/2.kafka-output/containerlab/gnmic.yaml) The deployed SR Linux nodes are discovered using Docker API and are loaded as gNMI targets. Edit the subscriptions section if needed. Deploy it with: ```bash git clone https://github.com/openconfig/gnmic.git cd gnmic/examples/deployments/1.single-instance/2.kafka-output/containerlab sudo clab deploy -t kafka.clab.yaml ``` ```text +---+-----------------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+ | # | Name | Container ID | Image | Kind | Group | State | IPv4 Address | IPv6 Address | +---+-----------------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+ | 1 | clab-lab12-gnmic | e79d31f92a7a | ghcr.io/openconfig/gnmic:latest | linux | | running | 172.20.20.2/24 | 2001:172:20:20::2/64 | | 2 | clab-lab12-kafka-server | 004a338cdb3d | bitnami/kafka:latest | linux | | running | 172.20.20.4/24 | 2001:172:20:20::4/64 | | 3 | clab-lab12-leaf1 | b9269bac3adf | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.7/24 | 2001:172:20:20::7/64 | | 4 | clab-lab12-leaf2 | baaeea0ad1a6 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.8/24 | 2001:172:20:20::8/64 | | 5 | clab-lab12-leaf3 | 08127014b3cd | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.5/24 | 2001:172:20:20::5/64 | | 6 | clab-lab12-leaf4 | da037997c5ff | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.10/24 | 2001:172:20:20::a/64 | | 7 | clab-lab12-spine1 | c3bcfe40fcc7 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.9/24 | 2001:172:20:20::9/64 | | 8 | clab-lab12-spine2 | 842b259d01b0 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.6/24 | 2001:172:20:20::6/64 | | 9 | clab-lab12-zookeeper-server | 5c89e48fdff1 | bitnami/zookeeper:latest | linux | | running | 172.20.20.3/24 | 2001:172:20:20::3/64 | +---+-----------------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+ ``` Check the [Kafka Output](../../../user_guide/outputs/kafka_output.md) documentation page for more configuration options. ================================================ FILE: docs/deployments/single-instance/containerlab/multiple-outputs.md ================================================ The purpose of this deployment is to collect gNMI data and write it to multiple outputs. This deployment example includes: - A single `gnmic` instance - A [Prometheus](../../../user_guide/outputs/prometheus_output.md) Server - An [InfluxDB](../../../user_guide/outputs/influxdb_output.md) Server - A [NATS](../../../user_guide/outputs/nats_output.md) Server - A [Kafka](../../../user_guide/outputs/kafka_output.md) Server - A [File](../../../user_guide/outputs/file_output.md) output - A [Consul Agent](https://www.consul.io/docs/agent) - A [Grafana Server](https://grafana.com/docs/)
Deployment files: - [containerlab](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/5.multiple-outputs/containerlab/multiple-outputs.clab.yaml) - [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/5.multiple-outputs/containerlab/gnmic.yaml) - [Prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/5.multiple-outputs/containerlab/prometheus/prometheus.yaml) - [Grafana datasource](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/5.multiple-outputs/containerlab/grafana/datasources/datasource.yaml) Deploy it with: ```bash git clone https://github.com/openconfig/gnmic.git cd gnmic/examples/deployments/1.single-instance/5.multiple-outputs/containerlab sudo clab deploy -t multiple-outputs.clab.yaml ``` ```text +----+-----------------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+ | # | Name | Container ID | Image | Kind | Group | State | IPv4 Address | IPv6 Address | +----+-----------------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+ | 1 | clab-lab15-consul-agent | 14f864fb1da9 | consul:latest | linux | | running | 172.20.20.4/24 | 2001:172:20:20::4/64 | | 2 | clab-lab15-gnmic | cfb8bfca7547 | ghcr.io/openconfig/gnmic:latest | linux | | running | 172.20.20.6/24 | 2001:172:20:20::6/64 | | 3 | clab-lab15-grafana | 56c19565e27c | grafana/grafana:latest | linux | | running | 172.20.20.2/24 | 2001:172:20:20::2/64 | | 4 | clab-lab15-influxdb | f2d0b2186e10 | influxdb:latest | linux | | running | 172.20.20.9/24 | 2001:172:20:20::9/64 | | 5 | clab-lab15-kafka-server | efe445dbf0f0 | bitnami/kafka:latest | linux | | running | 172.20.20.7/24 | 2001:172:20:20::7/64 | | 6 | clab-lab15-leaf1 | 42d57c79385e | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.10/24 | 2001:172:20:20::a/64 | | 7 | clab-lab15-leaf2 | e4b041046779 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.11/24 | 2001:172:20:20::b/64 | | 8 | clab-lab15-leaf3 | ba87204f2678 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.13/24 | 2001:172:20:20::d/64 | | 9 | clab-lab15-leaf4 | 327461ee913e | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.15/24 | 2001:172:20:20::f/64 | | 10 | clab-lab15-nats | 0363dae05edf | nats:latest | linux | | running | 172.20.20.3/24 | 2001:172:20:20::3/64 | | 11 | clab-lab15-prometheus | 44611ebe4a03 | prom/prometheus:latest | linux | | running | 172.20.20.8/24 | 2001:172:20:20::8/64 | | 12 | clab-lab15-spine1 | 8b2b430eea87 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.12/24 | 2001:172:20:20::c/64 | | 13 | clab-lab15-spine2 | 425bea3a243e | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.14/24 | 2001:172:20:20::e/64 | | 14 | clab-lab15-zookeeper-server | 91b546eb7bf9 | bitnami/zookeeper:latest | linux | | running | 172.20.20.5/24 | 2001:172:20:20::5/64 | +----+-----------------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+ ``` Check the [gnmic outputs](../../../user_guide/outputs/output_intro.md) documentation page for more configuration options. ================================================ FILE: docs/deployments/single-instance/containerlab/nats-output.md ================================================ The purpose of this deployment is to collect gNMI data and write it to a `NATS` server. Multiple 3rd Party systems (acting as a NATS clients) can then read the data from the `NATS` server for further processing. This deployment example includes a single `gnmic` instance and a single [NATS output](../../../user_guide/outputs/nats_output.md)
Deployment files: - [containerlab](https://github.com/openconfig/gnmic/tree/main/examples/deployments/1.single-instance/1.nats-output/containerlab/nats.clab.yaml) - [gNMIc config](https://github.com/openconfig/gnmic/tree/main/examples/deployments/1.single-instance/1.nats-output/containerlab/gnmic.yaml) The deployed SR Linux nodes are discovered using Docker API and are loaded as gNMI targets. Edit the subscriptions section if needed. Deploy it with: ```bash git clone https://github.com/openconfig/gnmic.git cd gnmic/examples/deployments/1.single-instance/1.nats-output/containerlab sudo clab deploy -t nats.clab.yaml ``` ```text +---+-------------------+--------------+------------------------------+-------+-------+---------+----------------+----------------------+ | # | Name | Container ID | Image | Kind | Group | State | IPv4 Address | IPv6 Address | +---+-------------------+--------------+------------------------------+-------+-------+---------+----------------+----------------------+ | 1 | clab-lab11-gnmic | 955eaa35b730 | ghcr.io/openconfig/gnmic:latest | linux | | running | 172.20.20.3/24 | 2001:172:20:20::3/64 | | 2 | clab-lab11-leaf1 | f0f61a79124e | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.4/24 | 2001:172:20:20::4/64 | | 3 | clab-lab11-leaf2 | de714ee79856 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.9/24 | 2001:172:20:20::9/64 | | 4 | clab-lab11-leaf3 | c674b7bbb898 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.8/24 | 2001:172:20:20::8/64 | | 5 | clab-lab11-leaf4 | c37033f30e99 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.7/24 | 2001:172:20:20::7/64 | | 6 | clab-lab11-nats | ebbd346d2aee | nats:latest | linux | | running | 172.20.20.2/24 | 2001:172:20:20::2/64 | | 7 | clab-lab11-spine1 | 0fe91271bdfe | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.6/24 | 2001:172:20:20::6/64 | | 8 | clab-lab11-spine2 | 6b05f4e42cc4 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.5/24 | 2001:172:20:20::5/64 | +---+-------------------+--------------+------------------------------+-------+-------+---------+----------------+----------------------+ ``` Check the [NATS Output](../../../user_guide/outputs/nats_output.md) documentation page for more configuration options. ================================================ FILE: docs/deployments/single-instance/containerlab/prometheus-output.md ================================================ The purpose of this deployment is to collect gNMI data and make it available for scraping by a `Prometheus` client. This deployment example includes a single `gnmic` instance, a [Prometheus Server](https://prometheus.io/), a [Consul agent](https://www.consul.io/docs/agent) used by Prometheus to discover gNMIc's [Prometheus output](../../../user_guide/outputs/prometheus_output.md) and a [Grafana](https://grafana.com/docs/) server.
Deployment files: - [containerlab](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/4.prometheus-output/containerlab/prometheus.clab.yaml) - [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/4.prometheus-output/containerlab/gnmic.yaml) - [Prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/4.prometheus-output/containerlab/prometheus/prometheus.yaml) - [Grafana datasource](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/4.prometheus-output/containerlab/grafana/datasources/datasource.yaml) The deployed SR Linux nodes are discovered using Docker API and are loaded as gNMI targets. Edit the subscriptions section if needed. Deploy it with: ```bash git clone https://github.com/openconfig/gnmic.git cd gnmic/examples/deployments/1.single-instance/4.prometheus-output/containerlab sudo clab deploy -t prometheus.clab.yaml ``` ```text +----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+ | # | Name | Container ID | Image | Kind | Group | State | IPv4 Address | IPv6 Address | +----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+ | 1 | clab-lab14-consul-agent | e402b0516753 | consul:latest | linux | | running | 172.20.20.4/24 | 2001:172:20:20::4/64 | | 2 | clab-lab14-gnmic | 53943cdb8cde | ghcr.io/openconfig/gnmic:latest | linux | | running | 172.20.20.3/24 | 2001:172:20:20::3/64 | | 3 | clab-lab14-grafana | 1a57efb74f37 | grafana/grafana:latest | linux | | running | 172.20.20.2/24 | 2001:172:20:20::2/64 | | 4 | clab-lab14-leaf1 | 8343848fbd7a | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.9/24 | 2001:172:20:20::9/64 | | 5 | clab-lab14-leaf2 | 9986ff987048 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.8/24 | 2001:172:20:20::8/64 | | 6 | clab-lab14-leaf3 | 25a212fcb7a1 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.11/24 | 2001:172:20:20::b/64 | | 7 | clab-lab14-leaf4 | 025373e9f192 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.10/24 | 2001:172:20:20::a/64 | | 8 | clab-lab14-prometheus | ae9b47c49c8d | prom/prometheus:latest | linux | | running | 172.20.20.5/24 | 2001:172:20:20::5/64 | | 9 | clab-lab14-spine1 | fb9abd5b4c5c | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.7/24 | 2001:172:20:20::7/64 | | 10 | clab-lab14-spine2 | f32906f19d55 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.6/24 | 2001:172:20:20::6/64 | +----+-------------------------+--------------+------------------------------+-------+-------+---------+-----------------+----------------------+ ``` Check the [Prometheus output](../../../user_guide/outputs/prometheus_output.md) documentation page for more configuration options. ================================================ FILE: docs/deployments/single-instance/containerlab/prometheus-remote-write-output.md ================================================ The purpose of this deployment is to collect gNMI data and use [Prometheus remote write API](https://grafana.com/blog/2019/03/25/whats-new-in-prometheus-2.8-wal-based-remote-write/) to push it to different monitoring systems like [Prometheus](https://prometheus.io), [Mimir](https://grafana.com/oss/mimir/), [CortexMetrics](https://cortexmetrics.io/), [VictoriaMetrics](https://victoriametrics.com/), [Thanos](https://thanos.io/)... This deployment example includes a single `gnmic` instance, a [Prometheus Server](https://prometheus.io/), and a [Grafana](https://grafana.com/docs/) server.
Deployment files: - [containerlab](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/6.prometheus-write-output/containerlab/prom_write.clab.yaml) - [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/6.prometheus-write-output/containerlab/gnmic.yaml) - [Prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/6.prometheus-write-output/containerlab/prometheus/prometheus.yaml) - [Grafana datasource](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/6.prometheus-write-output/containerlab/grafana/datasources/datasource.yaml) The deployed SR Linux nodes are discovered using Docker API and are loaded as gNMI targets. Edit the subscriptions section if needed. Deploy it with: ```bash git clone https://github.com/openconfig/gnmic.git cd gnmic/examples/deployments/1.single-instance/6.prometheus-write-output/containerlab sudo clab deploy -t prometheus.clab.yaml ``` ```text +----+-------------------------+--------------+------------------------------+-------+---------+-----------------+--------------+ | # | Name | Container ID | Image | Kind | State | IPv4 Address | IPv6 Address | +----+-------------------------+--------------+------------------------------+-------+---------+-----------------+--------------+ | 1 | clab-lab16-consul-agent | 10054b55e722 | consul:latest | linux | running | 172.19.19.3/24 | N/A | | 2 | clab-lab16-gnmic | 1eeab0771731 | ghcr.io/openconfig/gnmic:latest | linux | running | 172.19.19.5/24 | N/A | | 3 | clab-lab16-grafana | fd09146937ef | grafana/grafana:latest | linux | running | 172.19.19.2/24 | N/A | | 4 | clab-lab16-leaf1 | 0c8f5bf7bafb | ghcr.io/nokia/srlinux | srl | running | 172.19.19.11/24 | N/A | | 5 | clab-lab16-leaf2 | a33868bef0a3 | ghcr.io/nokia/srlinux | srl | running | 172.19.19.9/24 | N/A | | 6 | clab-lab16-leaf3 | 3fb3b459cd48 | ghcr.io/nokia/srlinux | srl | running | 172.19.19.10/24 | N/A | | 7 | clab-lab16-leaf4 | bb2cbc064b05 | ghcr.io/nokia/srlinux | srl | running | 172.19.19.6/24 | N/A | | 8 | clab-lab16-prometheus | 63b6fb1551de | prom/prometheus:latest | linux | running | 172.19.19.4/24 | N/A | | 9 | clab-lab16-spine1 | 76853ab9c4a8 | ghcr.io/nokia/srlinux | srl | running | 172.19.19.8/24 | N/A | | 10 | clab-lab16-spine2 | fdf42ca0fec1 | ghcr.io/nokia/srlinux | srl | running | 172.19.19.7/24 | N/A | +----+-------------------------+--------------+------------------------------+-------+---------+-----------------+--------------+ ``` Check the [Prometheus Remote Write output](../../../user_guide/outputs/prometheus_write_output.md) documentation page for more configuration options. ================================================ FILE: docs/deployments/single-instance/docker-compose/influxdb-output.md ================================================ The purpose of this deployment is to collect gNMI data and write it to an `InfluxDB` instance. This deployment example includes a single `gnmic` instance and a single [InfluxDB output](../../../user_guide/outputs/influxdb_output.md)
Deployment files: - [Docker Compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/3.influxdb-output/docker-compose/docker-compose.yaml) - [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/3.influxdb-output/docker-compose/gnmic1.yaml) Download both files, update the `gnmic` config file with the desired subscriptions and targets. Deploy it with: ```bash sudo docker-compose up -d ``` Check the [InfluxDB Output](../../../user_guide/outputs/influxdb_output.md) documentation page for more configuration options ================================================ FILE: docs/deployments/single-instance/docker-compose/kafka-output.md ================================================ The purpose of this deployment is to collect gNMI data and write it to a `Kafka` broker. Multiple 3rd Party systems (acting as a Kafka consumers) can then read the data from the `Kafka` broker for further processing. This deployment example includes a single `gnmic` instance and a single [Kafka output](../../../user_guide/outputs/kafka_output.md)
Deployment files: - [Docker Compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/2.kafka-output/docker-compose/docker-compose.yaml) - [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/2.kafka-output/docker-compose/gnmic1.yaml) Download both files, update the `gnmic` config file with the desired subscriptions and targets. Deploy it with: ```bash sudo docker-compose up -d ``` Check the [Kafka Output](../../../user_guide/outputs/kafka_output.md) documentation page for more configuration options ================================================ FILE: docs/deployments/single-instance/docker-compose/multiple-outputs.md ================================================ The purpose of this deployment is to collect gNMI data and write it to multiple outputs. This deployment example includes: - A single `gnmic` instance - A [Prometheus output](../../../user_guide/outputs/prometheus_output.md) - An [InfluxDB output](../../../user_guide/outputs/influxdb_output.md) - A [NATS output](../../../user_guide/outputs/nats_output.md) - A [Kafka output](../../../user_guide/outputs/kafka_output.md) - A [File output](../../../user_guide/outputs/file_output.md)
Deployment files: - [Docker Compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/5.multiple-outputs/docker-compose/docker-compose.yaml) - [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/5.multiple-outputs/docker-compose/gnmic1.yaml) - [Prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/5.multiple-outputs/docker-compose/prometheus/prometheus.yaml) Download both files, update the `gnmic` config file with the desired subscriptions and targets. Deploy it with: ```bash sudo docker-compose up -d ``` Check the [gnmic outputs](../../../user_guide/outputs/output_intro.md) documentation page for more configuration options ================================================ FILE: docs/deployments/single-instance/docker-compose/nats-output.md ================================================ The purpose of this deployment is to collect gNMI data and write it to a `NATS` server. Multiple 3rd Party systems (acting as a NATS clients) can then read the data from the `NATS` server for further processing. This deployment example includes a single `gnmic` instance and a single [NATS output](../../../user_guide/outputs/nats_output.md)
Deployment files: - [Docker Compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/1.nats-output/docker-compose/docker-compose.yaml) - [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/1.nats-output/docker-compose/gnmic1.yaml) Download both files, update the `gnmic` config file with the desired subscriptions and targets. Deploy it with: ```bash sudo docker-compose up -d ``` Check the [NATS Output](../../../user_guide/outputs/nats_output.md) documentation page for more configuration options ================================================ FILE: docs/deployments/single-instance/docker-compose/prometheus-output.md ================================================ The purpose of this deployment is to collect gNMI data and make it available for scraping by a `Prometheus` client. This deployment example includes a single `gnmic` instance and a single [Prometheus output](../../../user_guide/outputs/prometheus_output.md)
Deployment files: - [Docker Compose](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/4.prometheus-output/docker-compose/docker-compose.yaml) - [gNMIc config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/4.prometheus-output/docker-compose/gnmic1.yaml) - [Prometheus config](https://github.com/openconfig/gnmic/blob/main/examples/deployments/1.single-instance/4.prometheus-output/docker-compose/prometheus/prometheus.yaml) Download both files, update the `gnmic` config file with the desired subscriptions and targets. Deploy it with: ```bash sudo docker-compose up -d ``` Check the [Prometheus output](../../../user_guide/outputs/prometheus_output.md) documentation page for more configuration options ================================================ FILE: docs/global_flags.md ================================================ ### address The address flag `[-a | --address]` is used to specify the target's gNMI server address in address:port format, for e.g: `192.168.113.11:57400` Multiple target addresses can be specified, either as comma separated values: ```bash gnmic --address 192.168.113.11:57400,192.168.113.12:57400 ``` or by using the `--address` flag multiple times: ```bash gnmic -a 192.168.113.11:57400 --address 192.168.113.12:57400 ``` ### auth-scheme The auth-scheme flag `--auth-scheme` is used to specify the authorization header type. For example, if `auth-scheme` is set to `Basic`, the gNMI requests headers will include an `Authorization` header with value `Basic base64enc(username:password)`. ### cluster-name The `[--cluster-name]` flag is used to specify the cluster name the `gnmic` instance will join. The cluster name is used as part of the locked keys to share targets between multiple gnmic instances. Defaults to `default-cluster` ### config The `--config` flag specifies the location of a configuration file that `gnmic` will read. If not specified, gnmic searches for a file named `.gnmic` with extensions `yaml, yml, toml or json` in the following locations: * `$PWD` * `$HOME` * `$XDG_CONFIG_HOME` * `$XDG_CONFIG_HOME/gnmic` ### debug The debug flag `[-d | --debug]` enables the printing of extra information when sending/receiving an RPC ### dir A path to a directory which `gnmic` would recursively traverse in search for the additional YANG files which may be required by YANG files specified with `--file` to build the YANG tree. Can also point to a single YANG file instead of a directory. Multiple `--dir` flags can be supplied. ### encoding The encoding flag `[-e | --encoding]` is used to specify the [gNMI encoding](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#23-structured-data-types) of the Update part of a [Notification](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#21-reusable-notification-message-format) message. It is case insensitive and must be one of: JSON, BYTES, PROTO, ASCII, JSON_IETF ### exclude The `--exclude` flag specifies the YANG module __names__ to be excluded from the tree generation when YANG modules names clash. Multiple `--exclude` flags can be supplied. ### file A path to a YANG file or a directory with YANG files which `gnmic` will use with prompt, generate and path commands. Multiple `--file` flags can be supplied. ### format Five output formats can be configured by means of the `--format` flag. `[proto, protojson, prototext, json, event]` The default format is `json`. The `proto` format outputs the gnmi message as raw bytes, this value is not allowed when the output type is file (file system, stdout or stderr) see [outputs](user_guide/outputs/output_intro.md) The `prototext` and `protojson` formats are the message representation as defined in [prototext](https://godoc.org/google.golang.org/protobuf/encoding/prototext) and [protojson](https://godoc.org/google.golang.org/protobuf/encoding/protojson) The `event` format emits the received gNMI SubscribeResponse updates and deletes as a list of events tagged with the keys present in the subscribe path (as well as some metadata) and a timestamp Here goes an example of the same response emitted to stdout in the respective formats: === "protojson" ```json { "update": { "timestamp": "1595584408456503938", "prefix": { "elem": [ { "name": "state" }, { "name": "system" }, { "name": "version" } ] }, "update": [ { "path": { "elem": [ { "name": "version-string" } ] }, "val": { "stringVal": "TiMOS-B-20.5.R1 both/x86_64 Nokia 7750 SR Copyright (c) 2000-2020 Nokia.\r\nAll rights reserved. All use subject to applicable license agreements.\r\nBuilt on Wed May 13 14:08:50 PDT 2020 by builder in /builds/c/205B/R1/panos/main/sros" } } ] } } ``` === "prototext" ```yaml update: { timestamp: 1595584168675434221 prefix: { elem: { name: "state" } elem: { name: "system" } elem: { name: "version" } } update: { path: { elem: { name: "version-string" } } val: { string_val: "TiMOS-B-20.5.R1 both/x86_64 Nokia 7750 SR Copyright (c) 2000-2020 Nokia.\r\nAll rights reserved. All use subject to applicable license agreements.\r\nBuilt on Wed May 13 14:08:50 PDT 2020 by builder in /builds/c/205B/R1/panos/main/sros" } } } ``` === "json" ```json { "source": "172.17.0.100:57400", "subscription-name": "default", "timestamp": 1595584326775141151, "time": "2020-07-24T17:52:06.775141151+08:00", "prefix": "state/system/version", "updates": [ { "Path": "version-string", "values": { "version-string": "TiMOS-B-20.5.R1 both/x86_64 Nokia 7750 SR Copyright (c) 2000-2020 Nokia.\r\nAll rights reserved. All use subject to applicable license agreements.\r\nBuilt on Wed May 13 14:08:50 PDT 2020 by builder in /builds/c/205B/R1/panos/main/sros" } } ] } ``` === "event" ```json [ { "name": "default", "timestamp": 1595584587725708234, "tags": { "source": "172.17.0.100:57400", "subscription-name": "default" }, "values": { "/state/system/version/version-string": "TiMOS-B-20.5.R1 both/x86_64 Nokia 7750 SR Copyright (c) 2000-2020 Nokia.\r\nAll rights reserved. All use subject to applicable license agreements.\r\nBuilt on Wed May 13 14:08:50 PDT 2020 by builder in /builds/c/205B/R1/panos/main/sros" } } ] ``` ### gzip The `[--gzip]` flag enables gRPC gzip compression. ### insecure The insecure flag `[--insecure]` is used to indicate that the client wishes to establish an non-TLS enabled gRPC connection. To disable certificate validation in a TLS-enabled connection use [`skip-verify`](#skip-verify) flag. ### instance-name The `[--instance-name]` flag is used to give a unique name to the running `gnmic` instance. This is useful when there are multiple instances of `gnmic` running at the same time, either for high-availability and/or scalability ### log The `--log` flag enables log messages to appear on stderr output. By default logging is disabled. ### log-file The log-file flag `[--log-file ]` sets the log output to a file referenced by the path. This flag supersede the `--log` flag ### log-max-size The `[--log-max-size]` flag enables log rotation and sets the maximum size of the log file in megabytes before it gets rotated. ### log-max-backups The `[--log-max-backups]` flag sets the maximum number of old log files to retain. The default is to retain all old log files. ### log-compress The `[--log-compress]` flag determines if the rotated log files should be compressed using gzip. The default is not to perform compression. ### no-prefix The no prefix flag `[--no-prefix]` disables prefixing the json formatted responses with `[ip:port]` string. Note that in case a single target is specified, the prefix is not added. ### password The password flag `[-p | --password]` is used to specify the target password as part of the user credentials. Note that in case multiple targets are used, all should use the same credentials. ### proto-dir The `[--proto-dir]` flag is used to specify a list of directories where `gnmic` will search for the proto file names specified with `--proto-file`. ### proto-file The `[--proto-file]` flag is used to specify a list of proto file names that `gnmic` will use to decode ProtoBytes values. only Nokia SROS proto is currently supported. ### proxy-from-env The proxy-from-env flag `[--proxy-from-env]` indicates that the gnmic should use the HTTP/HTTPS proxy addresses defined in the environment variables `http_proxy` and `https_proxy` to reach the targets specified using the `--address` flag. ### retry The retry flag `[--retry]` specifies the wait time before each retry. Valid formats: 10s, 1m30s, 1h. Defaults to 10s ### skip-verify The skip verify flag `[--skip-verify]` indicates that the target should skip the signature verification steps, in case a secure connection is used. ### targets-file The `[--targets-file]` flag is used to configure a [file target loader](user_guide/targets/target_discovery/file_discovery.md) ### timeout The timeout flag `[--timeout]` specifies the gRPC timeout after which the connection attempt fails. Valid formats: 10s, 1m30s, 1h. Defaults to 10s ### tls-ca The TLS CA flag `[--tls-ca]` specifies the root certificates for verifying server certificates encoded in PEM format. ### tls-cert The TLS cert flag `[--tls-cert]` specifies the public key for the client encoded in PEM format. ### tls-key The TLS key flag `[--tls-key]` specifies the private key for the client encoded in PEM format. ### tls-max-version The TLS max version flag `[--tls-max-version]` specifies the maximum supported TLS version supported by gNMIc when creating a secure gRPC connection. ### tls-min-version The tls min version flag `[--tls-min-version]` specifies the minimum supported TLS version supported by gNMIc when creating a secure gRPC connection. ### tls-server-name The TLS server name flag `[--tls-server-name]` sets the server name to be used when verifying the hostname on the returned certificates unless `--skip-verify` is set. This global flag applies to all targets. ### tls-version The tls version flag `[--tls-version]` specifies a single supported TLS version gNMIc when creating a secure gRPC connection. This flag overwrites the previously listed flags `--tls-max-version` and `--tls-min-version`. ### log-tls-secret The log TLS secret flag `[--log-tls-secret]` makes gnmic to log the per-session pre-master secret so that it can be used to [decrypt TLS](https://gitlab.com/wireshark/wireshark/-/wikis/TLS#tls-decryption) secured gNMI communications with, for example, Wireshark. The secret will be saved to a file named `.tlssecret.log`. ### token The token flag `[--token]` sets a token value to be added to each RPC as an Authorization Bearer Token. Applied only in the case of a secure gRPC connection. ### username The username flag `[-u | --username]` is used to specify the target username as part of the user credentials. ### calculate-latency The `--calculate-latency` flag augments subscribe et get responses by calculating the delta between the message timestamp and the receive timestamp. The resulting message will include 4 extra fields: * `recv-timestamp`:The receive timestamp in nanoseconds. * `recv-time`: The receive time in ISO 8601 date and time representation, extended to include fractional seconds and a time zone offset.. * `latency-nano`: The difference between the message timestamp and the receive time in nanoseconds. * `latency-milli`: The difference between the message timestamp and the receive time in milliseconds. ### metadata The `[-H | --metadata]` flag adds custom headers to any gRPC request. `gnmic -H header1=value1 -H header2=value2` ================================================ FILE: docs/index.md ================================================

[![github release](https://img.shields.io/github/release/openconfig/gnmic.svg?style=flat-square&color=00c9ff&labelColor=bec8d2)](https://github.com/openconfig/gnmic/releases/) [![Github all releases](https://img.shields.io/github/downloads/openconfig/gnmic/total.svg?style=flat-square&color=00c9ff&labelColor=bec8d2)](https://github.com/openconfig/gnmic/releases/) --- `gnmic` _(pronoun.: gee·en·em·eye·see)_ is a gNMI CLI client that provides full support for Capabilities, Get, Set and Subscribe RPCs with collector capabilities. ## Features * **Full support for gNMI RPCs** Every gNMI RPC has a [corresponding command](https://gnmic.openconfig.net/basic_usage/) with all of the RPC options configurable by means of the local and global flags. * **Flexible collector deployment** `gnmic` can be deployed as a gNMI collector that supports multiple output types ([NATS](user_guide/outputs/nats_output.md), [Kafka](user_guide/outputs/kafka_output.md), [Prometheus](user_guide/outputs/prometheus_output.md), [InfluxDB](user_guide/outputs/influxdb_output.md),...). The collector can be deployed either as a [single instance](deployments/deployments_intro/#single-instance), as part of a [cluster](user_guide/HA/), or used to form [data pipelines](deployments/deployments_intro/#pipelines). * **gNMI data manipulation** `gnmic` collector supports [data transformation](user_guide/event_processors/intro/) capabilities that can be used to adapt the collected data to your specific use case. * **Dynamic targets loading** `gnmic` support [target loading at runtime](user_guide/targets/target_discovery/discovery_intro.md) based on input from external systems. * **YANG-based path suggestions** Your CLI magically becomes a YANG browser when `gnmic` is executed in [prompt](user_guide/prompt_suggestions.md) mode. In this mode the flags that take XPATH values will get auto-suggestions based on the provided YANG modules. In other words - voodoo magic :exploding_head: * **Multiple configuration sources** gnmic supports [flags](user_guide/configuration_flags), [environment variables](user_guide/configuration_env/) as well as [file based](https://gnmic.openconfig.net/user_guide/configuration_file/) configurations. * **Multi-target operations** Commands can operate on [multiple gNMI targets](https://gnmic.openconfig.net/user_guide/targets/) for bulk configuration/retrieval/subscription. * **Multiple subscriptions** With file based configuration it is possible to define and configure [multiple subscriptions](https://gnmic.openconfig.net/user_guide/subscriptions/) which can be independently associated with gNMI targets. * **Inspect gNMI messages** With the `textproto` output format and the logging capabilities of `gnmic` you can see the actual gNMI messages being sent/received. Its like having a gNMI looking glass! * **Configurable TLS enforcement** gNMI client supports both TLS and [non-TLS](https://gnmic.openconfig.net/global_flags/#insecure) transports so you can start using it in a lab environment without having to care about the PKI. * **Dial-out telemetry** The [dial-out telemetry server](https://gnmic.openconfig.net/cmd/listen/) is provided for Nokia SR OS. * **Pre-built multi-platform binaries** Statically linked [binaries](https://github.com/openconfig/gnmic/releases) made in our release pipeline are available for major operating systems and architectures. Making [installation](https://gnmic.openconfig.net/install/) a breeze! * **Extensive and friendly documentation** You won't be in need to dive into the source code to understand how `gnmic` works, our [documentation site](https://gnmic.openconfig.net) has you covered. ## Quick start guide ### Installation ``` bash -c "$(curl -sL https://get-gnmic.openconfig.net)" ``` ### Capabilities request ``` gnmic -a 10.1.0.11:57400 -u admin -p admin --insecure capabilities ``` ### Get request ``` gnmic -a 10.1.0.11:57400 -u admin -p admin --insecure \ get --path /state/system/platform ``` ### Set request ``` gnmic -a 10.1.0.11:57400 -u admin -p admin --insecure \ set --update-path /configure/system/name \ --update-value gnmic_demo ``` ### Subscribe request ``` gnmic -a 10.1.0.11:57400 -u admin -p admin --insecure \ sub --path "/state/port[port-id=1/1/c1/1]/statistics/in-packets" ``` ================================================ FILE: docs/install.md ================================================ `gnmic` is a single binary built for the Linux, Mac OS and Windows operating systems distributed via [Github releases](https://github.com/openconfig/gnmic/releases). ### Linux/Mac OS To download & install the latest release the following automated [installation script](https://github.com/openconfig/gnmic/blob/main/install.sh) can be used: ```bash bash -c "$(curl -sL https://get-gnmic.openconfig.net)" ``` As a result, the latest `gnmic` version will be installed in the `/usr/local/bin` directory and the version information will be printed out. ```text Downloading gnmic_0.0.3_Darwin_x86_64.tar.gz... Moving gnmic to /usr/local/bin version : 0.0.3 commit : f541948 date : 2020-04-23T12:06:07Z gitURL : https://github.com/openconfig/gnmic.git docs : https://gnmic.openconfig.net Installation complete! ``` To install a specific version of `gnmic`, provide the version with `-v` flag to the installation script: ```bash bash -c "$(curl -sL https://get-gnmic.openconfig.net)" -- -v 0.5.0 ``` #### Packages Linux users running distributions with support for `deb`/`rpm` packages can install `gnmic` using pre-built packages: ```bash bash -c "$(curl -sL https://get-gnmic.openconfig.net)" -- --use-pkg ``` #### Upgrade To upgrade `gnmic` to the latest version use the `upgrade` command: ```bash # upgrade using binary file gnmic version upgrade # upgrade using package gnmic version upgrade --use-pkg ``` ### Windows Windows users should use [WSL](https://en.wikipedia.org/wiki/Windows_Subsystem_for_Linux) on Windows and install the linux version of the tool. ### Docker The `gnmic` container image can be pulled from Dockerhub or GitHub container registries. The tag of the image corresponds to the release version and `latest` tag points to the latest available release: ```bash # pull latest release from dockerhub docker pull gnmic/gnmic:latest # pull a specific release from dockerhub docker pull gnmic/gnmic:0.7.0 # pull latest release from github registry docker pull ghcr.io/openconfig/gnmic:latest # pull a specific release from github registry docker pull ghcr.io/openconfig/gnmic:0.5.2 ``` Example running `gnmic get` command using the docker image: ```bash docker run \ --network host \ --rm ghcr.io/openconfig/gnmic get --log --username admin --password admin --insecure --address router1.local --path /interfaces ``` ### Docker Compose `gnmic` docker-compose file example: ```yaml version: '2' networks: gnmic-net: driver: bridge services: gnmic-1: image: ghcr.io/openconfig/gnmic:latest container_name: gnmic-1 networks: - gnmic-net volumes: - ./gnmic.yaml:/app/gnmic.yaml command: "subscribe --config /app/gnmic.yaml" ``` See [here](deployments/deployments_intro.md) for more deployment options ================================================ FILE: docs/stylesheets/extra.css ================================================ .md-typeset code { background-color: transparent ; } ================================================ FILE: docs/user_guide/HA.md ================================================ Multiple instances of`gnmic` can be run in clustered mode in order to load share the targets connections and protect against failures. The cluster mode allows `gnmic` to scale and be highly available at the same time To join the cluster, the instances rely on a service discovery system and distributed KV store such as `Consul`, ### Clustering process At startup, all instances belonging to a cluster: * Enter an election process in order to become the cluster leader. * Register their API service `gnmic-api` in a configured service discovery system. Upon becoming the leader: * The `gnmic` instance starts watching the registered `gnmic-api` services, and maintains a local cache of the active ones. These are essentially the instances restAPI addresses. * The leader then waits for `clustering/leader-wait-timer` to allow the other instances to register their API services as well. This is useful in case an instance is slow to boot, which leaves it out of the initial load sharing process. * The leader then enters a "target watch loop" (`clustering/targets-watch-timer`), at each iteration the leader tries to determine if all configured targets are handled by an instance of the cluster, this is done by checking if there is a lock maintained for each configured target. The instances which failed to become the leader, continue to try to acquire the leader lock. ### Target distribution process If the leader detects that a target does not have a lock, it triggers the target distribution process: * Query all the targets keys from the KV store and calculate each instance load (number of maintained gNMI targets). * If the target configuration includes `tags`, the leader selects the instance with the most matching tags (in order). If multiple instances have the same matching tags, the one with the lowest load is selected. * If the target doesn't have configured tags, the leader simply select the least loaded instance to handle the target's subscriptions. * Retrieve the selected instance API address from the local services cache. * Send both the target configuration as well as a target activation action to the selected instance. When a cluster instance gets assigned a target (target activation): * Acquire a key lock for that specific target. * Once the lock is acquired, create the configured gNMI subscriptions. * Maintain the target lock for the duration of the gNMI subscription. The whole target distribution process is repeated for each target missing a lock. ### Configuration The cluster configuration is as simple as: ```yaml # rest api address, format "address:port" api: "" # clustering related configuration fields clustering: # the cluster name, tells with instances belong to the same cluster # it is used as part of the leader key lock, and the targets key locks # if no value is configured, the value from flag --cluster-name is used. # if the flag has the empty string as value, "default-cluster" is used. cluster-name: default-cluster # unique instance name within the cluster, # used as the value in the target locks, # used as the value in the leader lock. # if no value is configured, the value from flag --instance-name is used. # if the flag has the empty string as value, a value is generated in # the format `gnmic-$UUID` instance-name: "" # service address to be registered in the locker(Consul) # if not defined, it defaults to the address part of the API address:port service-address: "" # gnmic instances API service watch timer # this is a long timer used by the cluster leader # in a consul long-blocking query: # https://www.consul.io/api-docs/features/blocking#implementation-details services-watch-timer: 60s # targets-watch-timer, targets watch timer, duration the leader waits # between consecutive targets distributions targets-watch-timer: 20s # target-assignment-timeout, max time a leader waits for an instance to # lock an assigned target. # if the timeout is reached the leader unassigns the target and reselects # a different instance. target-assignment-timeout: 10s # leader wait timer, allows to configure a wait time after an instance # acquires the leader key. # this wait time goal is to give more chances to other instances to register # their API services before the target distribution starts leader-wait-timer: 5s # ordered list of strings to be added as tags during api service # registration in addition to `cluster-name=${cluster-name}` and # `instance-name=${instance-name}` tags: [] # locker is used to configure the KV store used for # service registration, service discovery, leader election and targets locks locker: # type of locker, only consul is supported currently type: consul # address of the locker server address: localhost:8500 # Consul Data center, defaults to dc1 datacenter: # Consul username, to be used as part of HTTP basicAuth username: # Consul password, to be used as part of HTTP basicAuth password: # Consul Token, is used to provide a per-request ACL token which overrides # the agent's default token token: # session-ttl, session time-to-live after which a session is considered # invalid if not renewed # upon session invalidation, all services and locks created using this session # are considered invalid. session-ttl: 10s # delay, a time duration (0s to 60s), in the event of a session invalidation # consul will prevent the lock from being acquired for this duration. # The purpose is to allow a gnmic instance to stop active subscriptions before # another one takes over. delay: 5s # retry-timer, wait period between retries to acquire a lock # in the event of client failure, key is already locked or lock lost. retry-timer: 2s # renew-period, session renew period, must be lower that session-ttl. # if the value is greater or equal than session-ttl, is will be set to half # of session-ttl. renew-period: 5s # debug, enable extra logging messages debug: false # tls config for the REST API client tls: # string, path to the CA certificate file, # this will be used to verify the certificates of the gNMIc cluster members # when `skip-verify` is false ca-file: # string, client certificate file. cert-file: # string, client key file. key-file: # boolean, if true, the client will not verify the server # certificate against the available certificate chain. skip-verify: false ``` A `gnmic` instance creates gNMI subscriptions only towards targets for which it acquired locks. It is also responsible for maintaining that lock for the duration of the subscription.
### Instance affinity The target distribution process can be influenced using `tags` added to the target configuration. By default, `gnmic` instances register their API service with 2 tags; `cluster-name=${clustering/cluster-name}` `instance-name=${clustering/instance-name}` By adding the same tags to a target `router1` configuration (below YAML), the cluster leader will "assign" `router1` to instance `gnmic1` in cluster `my-cluster` regardless of the instance load. ```yaml targets: router1: tags: - cluster-name=my-cluster - instance-name=gnmic1 ``` Custom tags can be added to an instance API service registration in order to customize the instance affinity logic. ```yaml clustering: tags: - my-custom-tag=value1 ``` ### Instance failure In the event of an instance failure, its maintained targets locks expire, which on the next `clustering/targets-watch-timer` interval will be detected by the cluster leader. The leader then performs the same target distribution process for those targets without a lock. ### Leader reelection If a cluster leader fails, one of the other instances in the cluster eventually acquires the leader lock and becomes the cluster leader. It then, proceeds with the targets distribution process to assign the unhandled targets to an instance in the cluster. ### Scalability Using the same above-mentioned clustering mechanism, `gnmic` can horizontally scale the number of supported gNMI connections distributed across multiple `gnmic` instances. The collected gNMI data can then be aggregated and made available through any of the running `gnmic` instances, regardless of whether that instance collected the data from the target or not. The data aggregation is done by chaining `gnmic` [outputs](../user_guide/outputs/output_intro.md) and [inputs](../user_guide/inputs/input_intro.md) to build a gNMI data pipeline. In the diagram below, the `gnmic` instances on the left and right side of NATS server can be identical.
================================================ FILE: docs/user_guide/actions/actions.md ================================================ # Actions `gNMIc` supports running actions as result of an event, possible triggering events are: - A gNMI SubscribeResponse or GetReponse message is received and matches certain criteria. - A target is discovered or deleted by a target loader. There are 4 types of actions: - [http](#http-action): build and send an HTTP request - [gNMI](#gnmi-action): run a Get, Set or Subscribe ONCE gNMI RPC as a gNMI client - [template](#template-action): execute a Go template against the received input - [script](#script-action): run arbitrary shell scripts/commands. The actions are executed in sequence. An action can use the result of any previous action as one of it inputs using the [Go Template](https://golang.org/pkg/text/template/) syntax `{{ .Env.$action_name }}` or `{{ index .Env "$action_name"}}` ### HTTP Action Using the `HTTP action` you can send an HTTP request to a server. The request body can be customized using [Go Templates](https://golang.org/pkg/text/template/) that take the event message or the discovered target as input. ```yaml actions: counter1_alert: # action type type: http # HTTP method method: POST # target url, can be a go template url: http://remote-server:8080/ # http headers to add to the request headers: content-type: application/text # http request timeout timeout: 5s # go template used to build the request body. # if left empty the whole event message is added as a json object to the request's body body: '"counter1" crossed threshold, value={{ index .Values "counter1" }}' # enable extra logging debug: false ``` ### gNMI Action Using the `gNMI action` you can trigger a gNMI Get, Set or Subscribe ONCE RPC. Just like the `HTTP action` the RPC fields can be customized using [Go Templates](https://golang.org/pkg/text/template/) ```yaml actions: my_gnmi_action: # action type type: gnmi # gNMI rpc, defaults to `get`, # if `set` is used it will default to a set update. # to trigger a set replace, use `set-replace`. # `subscribe` is always a subscribe with mode=ONCE # possible values: `get`, `set`, `set-update`, `set-replace`, `set-delete`, `sub`, `subscribe` rpc: set # the target router, it defaults to the value in tag "source" # the value `all` means all known targets target: '{{ index .Event.Tags "source" }}' # paths templates to build xpaths paths: - | {{ if eq ( index .Event.Tags "interface_name" ) "ethernet-1/1"}} {{$interfaceName := "ethernet-1/2"}} {{else}} {{$interfaceName := "ethernet-1/1"}} {{end}} /interfaces/interface[name={{$interfaceName}}]/admin-state # values templates to build the values in case of set-update or set-replace values: - "enable" # data-type in case of get RPC, one of: ALL, CONFIG, STATE, OPERATIONAL data-type: ALL # gNMI encoding, defaults to json encoding: json # debug, enable extra logging debug: false ``` ### Template Action The `Template action` allows to combine different data sources and produce custom payloads to be writen to a remote server or simply to a file. The template is a Go Template that is executed against the `Input` message that triggered the action, any variable defined by the trigger processor as well as the results of any previous action. **Data** | **Template syntax** | ----------------------------- | --------------------------------------------------------------| **Input Messge** | `{{ .Input }}` | **Trigger Variables** | `{{ .Vars }}` | **Previous actions results** | `{{ .Env.$action_name }}` or `{{ index .Env "$action_name"}}` | ```yaml actions: awesome_template: # action type type: template # template string, if not present template-file applies. template: '{{ . }}' # path to a file, or a glob. # applies only if `.template `is not set. # if not template and template-file are not set, # the default template `{{ . }}` is used. template-file: # string, either `stdout` or a path to a file # the result of executing to template will be written to the file # specified by .output output: # debug, enable extra logging debug: false ``` ### Script Action The `Script action` allows to run arbitrary scripts as a result of an event trigger. The commands to be executed can be specified using the field `command`, e.g: ```yaml actions: weather: type: script shell: /bin/bash command: | curl wttr.in curl cheat.sh ``` Or using the field `file`, e.g: ```yaml actions: exec: type: script file: ./my_executable_script.sh ``` When using `command`, the shell interpreter can be set using `shell` field. Otherwise it defaults to `/bin/bash`. ### Examples #### Add basic configuration to targets upon discovery Referencing Actions under a target loader allows to run then in sequence when a target is discovered. This allows to add some basic configuration to a target upon discovery before starting the gNMI subscriptions In the below example, a `docker` loader is defined. It discovers Docker containers with label `clab-node-kind=srl` and adds them as gNMI targets. Before the targets are added to the target's list for subscriptions, a list of actions are executed: `config_interfaces`, `config_subinterfaces` and `config_network_instances` ```yaml username: admin password: NokiaSrl1! skip-verify: true encoding: ascii log: true subscriptions: sub1: paths: - /interface/statistics - /network-instance/statistics loader: type: docker filters: - containers: - label: clab-node-kind=srl on-add: - config_interfaces - config_sub_interfaces - config_netins outputs: out: type: file format: event filename: /path/to/file actions: config_interfaces: name: config_interfaces type: gnmi target: '{{ .Input }}' rpc: set encoding: json_ietf debug: true paths: - /interface[name=ethernet-1/1]/admin-state - /interface[name=ethernet-1/2]/admin-state values: - enable - enable config_subinterfaces: name: config_subinterfaces type: gnmi target: '{{ .Input }}' rpc: set encoding: json_ietf debug: true paths: - /interface[name=ethernet-1/1]/subinterface[index=0]/admin-state - /interface[name=ethernet-1/2]/subinterface[index=0]/admin-state values: - enable - enable config_network_instances: name: config_network_instances type: gnmi target: '{{ .Input }}' rpc: set encoding: json_ietf debug: true paths: - /network-instance[name=default]/admin-state - /network-instance[name=default]/interface - /network-instance[name=default]/interface values: - enable - '{"name": "ethernet-1/1.0"}' - '{"name": "ethernet-1/2.0"}' ``` #### Clone a network topology and deploy it using containerlab Using lldp neighbor information it's possible to build a containerlab topology using `gnmic` actions. In the below confoguration file, an event processor called `clone-topology` is defined. When triggered it will run a series of actions to gather information (chassis type, lldp neighbors, configuration,...) from the defined targets. It then builds a containerlab topology from a defined template and the gathered info, writes it to a file and runs a `clab deploy` command. ```yaml username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf # log: true targets: srl1: srl2: srl3: processors: clone-topology: event-trigger: # debug: true actions: - chassis - lldp - read_config - write_config - clab_topo - deploy_topo actions: chassis: name: chassis type: gnmi target: all rpc: sub encoding: json_ietf #debug: true format: event paths: - /platform/chassis/type lldp: name: lldp type: gnmi target: all rpc: sub encoding: json_ietf #debug: true format: event paths: - /system/lldp/interface[name=ethernet-*] read_config: name: read_config type: gnmi target: all rpc: get data-type: config encoding: json_ietf #debug: true paths: - / write_config: name: write_config type: template template: | {{- range $n, $m := .Env.read_config }} {{- $filename := print $n ".json"}} {{ file.Write $filename (index $m 0 "updates" 0 "values" "" | data.ToJSONPretty " " ) }} {{- end }} #debug: true clab_topo: name: clab_topo type: template #debug: true output: gnmic.clab.yaml template: | name: gNMIc-action-generated topology: defaults: kind: srl kinds: srl: image: ghcr.io/nokia/srlinux:latest nodes: {{- range $n, $m := .Env.lldp }} {{- $type := index $.Env.chassis $n 0 0 "values" "/srl_nokia-platform:platform/srl_nokia-platform-chassis:chassis/type" }} {{- $type = $type | strings.ReplaceAll "7220 IXR-D1" "ixrd1" }} {{- $type = $type | strings.ReplaceAll "7220 IXR-D2" "ixrd2" }} {{- $type = $type | strings.ReplaceAll "7220 IXR-D3" "ixrd3" }} {{- $type = $type | strings.ReplaceAll "7250 IXR-6" "ixr6" }} {{- $type = $type | strings.ReplaceAll "7250 IXR-10" "ixr10" }} {{- $type = $type | strings.ReplaceAll "7220 IXR-H1" "ixrh1" }} {{- $type = $type | strings.ReplaceAll "7220 IXR-H2" "ixrh2" }} {{- $type = $type | strings.ReplaceAll "7220 IXR-H3" "ixrh3" }} {{ $n | strings.TrimPrefix "clab-test1-" }}: type: {{ $type }} startup-config: {{ print $n ".json"}} {{- end }} links: {{- range $n, $m := .Env.lldp }} {{- range $rsp := $m }} {{- range $ev := $rsp }} {{- if index $ev.values "/srl_nokia-system:system/srl_nokia-lldp:lldp/interface/neighbor/system-name" }} {{- $node1 := $ev.tags.source | strings.TrimPrefix "clab-test1-" }} {{- $iface1 := $ev.tags.interface_name | strings.ReplaceAll "ethernet-" "e" | strings.ReplaceAll "/" "-" }} {{- $node2 := index $ev.values "/srl_nokia-system:system/srl_nokia-lldp:lldp/interface/neighbor/system-name" }} {{- $iface2 := index $ev.values "/srl_nokia-system:system/srl_nokia-lldp:lldp/interface/neighbor/port-id" | strings.ReplaceAll "ethernet-" "e" | strings.ReplaceAll "/" "-" }} {{- if lt $node1 $node2 }} - endpoints: ["{{ $node1 }}:{{ $iface1 }}", "{{ $node2 }}:{{ $iface2 }}"] {{- end }} {{- end }} {{- end }} {{- end }} {{- end }} deploy_topo: name: deploy_topo type: script command: sudo clab dep -t gnmic.clab.yaml --reconfigure debug: true ``` The above described processor can be triggered with the below command: ```bash gnmic --config clone.yaml get --path /system/name --processor clone-topology ``` ================================================ FILE: docs/user_guide/api/api_intro.md ================================================ A limited set of REST endpoints are supported, these are mainly used to allow for a clustered deployment for multiple `gnmic` instances. The API can be used to automate (to a certain extent) the targets configuration loading and starting/stopping subscriptions. ## Configuration Enabling the API server can be done via a command line flag: ```bash gnmic --config gnmic.yaml subscribe --api ":7890" ``` via ENV variable: `GNMIC_API=':7890'` Or via file configuration, by adding the below line to the config file: ```yaml api: ":7890" ``` More advanced API configuration options (like a secure API Server) can be achieved by setting the fields under `api-server`. ```yaml api-server: # string, in the form IP:port, the IP part can be omitted. # if not set, it defaults to the value of `api` in the file main level. # if `api` is not set, the default is `:7890` address: :7890 # duration, the server timeout. # The set value is equally split between read and write timeouts timeout: 10s # tls config tls: # string, path to the CA certificate file, # this certificate is used to verify the clients certificates. ca-file: # string, server certificate file. cert-file: # string, server key file. key-file: # string, one of `"", "request", "require", "verify-if-given", or "require-verify" # - request: The server requests a certificate from the client but does not # require the client to send a certificate. # If the client sends a certificate, it is not required to be valid. # - require: The server requires the client to send a certificate and does not # fail if the client certificate is not valid. # - verify-if-given: The server requests a certificate, # does not fail if no certificate is sent. # If a certificate is sent it is required to be valid. # - require-verify: The server requires the client to send a valid certificate. # # if no ca-file is present, `client-auth` defaults to ""` # if a ca-file is set, `client-auth` defaults to "require-verify"` client-auth: "" # boolean, if true, the server will also handle the path /metrics and serve # gNMIc's enabled prometheus metrics. enable-metrics: false # boolean, enables extra debug log printing debug: false # boolean, disables creating log messages when accessing the `healthz` path healthz-disable-logging: false ``` ## API Endpoints * [Configuration](./configuration.md) * [Targets](./targets.md) * [Cluster](./cluster.md) * [Other](./other.md) ================================================ FILE: docs/user_guide/api/cluster.md ================================================ # Cluster ## /api/v1/cluster ### `GET /api/v1/cluster` Request gNMIc cluster state and details. === "Request" ```bash curl --request GET gnmic-api-address:port/api/v1/cluster ``` === "200 OK" ```json { "name": "collectors", "number-of-locked-targets": 70, "leader": "clab-telemetry-gnmic1", "members": [ { "name": "clab-telemetry-gnmic1", "api-endpoint": "clab-telemetry-gnmic1:7890", "is-leader": true, "number-of-locked-nodes": 23, "locked-targets": [ "clab-lab2-leaf6", "clab-lab5-spine2", "clab-lab4-leaf4", "clab-lab2-leaf8", "clab-lab3-leaf2", "clab-lab5-spine1", "clab-lab1-spine1", "clab-lab2-super-spine2", "clab-lab3-super-spine1", "clab-lab4-spine3", "clab-lab2-spine3", "clab-lab3-leaf7", "clab-lab5-leaf7", "clab-lab5-leaf8", "clab-lab1-spine2", "clab-lab4-leaf8", "clab-lab4-leaf1", "clab-lab4-spine1", "clab-lab2-spine2", "clab-lab3-spine2", "clab-lab1-leaf8", "clab-lab3-leaf8", "clab-lab4-leaf2" ] }, { "name": "clab-telemetry-gnmic2", "api-endpoint": "clab-telemetry-gnmic2:7891", "number-of-locked-nodes": 24, "locked-targets": [ "clab-lab3-leaf6", "clab-lab1-leaf7", "clab-lab2-leaf3", "clab-lab5-leaf5", "clab-lab1-super-spine1", "clab-lab3-leaf5", "clab-lab4-super-spine1", "clab-lab5-leaf6", "clab-lab2-spine1", "clab-lab3-leaf3", "clab-lab4-leaf3", "clab-lab2-leaf4", "clab-lab4-super-spine2", "clab-lab1-spine3", "clab-lab3-leaf4", "clab-lab5-spine4", "clab-lab1-leaf4", "clab-lab2-leaf2", "clab-lab2-super-spine1", "clab-lab4-spine4", "clab-lab5-leaf2", "clab-lab5-leaf4", "clab-lab4-leaf7", "clab-lab1-spine4" ] }, { "name": "clab-telemetry-gnmic3", "api-endpoint": "clab-telemetry-gnmic3:7892", "number-of-locked-nodes": 23, "locked-targets": [ "clab-lab1-leaf5", "clab-lab3-spine3", "clab-lab1-leaf1", "clab-lab2-spine4", "clab-lab1-super-spine2", "clab-lab5-leaf3", "clab-lab4-spine2", "clab-lab1-leaf3", "clab-lab5-spine3", "clab-lab3-super-spine2", "clab-lab2-leaf5", "clab-lab1-leaf2", "clab-lab1-leaf6", "clab-lab4-leaf5", "clab-lab2-leaf7", "clab-lab3-leaf1", "clab-lab2-leaf1", "clab-lab3-spine1", "clab-lab5-leaf1", "clab-lab5-super-spine2", "clab-lab4-leaf6", "clab-lab3-spine4", "clab-lab5-super-spine1" ] } ] } ``` === "500 Internal Server Error" ```json { "errors": [ "Error Text" ] } ``` ### `POST /api/v1/cluster/rebalance` If the cluster load is not balanced it moves targets from the high load instances to the low load instances. === "Request" ```bash curl --request POST gnmic-api-address:port/api/v1/cluster/rebalance ``` === "200 OK" ``` ``` === "400 Bad Request" ```json { "errors": [ "not leader" ] } ``` ### `GET /api/v1/cluster/leader` Returns the cluster leader details. === "Request" ```bash curl --request POST gnmic-api-address:port/api/v1/cluster/leader ``` === "200 OK" ```json [ { "name": "clab-telemetry-gnmic1", "api-endpoint": "http://clab-telemetry-gnmic1:7890", "is-leader": true, "number-of-locked-nodes": 23, "locked-targets": [ "clab-lab4-leaf8", "clab-lab5-leaf8", "clab-lab1-spine2", "clab-lab3-leaf7", "clab-lab4-leaf4", "clab-lab2-leaf8", "clab-lab2-spine3", "clab-lab4-leaf1", "clab-lab4-leaf2", "clab-lab4-spine3", "clab-lab5-spine2", "clab-lab1-spine1", "clab-lab2-leaf6", "clab-lab5-leaf7", "clab-lab1-leaf8", "clab-lab3-leaf8", "clab-lab3-spine2", "clab-lab3-super-spine1", "clab-lab5-spine1", "clab-lab2-super-spine2", "clab-lab3-leaf2", "clab-lab2-spine2", "clab-lab4-spine1" ] } ] ``` === "500 Internal Server Error" ```json { "errors": [ "Error Text" ] } ``` ### `DELETE /api/v1/cluster/leader` Forces the cluster leader to free its lock to allow another instance to become the leader. === "Request" ```bash curl --request DELETE gnmic-api-address:port/api/v1/cluster/leader ``` === "200 OK" ```json ``` ## /api/v1/cluster/members ### `GET /api/v1/cluster/members` Query gNMIc cluster members Returns a list of gNMIc cluster members with details === "Request" ```bash curl --request GET gnmic-api-address:port/api/v1/cluster/members ``` === "200 OK" ```json [ { "name": "clab-telemetry-gnmic1", "api-endpoint": "http://clab-telemetry-gnmic1:7890", "is-leader": true, "number-of-locked-nodes": 23, "locked-targets": [ "clab-lab2-spine3", "clab-lab5-spine1", "clab-lab2-super-spine2", "clab-lab4-leaf2", "clab-lab4-leaf4", "clab-lab5-spine2", "clab-lab1-leaf8", "clab-lab4-spine1", "clab-lab5-leaf7", "clab-lab2-spine2", "clab-lab3-super-spine1", "clab-lab1-spine1", "clab-lab3-leaf2", "clab-lab3-spine2", "clab-lab2-leaf6", "clab-lab4-leaf1", "clab-lab4-spine3", "clab-lab1-spine2", "clab-lab2-leaf8", "clab-lab3-leaf8", "clab-lab5-leaf8", "clab-lab3-leaf7", "clab-lab4-leaf8" ] }, { "name": "clab-telemetry-gnmic2", "api-endpoint": "http://clab-telemetry-gnmic2:7891", "number-of-locked-nodes": 24, "locked-targets": [ "clab-lab1-spine4", "clab-lab2-leaf2", "clab-lab3-leaf3", "clab-lab4-super-spine1", "clab-lab5-leaf4", "clab-lab1-spine3", "clab-lab1-leaf4", "clab-lab3-leaf6", "clab-lab5-leaf2", "clab-lab2-leaf4", "clab-lab3-leaf4", "clab-lab4-leaf3", "clab-lab5-spine4", "clab-lab3-leaf5", "clab-lab4-super-spine2", "clab-lab1-leaf7", "clab-lab2-leaf3", "clab-lab2-super-spine1", "clab-lab5-leaf6", "clab-lab2-spine1", "clab-lab1-super-spine1", "clab-lab4-leaf7", "clab-lab4-spine4", "clab-lab5-leaf5" ] }, { "name": "clab-telemetry-gnmic3", "api-endpoint": "http://clab-telemetry-gnmic3:7892", "number-of-locked-nodes": 23, "locked-targets": [ "clab-lab1-leaf3", "clab-lab1-leaf5", "clab-lab3-spine4", "clab-lab3-spine3", "clab-lab1-leaf1", "clab-lab1-leaf6", "clab-lab2-leaf5", "clab-lab4-leaf6", "clab-lab5-leaf1", "clab-lab5-leaf3", "clab-lab5-super-spine2", "clab-lab2-spine4", "clab-lab5-super-spine1", "clab-lab4-spine2", "clab-lab3-spine1", "clab-lab4-leaf5", "clab-lab5-spine3", "clab-lab1-super-spine2", "clab-lab2-leaf1", "clab-lab3-super-spine2", "clab-lab3-leaf1", "clab-lab1-leaf2", "clab-lab2-leaf7" ] } ] ``` === "500 Internal Server Error" ```json { "errors": [ "Error Text" ] } ``` ### `POST /api/v1/cluster/members/{id}/drain` Drains the instance `id` from its targets, moving them to the other instances in the cluster. === "Request" ```bash curl --request POST gnmic-api-address:port/api/v1/cluster/members/{id}/drain ``` === "200 OK" ```json ``` ================================================ FILE: docs/user_guide/api/configuration.md ================================================ # Configuration ## /api/v1/config ### `GET /api/v1/config` Request all gnmic configuration Returns the whole configuration as json === "Request" ```bash curl --request GET gnmic-api-address:port/api/v1/config ``` === "200 OK" ```json { "username": "admin", "password": "admin", "port": "57400", "encoding": "json_ietf", "insecure": true, "timeout": 10000000000, "log": true, "max-msg-size": 536870912, "prometheus-address": ":8989", "retry": 10000000000, "api": ":7890", "get-type": "ALL", "set-delimiter": ":::", "subscribe-mode": "stream", "subscribe-stream-mode": "target-defined", "subscribe-cluster-name": "default-cluster", "subscribe-lock-retry": 5000000000, "path-path-type": "xpath", "prompt-max-suggestions": 10, "prompt-prefix-color": "dark_blue", "prompt-suggestions-bg-color": "dark_blue", "prompt-description-bg-color": "dark_gray", "targets": { "192.168.1.131:57400": { "name": "192.168.1.131:57400", "address": "192.168.1.131:57400", "username": "admin", "password": "admin", "timeout": 10000000000, "insecure": true, "skip-verify": false, "buffer-size": 1000, "retry-timer": 10000000000 }, "192.168.1.132:57400": { "name": "192.168.1.132:57400", "address": "192.168.1.131:57400", "username": "admin", "password": "admin", "timeout": 10000000000, "insecure": true, "skip-verify": false, "buffer-size": 1000, "retry-timer": 10000000000 } }, "subscriptions": { "sub1": { "name": "sub1", "paths": [ "/interface/statistics" ], "mode": "stream", "stream-mode": "sample", "encoding": "json_ietf", "sample-interval": 1000000000 } }, "Outputs": { "output2": { "address": "192.168.1.131:4222", "format": "event", "subject": "telemetry", "type": "nats", "write-timeout": "10s" } }, "inputs": {}, "processors": {}, "clustering": { "cluster-name": "cluster1", "instance-name": "gnmic1", "service-address": "gnmic1", "services-watch-timer": 60000000000, "targets-watch-timer": 5000000000, "leader-wait-timer": 5000000000, "locker": { "address": "consul-agent:8500", "type": "consul" } } } ``` === "500 Internal Server Error" ```json { "errors": [ "Error Text" ] } ``` ## /api/v1/config/targets ### `GET /api/v1/config/targets` Request all targets configuration returns the targets configuration as json === "Request" ```bash curl --request GET gnmic-api-address:port/api/v1/config/targets ``` === "200 OK" ```json { "192.168.1.131:57400": { "name": "192.168.1.131:57400", "address": "192.168.1.131:57400", "username": "admin", "password": "admin", "timeout": 10000000000, "insecure": true, "skip-verify": false, "buffer-size": 1000, "retry-timer": 10000000000 }, "192.168.1.132:57400": { "name": "192.168.1.132:57400", "address": "192.168.1.131:57400", "username": "admin", "password": "admin", "timeout": 10000000000, "insecure": true, "skip-verify": false, "buffer-size": 1000, "retry-timer": 10000000000 } } ``` === "404 Not found" ```json { "errors": [ "no targets found", ] } ``` === "500 Internal Server Error" ```json { "errors": [ "Error Text" ] } ``` ### `GET /api/v1/config/targets/{id}` Request a single target configuration Returns a single target configuration as json, where {id} is the target ID === "Request" ```bash curl --request GET gnmic-api-address:port/api/v1/config/targets/192.168.1.131:57400 ``` === "200 OK" ```json { "name": "192.168.1.131:57400", "address": "192.168.1.131:57400", "username": "admin", "password": "admin", "timeout": 10000000000, "insecure": true, "skip-verify": false, "buffer-size": 1000, "retry-timer": 10000000000 } ``` === "404 Not found" ```json { "errors": [ "target $target not found", ] } ``` === "500 Internal Server Error" ```json { "errors": [ "Error Text" ] } ``` ### `POST /api/v1/config/targets` Add a new target to gnmic configuration Expected request body is a single target config as json Returns an empty body if successful. === "Request" ```bash curl --request POST -H "Content-Type: application/json" \ -d '{"name": "10.10.10.10:57400", "address": "10.10.10.10:57400", "username": "admin", "password": "admin", "insecure": true}' \ gnmic-api-address:port/api/v1/config/targets ``` === "200 OK" ```json ``` === "400 Bad Request" ```json ``` === "500 Internal Server Error" ```json { "errors": [ "Error Text" ] } ``` ### `DELETE /api/v1/config/targets/{id}` Deletes a target {id} configuration, all active subscriptions are terminated. Returns an empty body === "Request" ```bash curl --request DELETE gnmic-api-address:port/api/v1/config/targets/192.168.1.131:57400 ``` === "200 OK" ```json ``` ### `PATCH /api/v1/config/targets/{id}/subscriptions` Updates existing subscriptions for the target ID Returns an empty body if successful. === "Request" ```bash curl --request PATCH gnmic-api-address:port/api/v1/config/targets/192.168.1.131:57400/subscriptions -d '{"subscriptions": ["sub1", "sub2"]}' ``` === "200 OK" ```json ``` === "404 Not found" ```json { "errors": [ "target $target not found" ] } ``` === "400 Bad Request" ```json { "errors": [ "subscription $subscription does not exist" ] } ``` === "500 Internal Server Error" ```json { "errors": [ "Error Text" ] } ``` ## /api/v1/config/subscriptions ### `GET /api/v1/config/subscriptions` Request all the configured subscriptions. Returns the subscriptions configuration as json ## /api/v1/config/outputs ### `GET /api/v1/config/outputs` Request all the configured outputs. Returns the outputs configuration as json ## /api/v1/config/inputs ### `GET /api/v1/config/inputs` Request all the configured inputs. Returns the outputs configuration as json ## /api/v1/config/processors ### `GET /api/v1/config/processors` Request all the configured processors. Returns the processors configuration as json ## /api/v1/config/clustering ### `GET /api/v1/config/clustering` Request the clustering configuration. Returns the clustering configuration as json ================================================ FILE: docs/user_guide/api/other.md ================================================ # Other ## /api/v1/healthz ### `GET /api/v1/healthz` Health check endpoint for Kubernetes or similar === "Request" ```bash curl --request GET gnmic-api-address:port/api/v1/healthz ``` === "200 OK" ```json { "status": "healthy" } ``` ## /api/v1/admin/shutdown ### `POST /api/v1/admin/shutdown` Gracefully shut down the application === "Request" ```bash curl --request POST gnmic-api-address:port/api/v1/admin/shutdown ``` ================================================ FILE: docs/user_guide/api/targets.md ================================================ ## `GET /api/v1/targets` Request all active targets details. Returns all active targets as json === "Request" ```bash curl --request GET gnmic-api-address:port/api/v1/targets ``` === "200 OK" ```json { "192.168.1.131:57400": { "config": { "name": "192.168.1.131:57400", "address": "192.168.1.131:57400", "username": "admin", "password": "admin", "timeout": 10000000000, "insecure": true, "skip-verify": false, "buffer-size": 1000, "retry-timer": 10000000000 }, "subscriptions": { "sub1": { "name": "sub1", "paths": [ "/interface/statistics" ], "mode": "stream", "stream-mode": "sample", "encoding": "json_ietf", "sample-interval": 1000000000 } } }, "192.168.1.131:57401": { "config": { "name": "192.168.1.131:57401", "address": "192.168.1.131:57401", "username": "admin", "password": "admin", "timeout": 10000000000, "insecure": true, "skip-verify": false, "buffer-size": 1000, "retry-timer": 10000000000 }, "subscriptions": { "sub1": { "name": "sub1", "paths": [ "/interface/statistics" ], "mode": "stream", "stream-mode": "sample", "encoding": "json_ietf", "sample-interval": 1000000000 } } } } ``` === "404 Not found" ```json { "errors": [ "no targets found" ] } ``` === "500 Internal Server Error" ```json { "errors": [ "Error Text" ] } ``` ## `GET /api/v1/targets/{id}` Query a single target details, if active. Returns a single target if active as json, where {id} is the target ID === "Request" ```bash curl --request GET gnmic-api-address:port/targets/192.168.1.131:57400 ``` === "200 OK" ```json { "config": { "name": "192.168.1.131:57400", "address": "192.168.1.131:57400", "username": "admin", "password": "admin", "timeout": 10000000000, "insecure": true, "skip-verify": false, "buffer-size": 1000, "retry-timer": 10000000000 }, "subscriptions": { "sub1": { "name": "sub1", "paths": [ "/interface/statistics" ], "mode": "stream", "stream-mode": "sample", "encoding": "json_ietf", "sample-interval": 1000000000 } } } ``` === "404 Not found" ```json { "errors": [ "no targets found" ] } ``` === "500 Internal Server Error" ```json { "errors": [ "Error Text" ] } ``` ## `POST /api/v1/targets/{id}` Starts a single target subscriptions, where {id} is the target ID Returns an empty body if successful. === "Request" ```bash curl --request POST gnmic-api-address:port/api/v1/targets/192.168.1.131:57400 ``` === "200 OK" ```json ``` === "404 Not found" ```json { "errors": [ "target $target not found" ] } ``` === "500 Internal Server Error" ```json { "errors": [ "Error Text" ] } ``` ## `DELETE /api/v1/targets/{id}` Stops a single target active subscriptions, where {id} is the target ID Returns an empty body if successful. === "Request" ```bash curl --request DELETE gnmic-api-address:port/api/v1/targets/192.168.1.131:57400 ``` === "200 OK" ```json ``` === "404 Not found" ```json { "errors": [ "target $target not found" ] } ``` === "500 Internal Server Error" ```json { "errors": [ "Error Text" ] } ``` ================================================ FILE: docs/user_guide/caching.md ================================================ `Caching` refers to the process of storing the collected gNMI updates before sending them out to the intended output(s). By default, `gNMIc` outputs send out the received gNMI updates as they arrive (i.e without storing them). A cache is used to store the received updates when the [`gnmi-server`](gnmi_server.md) functionality is enabled and (optionally) when `influxdb` and `prometheus` outputs are enabled to allow for advanced data pipeline processing. Caching messages before writing them to a remote location allows implementing a few use cases like **rate limiting**, **batch processing**, **data replication**, etc. Caching support for other outputs is planned. ### How does it work? When caching is enabled for a certain output, the received gNMI updates are not written directly to the output remote server (for e.g: InfluxDB server), but rather cached locally until the `cache-flush-timer` is reached (in the case of an `influxdb` output) or when the output receives a `Prometheus` scrape request (in the case of a `prometheus` output). The below diagram shows how an InfluxDB output works with and without cache enabled:
The cached gNMI updates are periodically retrieved from the cache in batch then converted to [events](event_processors/intro.md#the-event-format). If [processors](event_processors/intro.md) are defined under the output config section, they are applied to the whole list of events at once. This allows for augmentation of messages with values from other messages even if they where received in separate updates or collected from a different target/subscription. ### Enable caching #### gnmi-server The gNMI server has caching enabled by default. The cache type and its behavior can be tweaked, see [here](#cache-types) ```yaml gnmi-server: # # other gnmi-server related attributes # cache: {} ``` #### outputs Caching can be enabled per output by populating the `cache` attribute under the desired output: ```yaml outputs: output1: type: prometheus # # other output related attributes # cache: {} ``` This enables `output1` to use a cache of type [`oc`](#gnmi-cache). Each output has its own cache. Using a single global cache will be implemented in a future release. ### Distributed caches When running multiple instances of `gNMIc` it's possible to synchronize the collected data between all the instances using a distributed cache. Each output that is configured with a remote cache will write the collected gNMI updates to the remote cache first, then syncs back all the cached data to its local cache then eventually write it to the output.
(1) The received gNMI updates are written to the remote cache. (2) The output syncs the remote cache data to its local cache. (3) The locally cached data is written to the remote output periodically or on scape request. This is useful when different instances collect data from different targets and/or subscriptions. A single instance can be responsible for writing all the collected data to the output or each instance would be writing to a different output. ### Cache types `gNMIc` supports 4 cache types. There is 1 local cache and 3 distributed caches "flavors". The choice of cache to use depends on the use case you are trying to implement. A local cache is local to the `gNMIc` instance i.e not exposed externally, while a distributed cache is external to the `gNMIc` instance, potentially shared by multiple `gNMIc` instances and is always combined with a local cache to sync updates between `gNMIc` instances. #### gNMI cache (local) Is an in-memory gNMI cache based on the Openconfig gNMI cache published [here](https://github.com/openconfig/gnmi/tree/master/cache) This type of cache is ideal when running a single `gNMIc` instance. It is also the default cache type for the gNMI server and for an output when caching is enabled. Configuration: ```yaml outputs: output1: type: prometheus # or influxdb # # other output related fields # cache: type: oc # duration, default: 60s. # updates older than the expiration value will not be read from the cache. expiration: 60s # enable extra logging debug: false ``` #### NATS cache (distributed) Is a cache type that relies on a [NATS server](https://docs.nats.io/) to distribute the collected updates between `gNMIc` instances. This type of cache is useful when multiple `gNMIc` instances are subscribed to different targets and/or different gNMI paths. Configuration: ```yaml outputs: output1: type: prometheus # or influxdb # # other output related fields # cache: type: nats # string, address of the remote NATS server, # if left empty an in memory NATS server will be created an used. address: # string, the NATS server username. username: # string, the NATS server password. password: # string, expiration period of received messages. expiration: 60s # enable extra logging debug: false ``` #### JetStream cache (distributed) Is a cache type that relies on a [JetStream server](https://docs.nats.io/nats-concepts/jetstream) to distribute the collected updates between `gNMIc` instances. This type of cache is useful when multiple `gNMIc` instances are subscribed to different targets and/or different gNMI paths. It is planned to add [gNMI historical subscriptions](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-history.md#1-purpose) support using the `jetstream` cache type. Configuration: ```yaml outputs: output1: type: prometheus # or influxdb # # other output related fields # cache: type: jetstream # string, address of the remote NATS JetStream server, # if left empty an in memory NATS JetStream server will be created an used. address: # string, the JetStream server username. username: # string, the JetStream server password. password: # duration, default: 60s. # Expiration period of received messages. expiration: 60s # int64, default: 1073741824 (1 GiB). # Max number of bytes stored in the cache per subscription. max-bytes: # int64, default: 1048576. # Max number of messages stored per subscription. max-msgs-per-subscription: # int, default 100. # Batch size used by the JetStream pull subscriber. fetch-batch-size: # duration, default 100ms. # Wait time used by the JetStream pull subscriber. fetch-wait-time: # enable extra logging debug: false ``` #### Redis cache (distributed) Is a cache type that relies on a [Redis PUBSUB server](https://redis.io/docs/manual/pubsub/) to distribute the collected updates between `gNMIc` instances. This type of cache is useful when multiple `gNMIc` instances are subscribed to different targets and/or different gNMI paths. ```yaml outputs: output1: type: prometheus # or influxdb # # other output related fields # cache: type: redis # string, redis server address address: # string, the Redis server username. username: # string, the Redis server password. password: # duration, default: 60s. # Expiration period of received messages. expiration: 60s # enable extra logging debug: false ``` ================================================ FILE: docs/user_guide/collector/collector_api.md ================================================ # Collector REST API The collector exposes a REST API for dynamic configuration management and status queries. This API is specific to the collector mode and differs from the API available in subscribe mode. ## Base URL All API endpoints are prefixed with `/api/v1`. For example, if the API server is running on `localhost:7890`: ``` http://localhost:7890/api/v1/targets ``` ## Authentication If TLS is configured with client authentication, requests must include valid client certificates. ## Common Response Formats ### Success Response Most successful responses return JSON with HTTP status 200. ### Error Response Error responses include an `errors` array: ```json { "errors": ["error message 1", "error message 2"] } ``` --- ## Health & Admin Endpoints ### Health Check ``` GET /api/v1/healthz ``` Returns the health status of the collector. **Response:** `200 OK` if healthy ### Shutdown ``` POST /api/v1/admin/shutdown ``` Not implemented in Collector mode --- ## Configuration Endpoints ### Get Full Configuration ``` GET /api/v1/config ``` Returns the current configuration of the collector. ### Apply Configuration ``` POST /api/v1/config/apply ``` Applies a complete configuration to the collector. Resources not included in the request are deleted. **Request Body:** ```json { "targets": { "router1": { "address": "10.0.0.1:57400", "username": "admin", "password": "admin", "skip-verify": true, "subscriptions": ["interfaces"] } }, "subscriptions": { "interfaces": { "paths": ["/interfaces/interface/state/counters"], "mode": "stream", "stream-mode": "sample", "sample-interval": "10s" } }, "outputs": { "prometheus": { "type": "prometheus", "listen": ":9804" } }, "inputs": {}, "processors": {}, "tunnel-target-matches": {} } ``` **Validation Rules:** - If `targets` are provided, at least one `subscription` is required - If `inputs` are provided, at least one `output` is required - Empty request is valid (resets all configuration) **Headers:** - `Content-Encoding: gzip` - Request body is gzip compressed --- ## Targets ### List Targets (Runtime State) ``` GET /api/v1/targets ``` Returns all targets with their runtime state (connection status, active subscriptions). **Response:** ```json [ { "name": "router1", "state": "running", "config": { "address": "10.0.0.1:57400", "username": "admin", "skip-verify": true }, "subscriptions": { "interfaces": { "state": "running" } } } ] ``` ### Get Target (Runtime State) ``` GET /api/v1/targets/{name} ``` Returns a specific target with its runtime state. ### List Target Configurations ``` GET /api/v1/config/targets ``` Returns target configurations (without runtime state). ### Get Target Configuration ``` GET /api/v1/config/targets/{name} ``` ### Create/Update Target ``` POST /api/v1/config/targets ``` **Request Body:** ```json { "name": "router1", "address": "10.0.0.1:57400", "username": "admin", "password": "admin", "skip-verify": true, "subscriptions": ["interfaces"], "outputs": ["prometheus"] } ``` ### Delete Target ``` DELETE /api/v1/config/targets/{name} ``` ### Update Target Subscriptions ``` PATCH /api/v1/config/targets/{name}/subscriptions ``` **Request Body:** ```json { "subscriptions": ["interfaces", "bgp"] } ``` ### Update Target Outputs ``` PATCH /api/v1/config/targets/{name}/outputs ``` **Request Body:** ```json { "outputs": ["prometheus", "influxdb"] } ``` ### Update Target State ``` POST /api/v1/config/targets/{name}/state POST /api/v1/targets/{name}/state/{state} ``` Enable or disable a target. State can be `enabled` or `disabled`. --- ## Subscriptions ### List Subscriptions (Runtime State) ``` GET /api/v1/subscriptions ``` Returns subscriptions with their runtime state (which targets are using them). **Response:** ```json [ { "name": "interfaces", "config": { "paths": ["/interfaces/interface/state/counters"], "mode": "stream", "stream-mode": "sample", "sample-interval": "10s" }, "targets": { "router1": { "state": "running" } } } ] ``` ### Get Subscription (Runtime State) ``` GET /api/v1/subscriptions/{name} ``` ### List Subscription Configurations ``` GET /api/v1/config/subscriptions ``` ### Get Subscription Configuration ``` GET /api/v1/config/subscriptions/{name} ``` ### Create/Update Subscription ``` POST /api/v1/config/subscriptions ``` **Request Body:** ```json { "name": "interfaces", "paths": ["/interfaces/interface/state/counters"], "mode": "stream", "stream-mode": "sample", "sample-interval": "10s", "encoding": "json", "outputs": ["prometheus"] } ``` ### Delete Subscription ``` DELETE /api/v1/config/subscriptions/{name} ``` --- ## Outputs ### List Output Configurations ``` GET /api/v1/config/outputs ``` **Response:** ```json { "prometheus": { "type": "prometheus", "listen": ":9804", "path": "/metrics" } } ``` ### Get Output Configuration ``` GET /api/v1/config/outputs/{name} ``` ### Create/Update Output ``` POST /api/v1/config/outputs ``` **Request Body:** ```json { "name": "prometheus", "type": "prometheus", "listen": ":9804", "path": "/metrics", "event-processors": ["trim-prefixes"] } ``` ### Delete Output ``` DELETE /api/v1/config/outputs/{name} ``` ### Update Output Processors ``` PATCH /api/v1/config/outputs/{name}/processors ``` **Request Body:** ```json { "event-processors": ["processor1", "processor2"] } ``` **Note:** Currently returns `501 Not Implemented`. --- ## Inputs ### List Input Configurations ``` GET /api/v1/config/inputs ``` **Response:** ```json { "nats-input": { "type": "nats", "address": "nats://localhost:4222", "subject": "telemetry.>" } } ``` ### Get Input Configuration ``` GET /api/v1/config/inputs/{name} ``` ### Create/Update Input ``` POST /api/v1/config/inputs ``` **Request Body:** ```json { "name": "nats-input", "type": "nats", "address": "nats://localhost:4222", "subject": "telemetry.>", "outputs": ["prometheus"], "event-processors": ["add-tags"] } ``` ### Delete Input ``` DELETE /api/v1/config/inputs/{name} ``` ### Update Input Processors ``` PATCH /api/v1/config/inputs/{name}/processors ``` **Note:** Currently returns `501 Not Implemented`. ### Update Input Outputs ``` PATCH /api/v1/config/inputs/{name}/outputs ``` **Note:** Currently returns `501 Not Implemented`. --- ## Processors ### List Processor Configurations ``` GET /api/v1/config/processors ``` **Response:** ```json [ { "name": "trim-prefixes", "type": "event-strings", "config": { "value-names": [".*"], "transforms": [...] } } ] ``` ### Get Processor Configuration ``` GET /api/v1/config/processors/{name} ``` ### Create/Update Processor ``` POST /api/v1/config/processors ``` ### Delete Processor ``` DELETE /api/v1/config/processors/{name} ``` --- ## Tunnel Target Matches ### List Tunnel Target Matches ``` GET /api/v1/config/tunnel-target-matches ``` ### Get Tunnel Target Match ``` GET /api/v1/config/tunnel-target-matches/{name} ``` ### Create/Update Tunnel Target Match ``` POST /api/v1/config/tunnel-target-matches ``` **Request Body:** ```json { "name": "srl-devices", "target-type": "srlinux", "subscriptions": ["interfaces"], "outputs": ["prometheus"] } ``` ### Delete Tunnel Target Match ``` DELETE /api/v1/config/tunnel-target-matches/{name} ``` --- ## Cluster Endpoints ### Get Cluster Status ``` GET /api/v1/cluster ``` Returns the current cluster status including membership and target distribution. ### Get Leader ``` GET /api/v1/cluster/leader ``` Returns information about the current cluster leader. ### Release Leadership ``` DELETE /api/v1/cluster/leader ``` Forces the current leader to release leadership (triggers new election). ### Get Members ``` GET /api/v1/cluster/members ``` Returns list of cluster members with their status. ### Drain Instance ``` POST /api/v1/cluster/members/{id}/drain ``` Drains all targets from a specific instance (moves them to other instances). ### Rebalance ``` POST /api/v1/cluster/rebalance ``` Triggers a rebalance of targets across cluster members. ### Move Target ``` POST /api/v1/cluster/move ``` Moves a specific target to a different instance. **Request Body:** ```json { "target": "router1", "instance": "collector-2" } ``` --- ## Assignments ### List Assignments ``` GET /api/v1/assignments ``` Returns current target-to-instance assignments. ### Get Assignment ``` GET /api/v1/assignments/{target} ``` ### Create Assignment ``` POST /api/v1/assignments ``` Manually assign a target to an instance. ### Delete Assignment ``` DELETE /api/v1/assignments/{target} ``` --- ## Metrics ``` GET /metrics ``` Returns Prometheus metrics for the collector (if `enable-metrics: true` in api-server config). --- ## Examples ### Using curl ```bash # List all targets curl http://localhost:7890/api/v1/targets # Create a target curl -X POST http://localhost:7890/api/v1/config/targets \ -H "Content-Type: application/json" \ -d '{ "name": "router1", "address": "10.0.0.1:57400", "username": "admin", "password": "admin", "skip-verify": true, "subscriptions": ["interfaces"] }' # Delete a target curl -X DELETE http://localhost:7890/api/v1/config/targets/router1 # Apply full configuration curl -X POST http://localhost:7890/api/v1/config/apply \ -H "Content-Type: application/json" \ -d @config.json # Apply gzipped configuration curl -X POST http://localhost:7890/api/v1/config/apply \ -H "Content-Type: application/json" \ -H "Content-Encoding: gzip" \ --data-binary @config.json.gz ``` ### Using gnmic CLI The collector subcommands use the same API endpoints: ```bash # Uses GET /api/v1/targets gnmic --config collector.yaml collect targets list # Uses GET /api/v1/targets/{name} gnmic --config collector.yaml collect targets get --name router1 # Uses POST /api/v1/config/targets gnmic --config collector.yaml collect targets set --input target.yaml # Uses DELETE /api/v1/config/targets/{name} gnmic --config collector.yaml collect targets delete --name router1 ``` ================================================ FILE: docs/user_guide/collector/collector_configuration.md ================================================ # Collector Configuration This page describes the configuration options specific to the collector mode. For general configuration options (targets, subscriptions, outputs, inputs, processors), refer to their respective documentation pages. ## API Server The API server is required for the collector to accept configuration changes and serve status information. ```yaml api-server: # string, address to listen on in the form "host:port" # the host part can be omitted to listen on all interfaces address: :7890 # duration, request timeout # split equally between read and write timeouts timeout: 10s # TLS configuration for secure API access tls: # string, path to CA certificate file # used to verify client certificates ca-file: # string, path to server certificate file cert-file: # string, path to server private key file key-file: # string, client authentication mode # one of: "", "request", "require", "verify-if-given", "require-verify" # # - "": no client certificate requested # - "request": request certificate, don't require it, don't verify # - "require": require certificate, don't verify # - "verify-if-given": request certificate, verify if provided # - "require-verify": require and verify certificate # # defaults to "" if no ca-file, "require-verify" if ca-file is set client-auth: "" # boolean, enable Prometheus metrics endpoint at /metrics enable-metrics: false # boolean, enable debug logging for API requests debug: false ``` ## Clustering Clustering enables multiple collector instances to work together for high availability and load distribution. ```yaml clustering: # string, cluster name # instances with the same cluster name form a cluster # used in leader lock key and target lock keys # defaults to "default-cluster" cluster-name: default-cluster # string, unique instance name within the cluster # used as value in target locks and leader lock # defaults to "gnmic-$UUID" if not set instance-name: "" # string, service address to register with the locker (e.g., Consul) # defaults to the address part of api-server address service-address: "" # duration, how long to watch for service changes (Consul blocking query) # defaults to 60s services-watch-timer: 60s # duration, interval between target distribution checks by the leader # defaults to 20s targets-watch-timer: 20s # duration, max time to wait for an instance to lock an assigned target # if exceeded, leader reassigns the target to another instance # defaults to 10s target-assignment-timeout: 10s # duration, time to wait after becoming leader before distributing targets # allows other instances to register their API services # defaults to 5s leader-wait-timer: 5s # tags used for target placement decisions # targets with matching tags are preferentially assigned to this instance tags: [] # locker configuration (required for clustering) locker: # string, locker type type: consul # string, locker server address address: localhost:8500 # string, datacenter name (Consul-specific) datacenter: dc1 # string, username for HTTP basic auth username: # string, password for HTTP basic auth password: # string, ACL token token: # duration, session TTL session-ttl: 10s # duration, delay before lock can be acquired after release delay: 5s # duration, time between lock retry attempts retry-timer: 2s # boolean, enable debug logging debug: false ``` ## Tunnel Server The tunnel server accepts connections from gNMI tunnel targets. ```yaml tunnel-server: # string, address to listen on address: :57401 # TLS configuration tls: ca-file: cert-file: key-file: client-auth: "" # boolean, enable debug logging debug: false ``` ## Tunnel Target Matches Define rules for handling tunnel target connections. ```yaml tunnel-target-matches: # match rule name match-all: # string, target id to match (from tunnel target Register RPC) id: "*" # string, target type to match, typically GNOI_GNMI (from tunnel target Register RPC) type: "GNMMI_GNOI" # list of subscription names to apply subscriptions: - interfaces - system # list of output names to send data to outputs: - prometheus ``` Note that tunnel-target-matches are not processed in any specific order. It's adviced to make sure there is no overlap between the rules `type` and `id`. ## Complete Example ```yaml # API server (required) api-server: address: :7890 timeout: 10s enable-metrics: true # Clustering (optional, for HA) clustering: cluster-name: production-cluster instance-name: collector-1 locker: type: consul address: consul.service.consul:8500 session-ttl: 10s # gNMI server gnmi-server: address: :57400 skip-verify: true cache: type: oc expiration: 60s # Tunnel server tunnel-server: address: :57401 # Tunnel target matches tunnel-target-matches: srl-devices: id: router1 type: "GNMI_GNOI" subscriptions: - interfaces outputs: - prometheus # Targets targets: spine1: address: 10.0.0.1:57400 username: admin password: admin skip-verify: true subscriptions: - interfaces - bgp outputs: - prometheus # Subscriptions subscriptions: interfaces: paths: - /interfaces/interface/state/counters mode: stream stream-mode: sample sample-interval: 10s bgp: paths: - /network-instances/network-instance/protocols/protocol/bgp mode: stream stream-mode: on-change # Outputs outputs: prometheus: type: prometheus listen: :9804 path: /metrics event-processors: - trim-prefixes # Processors processors: trim-prefixes: event-strings: value-names: - ".*" transforms: - trim-prefix: apply-on: name prefix: /interfaces/interface/state/ ``` ================================================ FILE: docs/user_guide/collector/collector_intro.md ================================================ # Collector Mode ## Introduction The Collector mode (`gnmic collect --config `) is ideal for a long-running telemetry collection service. While the `subscribe` command is designed for interactive use and ad-hoc data collection, the `collect` command is optimized for continuous operation with dynamic configuration capabilities. ## Dynamic Configuration Unlike gNMIc running with the subscribe command, the collector allows runtime modifications without restarts. You can add, update or remove **Targets**, **Susbcriptions**, **Outputs**, **Processors** and **Inputs**. All the changes are applied at runtime. All configuration changes are made via a REST API. ## Clustering Multiple collector instances can form a cluster just like gNMIc subscribe. The cluster uses a distributed locker, such as **Consul**, for: - Leader election - Target assignment coordination - Instance membership tracking ## Tunnel Target Support The collector supports gRPC tunnel, it will accept connections from gNMI tunnel targets. The tunnel target configutation is done using tunnel-target-matches. ## Comparison with Subscribe Command | Feature | `subscribe` Command | `collect` Command | |---------|---------------------|-------------------| | Configuration | Static (file/flags) | Dynamic (both file and REST API) | | Target management | Fixed at startup or using loaders | startup file, loaders or REST API | | Subscription management | Fixed at startup, can be modified using the REST API but requires a target restart to get applied | Add/update/remove at runtime using REST API | | Output management | Fixed at startup | Add/update/remove at runtime using REST API | | Tunnel targets | Fixed at startup | dynamic using target tunnel matching rules | ## Getting Started 1. Create a configuration file with at minimum the `api-server` section 2. Start the collector: `gnmic --config collector.yaml collect` 3. Use the REST API or CLI subcommands to manage configuration See [Collector Configuration](./collector_configuration.md) for detailed configuration options and [Collector REST API](./collector_api.md) for API reference. ================================================ FILE: docs/user_guide/configuration_env.md ================================================ `gnmic` can be configured using environment variables, it will read the environment variables starting with `GNMIC_`. The Env variable names are inline with the flag names as well as the configuration hierarchy. For e.g to set the gNMI username, the env variable `GNMIC_USERNAME` should be set. ### Constructing environment variables names #### Flags to environment variables mapping Global flags to env variable name mapping: | **Flag name** | **ENV variable name** | | -------------------- | ------------------------ | | --address | GNMIC_ADDRESS | | --encoding | GNMIC_ENCODING | | --format | GNMIC_FORMAT | | --insecure | GNMIC_INSECURE | | --log | GNMIC_LOG | | --log-file | GNMIC_LOG_FILE | | --no-prefix | GNMIC_NO_PREFIX | | --password | GNMIC_PASSWORD | | --prometheus-address | GNMIC_PROMETHEUS_ADDRESS | | --proxy-from-env | GNMIC_PROXY_FROM_ENV | | --retry | GNMIC_RETRY | | --skip-verify | GNMIC_SKIP_VERIFY | | --timeout | GNMIC_TIMEOUT | | --tls-ca | GNMIC_TLS_CA | | --tls-cert | GNMIC_TLS_CERT | | --tls-key | GNMIC_TLS_KEY | | --tls-max-version | GNMIC_TLS_MAX_VERSION | | --tls-min-version | GNMIC_TLS_MIN_VERSION | | --tls-version | GNMIC_TLS_VERSION | | --log-tls-secret | GNMIC_LOG_TLS_SECRET | | --username | GNMIC_USERNAME | | --cluster-name | GNMIC_CLUSTER_NAME | | --instance-name | GNMIC_INSTANCE_NAME | | --proto-file | GNMIC_PROTO_FILE | | --proto-dir | GNMIC_PROTO_DIR | | --token | GNMIC_TOKEN | #### Configuration file to environment variables mapping For configuration items that do not have a corresponding flag, the env variable will be constructed from the path elements to the variable name joined with a `_`. For e.g to set the clustering locker address, as in the yaml blob below: ```yaml clustering: locker: address: ``` the env variable `GNMIC_CLUSTERING_LOCKER_ADDRESS` should be set !!! note - Configuration items of type list cannot be set using env vars. - Intermediate configuration keys should not contain `_` or `-`. Example: ```yaml outputs: output1: # <-- should not contain `_` or `-` type: prometheus listen: :9804 ``` Is equivalent to: `GNMIC_OUTPUTS_OUTPUT1_TYPE=prometheus` `GNMIC_OUTPUTS_OUTPUT1_LISTEN=:9804` ================================================ FILE: docs/user_guide/configuration_file.md ================================================ `gnmic` configuration by means of the command line flags is both consistent and reliable. But sometimes its not the best way forward. With lots of configuration options that `gnmic` supports it might get tedious to pass them all via CLI flags. In cases like that the file-based configuration comes handy. With a configuration file a user can specify all the command line flags by means of a single file. `gnmic` will read this file and retrieve the configuration options from it. ### What options can be in a file? Configuration file allows a user to specify everything that can be supplied over the CLI and more. #### Global flags All of the [global](#global-flags) flags can be put in a conf file. Consider the following example of a typical configuration file in YAML format: ```yaml # gNMI target address; CLI flag `--address` address: "10.0.0.1:57400" # gNMI target user name; CLI flag `--username` username: admin # gNMI target user password; CLI flag `--password` password: NokiaSrl1! # connection mode; CLI flag `--insecure` insecure: true # log file location; CLI flag `--log-file` log-file: /tmp/gnmic.log ``` With such a file located at a default path the gNMI requests can be made in a very short and concise form: ```bash # configuration file is read by its default path gnmi capabilities # cfg file has all the global options set, so only the local flags are needed gnmi get --path /configure/system/name ``` #### Local flags Local flags have the scope of the command where they have been defined. Local flags can be put in a configuration file as well. To avoid flags names overlap between the different commands a command name should prepend the flag name - `-`. So, for example, we can provide the [`path`](../cmd/get.md#path) flag of a [`get`](../cmd/get.md) command in the file by adding the `get-` prefix to the local flag name: ```yaml address: "router.lab:57400" username: admin password: NokiaSrl1! insecure: true get-path: /configure/system/name # `get` command local flag ``` Another example: the [`update-path`](../cmd/set.md#1-in-line-update-implicit-type) flag of a [`set`](../cmd/set.md) will be `set-update-path` in the configuration file. #### Targets It is possible to specify multiple targets with different configurations (credentials, timeout,...). This is described in [Multiple targets](targets/targets.md) documentation article. #### Subscriptions It is possible to specify multiple subscriptions and associate them with different targets in a flexible way. This configuration option is described in [Multiple subscriptions](subscriptions.md) documentation article. #### Outputs The other mode `gnmic` supports (in contrast to CLI) is running as a daemon and exporting the data received from gNMI subscriptions to [multiple outputs](outputs/output_intro.md) like stan/nats, kafka, file, prometheus, influxdb, etc... #### Inputs `gnmic` supports reading gNMI data from a set of [inputs](inputs/input_intro.md) and export the data to any of the configured outputs. This is used when building data pipelines with `gnmic` ### Repeated flags If a flag can appear more than once on the CLI, it can be represented as a list in the file. For example one can set multiple paths for get/set/subscribe operations. In the following example we define multiple paths for the [`get`](../cmd/get.md) command to operate on: ```yaml address: "router.lab:57400" username: admin password: NokiaSrl1! insecure: true get-path: - /configure/system/name - /state/system/version ``` ### Options preference Configuration passed via CLI flags and Env variables take precedence over the file config. ### Environment variables in file Environment variables can be used in the configuration file and will be expanded at the time the configuration is read. ```yaml outputs: output1: type: nats address: ${NATS_IP}:4222 ``` ================================================ FILE: docs/user_guide/configuration_flags.md ================================================ `gnmic` supports a set of global flags, applicable to all sub commands, as well as local flags which are specific to each sub command. - [Global flags](../global_flags.md) - Local flags: - [Capabilities](../cmd/capabilities.md) - [Get](../cmd/get.md) - [Set](../cmd/set.md) - [Subscribe](../cmd/subscribe) - [Prompt](../cmd/prompt.md) - [Path](../cmd/path.md) - [Listen](../cmd/listen.md) ================================================ FILE: docs/user_guide/configuration_intro.md ================================================ `gnmic` reads configuration from three different sources, [Global and local flags](configuration_flags.md), [environment variables](configuration_env.md) and [local system file](configuration_file.md). The different sources follow a precedence order where a configuration variable from a source take precedence over the next one in the below list: - global and local flags - Environment variables - configuration file ## Flags See [here](configuration_flags.md) for a complete list of the supported global and local flags. ## Environment variables `gnmic` can also be configured using environment variables, it will read the environment variables starting with `GNMIC_`. The Env variable names are inline with the flag names as well as the configuration hierarchy. See [here](configuration_env.md) for more details on environment variables. ## File configuration Configuration file that `gnmic` reads must be in one of the following formats: JSON, YAML, TOML, HCL or Properties. By default, `gnmic` will search for a file named `.gnmic.[yml/yaml, toml, json]` in the following locations and will use the first file that exists: * `$PWD` * `$HOME` * `$XDG_CONFIG_HOME` * `$XDG_CONFIG_HOME/gnmic` The default path can be overridden with [`--config`](../global_flags.md#config) flag. ```bash # config file default path is : # $PWD/.gnmic.[yml, toml, json], or # $HOME/.gnmic.[yml, toml, json], or # $XDG_CONFIG_HOME/.gnmic.[yml, toml, json], or # $XDG_CONFIG_HOME/gnmic/.gnmic.[yml, toml, json] gnmic capabilities # read `cfg.yml` file located in the current directory gnmic --config ./cfg.yml capabilities ``` If the file referenced by `--config` flag is not present, the default path won't be tried. Example of the `gnmic` config files are provided in the following formats: [YAML](https://github.com/openconfig/gnmic/blob/main/config.yaml), [JSON](https://github.com/openconfig/gnmic/blob/main/config.json), [TOML](https://github.com/openconfig/gnmic/blob/main/config.toml). ================================================ FILE: docs/user_guide/event_processors/event_add_tag.md ================================================ The `event-add-tag` processor adds a set of tags to an event message if one of the configured regular expressions in the values, value names, tags or tag names sections matches. It is possible to overwrite a tag if it's name already exists. ```yaml processors: # processor name sample-processor: # processor type event-add-tag: # jq expression, if evaluated to true, the tags are added condition: # list of regular expressions to be matched against the tags names, if matched, the tags are added tag-names: # list of regular expressions to be matched against the tags values, if matched, the tags are added tags: # list of regular expressions to be matched against the values names, if matched, the tags are added value-names: # list of regular expressions to be matched against the values, if matched, the tags are added values: # list of regular expressions to be matched against the deleted paths, if matched, the tags are added deletes: # boolean, if true tags are over-written with the added ones if they already exist. overwrite: # map of tags to be added add: tag_name: tag_value ``` ### Examples ```yaml processors: # processor name sample-processor: # processor type event-add-tag: value-names: - "." add: tag_name: tag_value ``` === "Event format before" ```json { "name": "sub1", "timestamp": 1607678293684962443, "tags": { "interface_name": "mgmt0", "source": "172.20.20.5:57400" }, "values": { "Carrier_Transitions": 1, "In_Broadcast_Packets": 448, "In_Error_Packets": 0, "In_Fcs_Error_Packets": 0, "In_Multicast_Packets": 47578, "In_Octets": 15557349, "In_Unicast_Packets": 6482, "Out_Broadcast_Packets": 110, "Out_Error_Packets": 0, "Out_Multicast_Packets": 10, "Out_Octets": 464766 } } ``` === "Event format after" ```json { "name": "sub1", "timestamp": 1607678293684962443, "tags": { "interface_name": "mgmt0", "source": "172.20.20.5:57400", "tag_name": "tag_value" }, "values": { "Carrier_Transitions": 1, "In_Broadcast_Packets": 448, "In_Error_Packets": 0, "In_Fcs_Error_Packets": 0, "In_Multicast_Packets": 47578, "In_Octets": 15557349, "In_Unicast_Packets": 6482, "Out_Broadcast_Packets": 110, "Out_Error_Packets": 0, "Out_Multicast_Packets": 10, "Out_Octets": 464766 } } ``` ================================================ FILE: docs/user_guide/event_processors/event_allow.md ================================================ The `event-allow` processor allows only messages matching the configured `condition` or one of the regular expressions under `tags`, `tag-names`, `values` or `value-names`. Non matching messages are dropped. ```yaml processors: # processor name sample-processor: # processor type event-allow: # jq expression, if evaluated to true, the message is allowed condition: # list of regular expressions to be matched against the tags names, # if matched, the message is allowed tag-names: # list of regular expressions to be matched against the tags values, # if matched, the message is allowed tags: # list of regular expressions to be matched against the values names, # if matched, the message is allowed value-names: # list of regular expressions to be matched against the values, # if matched, the message is allowed values: ``` ### Examples ```yaml processors: # processor name allow-processor: # processor type event-allow: condition: ".tags.interface_name == 1/1/1" ``` === "Event format before" ```json [ { "name": "default", "timestamp": 1607291271894072397, "tags": { "interface_name": "mgmt0", "source": "172.23.23.2:57400", "subscription-name": "default" }, "values": { "/srl_nokia-interfaces:interface/statistics/carrier-transitions": "1", "/srl_nokia-interfaces:interface/statistics/in-broadcast-packets": "3797", "/srl_nokia-interfaces:interface/statistics/in-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-multicast-packets": "288033", "/srl_nokia-interfaces:interface/statistics/in-octets": "65382630", "/srl_nokia-interfaces:interface/statistics/in-unicast-packets": "107154", "/srl_nokia-interfaces:interface/statistics/out-broadcast-packets": "614", "/srl_nokia-interfaces:interface/statistics/out-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/out-multicast-packets": "11", "/srl_nokia-interfaces:interface/statistics/out-octets": "64721394", "/srl_nokia-interfaces:interface/statistics/out-unicast-packets": "105876" } }, { "name": "default", "timestamp": 1607291271894072397, "tags": { "interface_name": "1/1/1", "source": "172.23.23.3:57400", "subscription-name": "default" }, "values": { "/srl_nokia-interfaces:interface/statistics/carrier-transitions": "1", "/srl_nokia-interfaces:interface/statistics/in-broadcast-packets": "3797", "/srl_nokia-interfaces:interface/statistics/in-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-multicast-packets": "288033", "/srl_nokia-interfaces:interface/statistics/in-octets": "65382630", "/srl_nokia-interfaces:interface/statistics/in-unicast-packets": "107154", "/srl_nokia-interfaces:interface/statistics/out-broadcast-packets": "614", "/srl_nokia-interfaces:interface/statistics/out-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/out-multicast-packets": "11", "/srl_nokia-interfaces:interface/statistics/out-octets": "64721394", "/srl_nokia-interfaces:interface/statistics/out-unicast-packets": "105876" } } ] ``` === "Event format after" ```json [ { }, { "name": "default", "timestamp": 1607291271894072397, "tags": { "interface_name": "1/1/1", "source": "172.23.23.3:57400", "subscription-name": "default" }, "values": { "/srl_nokia-interfaces:interface/statistics/carrier-transitions": "1", "/srl_nokia-interfaces:interface/statistics/in-broadcast-packets": "3797", "/srl_nokia-interfaces:interface/statistics/in-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-multicast-packets": "288033", "/srl_nokia-interfaces:interface/statistics/in-octets": "65382630", "/srl_nokia-interfaces:interface/statistics/in-unicast-packets": "107154", "/srl_nokia-interfaces:interface/statistics/out-broadcast-packets": "614", "/srl_nokia-interfaces:interface/statistics/out-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/out-multicast-packets": "11", "/srl_nokia-interfaces:interface/statistics/out-octets": "64721394", "/srl_nokia-interfaces:interface/statistics/out-unicast-packets": "105876" } } ] ``` ================================================ FILE: docs/user_guide/event_processors/event_combine.md ================================================ The `event-combine` processor combines multiple processors together. This allows to declare processors once and reuse them to build more complex processors. ### Configuration ```yaml processors: # processor name pipeline1: # processor type event-combine: # list of regex to be matched with the values names processors: # The "sub" processor execution condition. A jq expression. - condition: # the processor name, should be declared in the # `processors` section. name: # enable extra logging debug: false ``` ### Conditional Execution of Subprocessors The workflow for processing event messages can include multiple subprocessors, each potentially governed by its own condition. These conditions are defined using the jq query language, enabling dynamic and precise control over when each subprocessor should be executed. ### Defining Conditions for Subprocessors When configuring your subprocessors, you have the option to attach a jq-based condition to each one. The specified condition acts as a gatekeeper, determining whether the corresponding subprocessor should be activated for a particular event message. ### Condition Evaluation Process For a subprocessor to run, the following criteria must be met: Condition Presence: If a condition is specified for the subprocessor, it must be evaluated. Condition Outcome: The result of the jq condition evaluation must be true. Combined Conditions: In scenarios where both the main processor and the subprocessor have associated conditions, both conditions must independently evaluate to true for the subprocessor to be triggered. Only when all relevant conditions are met will the subprocessor execute its designated operations on the event message. It is important to note that the absence of a condition is equivalent to a condition that always evaluates to true. Thus, if no condition is provided for a subprocessor, it will execute as long as the main processor's condition (if any) is met. By using conditional execution, you can build sophisticated and efficient event message processing workflows that react dynamically to the content of the messages. ### Examples In the below example, we define 3 regular processors and 2 `event-combine` processors. - `proc1`: Allows event message that have tag `"interface_name = ethernet-1/1` - `proc2`: Renames values names to their path base. e.g: `interface/statistics/out-octets` --> `out-octets` - `proc3`: Converts any values with a name ending with `octets` to `int`. - `pipeline1`: Combines `proc1`, `proc2` and `proc3`, applying `proc2` only to subscription `sub1` - `pipeline2`: Combines `proc2` and `proc3`, applying `proc2` only to subscription `sub2` The 2 combine processors can be linked with different outputs. ```yaml processors: proc1: event-allow: condition: '.tags.interface_name == "ethernet-1/1"' proc2: event-strings: value-names: - ".*" transforms: - path-base: apply-on: "name" proc3: event-convert: value-names: - ".*octets$" type: int pipeline1: event-combine: processors: - name: proc1 - condition: '.tags["subscription-name"] == "sub1"' name: proc2 - name: proc3 pipeline2: event-combine: processors: - condition: '.tags["subscription-name"] == "sub2"' name: proc2 - name: proc3 ``` ================================================ FILE: docs/user_guide/event_processors/event_convert.md ================================================ The `event-convert` processor converts the values matching one of the regular expressions to a specific type: `uint`, `int`, `string`, `float` or `bool` ### Examples ```yaml processors: # processor name convert-int-processor: # processor type event-convert: # list of regex to be matched with the values names value-names: - ".*octets$" # the desired value type, one of: int, uint, string, float, bool type: int ``` === "Event format before" ```json { "name": "default", "timestamp": 1607290633806716620, "tags": { "port_port-id": "A/1", "source": "172.17.0.100:57400", "subscription-name": "default" }, "values": { "/state/port/ethernet/statistics/in-octets": "7753940" } } ``` === "Event format after" ```json { "name": "default", "timestamp": 1607290633806716620, "tags": { "port_port-id": "A/1", "source": "172.17.0.100:57400", "subscription-name": "default" }, "values": { "/state/port/ethernet/statistics/in-octets": 7753940 } } ``` ================================================ FILE: docs/user_guide/event_processors/event_data_convert.md ================================================ The `event-data-convert` processor converts data values matching one of the regular expressions from/to a specific data unit: | Symbol | Unit | Symbol | Unit | Symbol | Unit | | ------ | ------- | ------ | --------- | --------| -------- | | `b` | Bit | `B` | Byte | `KiB` | KibiByte | | `kb` | kiloBit | `KB` | KiloByte | `MiB` | MebiByte | | `mb` | MegaBit | `MB` | MegaByte | `GiB` | GibiByte | | `gb` | GigaBit | `GB` | GigaByte | `TiB` | TebiByte | | `tb` | TeraBit | `TB` | TeraByte | `EiB` | ExbiByte | | `eb` | ExaBit | `EB` | ExaByte | `ZiB` | ZebiByte | | | | `ZB` | ZetaByte | `YiB` | YobiByte | | | | `YB` | YottaByte | | | The source values can be of any numeric type including a string with or without a unit, e.g: `2.3`, `1KB` or `1.1 TB`. The unit of the original value can be derived as `Byte` from its name if it ends with `-bytes`, `-octets`, `_bytes` or `_octets`. ### Examples #### simple conversion The below processor will convert any value with a name ending in `-octets` from `Byte` to `KiloByte`. ```yaml processors: # processor name convert-data-unit: # processor type event-data-convert: # list of regex to be matched with the values names value-names: - ".*-octets$" # the source value unit, defaults to B (Byte) from: B # the desired value unit, defaults to B (Byte) to: KB # keep the original value, # a new value name will be added with the converted value, # the new value name will be the original name with _$to as suffix # if no regex renaming is defined using `old` and `new` keep: false # old, a regex to be used to rename the converted value old: # new, the replacement string new: # debug, enables this processor logging debug: false ``` === "Event format before" ```json { "name": "default", "timestamp": 1607290633806716620, "tags": { "port_port-id": "A/1", "source": "172.17.0.100:57400", "subscription-name": "default" }, "values": { "/state/port/ethernet/statistics/in-octets": "2048" } } ``` === "Event format after" ```json { "name": "default", "timestamp": 1607290633806716620, "tags": { "port_port-id": "A/1", "source": "172.17.0.100:57400", "subscription-name": "default" }, "values": { "/state/port/ethernet/statistics/in-octets": 2 } } ``` #### conversion with renaming The below data convert processor converts any value with a name ending in `-octets` from Byte to Kilobyte. It will retain the original value while renaming the new value name by replacing `-octets` with `-kilobytes`. ```yaml processors: # processor name convert-data-unit: # processor type event-data-convert: # list of regex to be matched with the values names value-names: - ".*-octets$" # the source value unit, defaults to B (Byte) from: B # the desired value unit, defaults to B (Byte) to: KB # keep the original value, # a new value name will be added with the converted value, # the new value name will be the original name with _$to as suffix # if no regex renaming is defined using `old` and `new` keep: true # old, a regex to be used to rename the converted value old: ^(\S+)-octets$ # new, the replacement string new: ${1}-kilobytes # debug, enables this processor logging debug: false ``` === "Event format before" ```json { "name": "default", "timestamp": 1607290633806716620, "tags": { "port_port-id": "A/1", "source": "172.17.0.100:57400", "subscription-name": "default" }, "values": { "/state/port/ethernet/statistics/in-octets": "2048" } } ``` === "Event format after" ```json { "name": "default", "timestamp": 1607290633806716620, "tags": { "port_port-id": "A/1", "source": "172.17.0.100:57400", "subscription-name": "default" }, "values": { "/state/port/ethernet/statistics/in-octets": "2048" "/state/port/ethernet/statistics/in-kilobytes": 2 } } ``` ================================================ FILE: docs/user_guide/event_processors/event_date_string.md ================================================ The `event-date-string` processor converts a specific timestamp value (under tags or values) to a string representation. The format and location can be configured. ### Examples ```yaml processors: # processor name convert-timestamp-processor: # processor type event-date-string: # list of regex to be matched with the values names value-names: - "timestamp" # received timestamp unit precision: ms # desired date string format, defaults to RFC3339 format: "2006-01-02T15:04:05Z07:00" # timezone, defaults to the local timezone location: Asia/Taipei ``` ================================================ FILE: docs/user_guide/event_processors/event_delete.md ================================================ The `event-delete` processor deletes all tags or values matching a set of regular expressions from the event message. ### Examples ```yaml processors: # processor name delete-processor: # processor type event-delete: value-names: - ".*multicast.*" - ".*broadcast.*" ``` === "Event format before" ```json { "name": "default", "timestamp": 1607291271894072397, "tags": { "interface_name": "mgmt0", "source": "172.23.23.2:57400", "subscription-name": "default" }, "values": { "/srl_nokia-interfaces:interface/statistics/carrier-transitions": "1", "/srl_nokia-interfaces:interface/statistics/in-broadcast-packets": "3797", "/srl_nokia-interfaces:interface/statistics/in-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-multicast-packets": "288033", "/srl_nokia-interfaces:interface/statistics/in-octets": "65382630", "/srl_nokia-interfaces:interface/statistics/in-unicast-packets": "107154", "/srl_nokia-interfaces:interface/statistics/out-broadcast-packets": "614", "/srl_nokia-interfaces:interface/statistics/out-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/out-multicast-packets": "11", "/srl_nokia-interfaces:interface/statistics/out-octets": "64721394", "/srl_nokia-interfaces:interface/statistics/out-unicast-packets": "105876" } } ``` === "Event format after" ```json { "name": "default", "timestamp": 1607291271894072397, "tags": { "interface_name": "mgmt0", "source": "172.23.23.2:57400", "subscription-name": "default" }, "values": { "/srl_nokia-interfaces:interface/statistics/carrier-transitions": "1", "/srl_nokia-interfaces:interface/statistics/in-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-octets": "65382630", "/srl_nokia-interfaces:interface/statistics/in-unicast-packets": "107154", "/srl_nokia-interfaces:interface/statistics/out-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/out-octets": "64721394", "/srl_nokia-interfaces:interface/statistics/out-unicast-packets": "105876" } } ``` ================================================ FILE: docs/user_guide/event_processors/event_drop.md ================================================ The `event-drop` processor drops the whole message if it matches the configured `condition` or one of the regexes under`tags`, `tag-names`, `values` or `value-names`. ```yaml processors: # processor name sample-processor: # processor type event-drop: # jq expression, if evaluated to true, the message is dropped condition: # list of regular expressions to be matched against the tags names, if matched, the message is dropped tag-names: # list of regular expressions to be matched against the tags values, if matched, the message is dropped tags: # list of regular expressions to be matched against the values names, if matched, the message is dropped value-names: # list of regular expressions to be matched against the values, if matched, the message is dropped values: ``` ### Examples ```yaml processors: # processor name drop-processor: # processor type event-drop: tags: - "172.23.23.2*" ``` === "Event format before" ```json { "name": "default", "timestamp": 1607291271894072397, "tags": { "interface_name": "mgmt0", "source": "172.23.23.2:57400", "subscription-name": "default" }, "values": { "/srl_nokia-interfaces:interface/statistics/carrier-transitions": "1", "/srl_nokia-interfaces:interface/statistics/in-broadcast-packets": "3797", "/srl_nokia-interfaces:interface/statistics/in-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-multicast-packets": "288033", "/srl_nokia-interfaces:interface/statistics/in-octets": "65382630", "/srl_nokia-interfaces:interface/statistics/in-unicast-packets": "107154", "/srl_nokia-interfaces:interface/statistics/out-broadcast-packets": "614", "/srl_nokia-interfaces:interface/statistics/out-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/out-multicast-packets": "11", "/srl_nokia-interfaces:interface/statistics/out-octets": "64721394", "/srl_nokia-interfaces:interface/statistics/out-unicast-packets": "105876" } } ``` === "Event format after" ```json { } ``` ================================================ FILE: docs/user_guide/event_processors/event_duration_convert.md ================================================ The `event-duration-convert` processor converts duration written as string to a integer with second precision. The string format supported is a series of digits and a single letter indicating the unit, e.g 1w3d (1 week 3 days) The highest unit is `w` for week and the lowest is `s` for second. Any of the units may or may not be present. ### Examples #### simple conversion ```yaml processors: # processor name convert-uptime: # processor type event-duration-convert: # list of regex to be matched with the values names value-names: - ".*_uptime$" # keep the original value, # a new value name will be added with the converted value, # the new value name will be the original name with _seconds as suffix keep: false # debug, enables this processor logging debug: false ``` === "Event format before" ```json { "name": "default", "timestamp": 1607290633806716620, "tags": { "port_port-id": "A/1", "source": "172.17.0.100:57400", "subscription-name": "default" }, "values": { "connection_uptime": "1w5s" } } ``` === "Event format after" ```json { "name": "default", "timestamp": 1607290633806716620, "tags": { "port_port-id": "A/1", "source": "172.17.0.100:57400", "subscription-name": "default" }, "values": { "connection_uptime": 604805 } } ``` ================================================ FILE: docs/user_guide/event_processors/event_extract_tags.md ================================================ The `event-extract-tags` processor extracts tags from a value, a value name, a tag name or a tag value using regex named groups. It is possible to overwrite a tag if its name already exists. ```yaml processors: # processor name sample-processor: # processor type event-extract-tags: # list of regular expressions to be used to extract strings to be added as a tag. tag-names: # list of regular expressions to be used to extract strings to be added as a tag. tags: # list of regular expressions to be used to extract strings to be added as a tag. value-names: # list of regular expressions to be used to extract strings to be added as a tag. values: # boolean, if true tags are over-written with the added ones if they already exist. overwrite: # boolean, enable extra logging debug: ``` ### Examples ```yaml processors: # processor name sample-processor: # processor type event-extract-tags: value-names: - /([a-zA-Z0-9-_:]+)/(?P[a-zA-Z0-9-_:]+)/([a-zA-Z0-9-_:]+) ``` === "Event format before" ```json { "name": "default", "timestamp": 1607291271894072397, "tags": { "interface_name": "mgmt0", "source": "172.23.23.2:57400", "subscription-name": "default" }, "values": { "/srl_nokia-interfaces:interface/statistics/carrier-transitions": "1", "/srl_nokia-interfaces:interface/statistics/in-broadcast-packets": "3797", "/srl_nokia-interfaces:interface/statistics/in-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-multicast-packets": "288033", "/srl_nokia-interfaces:interface/statistics/in-octets": "65382630", "/srl_nokia-interfaces:interface/statistics/in-unicast-packets": "107154", "/srl_nokia-interfaces:interface/statistics/out-broadcast-packets": "614", "/srl_nokia-interfaces:interface/statistics/out-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/out-multicast-packets": "11", "/srl_nokia-interfaces:interface/statistics/out-octets": "64721394", "/srl_nokia-interfaces:interface/statistics/out-unicast-packets": "105876" } } ``` === "Event format after" ```json { "name": "default", "timestamp": 1607291271894072397, "tags": { "interface_name": "mgmt0", "source": "172.23.23.2:57400", "group": "statistics", "subscription-name": "default" }, "values": { "/srl_nokia-interfaces:interface/statistics/carrier-transitions": "1", "/srl_nokia-interfaces:interface/statistics/in-broadcast-packets": "3797", "/srl_nokia-interfaces:interface/statistics/in-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-multicast-packets": "288033", "/srl_nokia-interfaces:interface/statistics/in-octets": "65382630", "/srl_nokia-interfaces:interface/statistics/in-unicast-packets": "107154", "/srl_nokia-interfaces:interface/statistics/out-broadcast-packets": "614", "/srl_nokia-interfaces:interface/statistics/out-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/out-multicast-packets": "11", "/srl_nokia-interfaces:interface/statistics/out-octets": "64721394", "/srl_nokia-interfaces:interface/statistics/out-unicast-packets": "105876" } } ``` ================================================ FILE: docs/user_guide/event_processors/event_group_by.md ================================================ The `event-group-by` processor groups values under the same event message based on a list of tag names. This processor is intended to be used together with an output with cached gNMI notifications, like `prometheus` output with `cache: {}`. ### Configuration ```yaml processors: # processor name sample-processor: # processor type event-group-by: # list of strings defining the tags to group by the values under # a single event tags: [] # a boolean, if true only the values from events of the same name # are grouped together according to the list of tags by-name: # boolean debug: false ``` ### Examples #### group by a single tag ```yaml processors: group-by-source: event-group-by: tags: - source ``` === "Event format before" ```json [ { "name": "sub2", "timestamp": 1615284691523204299, "tags": { "neighbor_peer-address": "2002::1:1:1:1", "network-instance_name": "default", "source": "leaf1:57400", "subscription-name": "sub2" }, "values": { "bgp_neighbor_sent_messages_queue_depth": 0, "bgp_neighbor_sent_messages_total_messages": "423", "bgp_neighbor_sent_messages_total_non_updates": "415", "bgp_neighbor_sent_messages_total_updates": "8" } }, { "name": "sub2", "timestamp": 1615284691523204299, "tags": { "neighbor_peer-address": "2002::1:1:1:1", "network-instance_name": "default", "source": "leaf1:57400", "subscription-name": "sub2" }, "values": { "bgp_neighbor_received_messages_malformed_updates": "0", "bgp_neighbor_received_messages_queue_depth": 0, "bgp_neighbor_received_messages_total_messages": "424", "bgp_neighbor_received_messages_total_non_updates": "418", "bgp_neighbor_received_messages_total_updates": "6" } } ] ``` === "Event format after" ```json [ { "name": "sub2", "timestamp": 1615284691523204299, "tags": { "neighbor_peer-address": "2002::1:1:1:1", "network-instance_name": "default", "source": "leaf1:57400", "subscription-name": "sub2" }, "values": { "bgp_neighbor_sent_messages_queue_depth": 0, "bgp_neighbor_sent_messages_total_messages": "423", "bgp_neighbor_sent_messages_total_non_updates": "415", "bgp_neighbor_sent_messages_total_updates": "8", "bgp_neighbor_received_messages_malformed_updates": "0", "bgp_neighbor_received_messages_queue_depth": 0, "bgp_neighbor_received_messages_total_messages": "424", "bgp_neighbor_received_messages_total_non_updates": "418", "bgp_neighbor_received_messages_total_updates": "6" } } ] ``` #### group by multiple tags ```yaml processors: group-by-queue-id: event-group-by: tags: - source - interface_name - multicast-queue_queue-id ``` === "Event Format Before" ```json [ { "name": "sub1", "timestamp": 1627997491187771616, "tags": { "interface_name": "ethernet-1/1", "multicast-queue_queue-id": "5", "source": "clab-ndk-srl1:57400", "subscription-name": "sub1", }, "values": { "/interface/qos/output/multicast-queue/queue-depth/maximum-burst-size": "0" } }, { "name": "sub1", "timestamp": 1627997491187771616, "tags": { "interface_name": "ethernet-1/1", "multicast-queue_queue-id": "5", "source": "clab-ndk-srl1:57400", "subscription-name": "sub1", }, "values": { "/interface/qos/output/multicast-queue/scheduling/peak-rate-bps": "0" } } ] ``` === "Event Format After" ```json [ { "name": "sub1", "timestamp": 1627997491187771616, "tags": { "interface_name": "ethernet-1/1", "multicast-queue_queue-id": "5", "source": "clab-ndk-srl1:57400", "subscription-name": "sub1", }, "values": { "/interface/qos/output/multicast-queue/queue-depth/maximum-burst-size": "0", "/interface/qos/output/multicast-queue/scheduling/peak-rate-bps": "0" } } ] ``` ================================================ FILE: docs/user_guide/event_processors/event_ieeefloat32.md ================================================ The `event-ieeefloat32` processor allows converting binary data received from a router with the type IEEE 32-bit floating point number. ```yaml processors: # processor name sample-processor: # processor type event-ieeefloat32: # jq expression, if evaluated to true, the processor applies based on the field `value-names` condition: # list of regular expressions to be matched against the values names, if matched, the value is converted to a float32. value-names: [] ``` ### Examples ```yaml processors: # processor name sample-processor: # processor type event-ieeefloat32: value-names: - "^components/component/power-supply/state/output-current$" ``` === "Event format before" ```json { "name": "sub1", "timestamp": 1607678293684962443, "tags": { "source": "172.20.20.5:57400" }, "values": { "components/component/power-supply/state/output-current": "QEYAAA==" } } ``` === "Event format after" ```json { "name": "sub1", "timestamp": 1607678293684962443, "tags": { "source": "172.20.20.5:57400", }, "values": { "components/component/power-supply/state/output-current": 3.09375 } } ``` ================================================ FILE: docs/user_guide/event_processors/event_jq.md ================================================ The `event-jq` processor applies a [`jq`](https://stedolan.github.io/jq/) expression on the received event messages. `jq` expressions are a powerful tool that can be used to slice, filter, map, transform JSON object. The `event-jq` processor uses two configuration fields, `condition` and `expression`, both support `jq` expressions. - `condition` (that needs to return a boolean value) determines if the processor is to be applied on the event message. if `false` the message is returned as is. - `expression` is used to transform, filter and/or enrich the messages. It needs to return a JSON object that can be mapped to an array of event messages. The event messages resulting from a single `gNMI` Notification are passed to the jq expression as a JSON array. Some `jq` expression examples: - Select messages with name "sub1" that include a value called "counter1" with value higher than 90 ```yaml expression: .[] | select(.name=="sub1" and .values.counter1 > 90) ``` - Delete values with name "counter1" ```yaml expression: .[] | del(.values.counter1) ``` - Delete values with names "counter1" or "counter2" ```yaml expression: .[] | del(.values.["counter1", "counter2"]) ``` - Delete tags with names "tag1" or "tag2" ```yaml expression: .[] | del(.tags.["tag1", "tag2"]) ``` - Add a tag called "my_new_tag" with value "tag1" ```yaml expression: .[] |= (.tags.my_new_tag = "tag1") ``` - Move a value to tag under a custom key ```yaml expression: .[] |= (.tags.my_new_tag_name = .values.value_name) ``` ### Configuration ```yaml processors: # processor name sample-processor: # processor type event-jq: # condition of application of the processor condition: # jq expression to transform/filter/enrich the message expression: # boolean enabling extra logging debug: ``` ================================================ FILE: docs/user_guide/event_processors/event_merge.md ================================================ The `event-merge` processor merges multiple event messages together based on some criteria. Each [gNMI subscribe Response Update](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L95) in a [gNMI subscribe Response Notification](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L79) is transformed into an [Event Message](intro.md) The `event-merge` processor is used to merge the updates into one event message if it's needed. The default merge strategy is based on the timestamp, the updates with the same timestamp will be merged into the same event message. ```yaml processors: # processor name sample-processor: # processor type event-merge: # if always is set to true, # the updates are merged regardless of the timestamp values always: false debug: false ``` === "Event format before" ```json [ { "name": "sub2", "timestamp": 1615284691523204299, "tags": { "neighbor_peer-address": "2002::1:1:1:1", "network-instance_name": "default", "source": "leaf1:57400", "subscription-name": "sub2" }, "values": { "bgp_neighbor_sent_messages_queue_depth": 0, "bgp_neighbor_sent_messages_total_messages": "423", "bgp_neighbor_sent_messages_total_non_updates": "415", "bgp_neighbor_sent_messages_total_updates": "8" } }, { "name": "sub2", "timestamp": 1615284691523204299, "tags": { "neighbor_peer-address": "2002::1:1:1:1", "network-instance_name": "default", "source": "leaf1:57400", "subscription-name": "sub2" }, "values": { "bgp_neighbor_received_messages_malformed_updates": "0", "bgp_neighbor_received_messages_queue_depth": 0, "bgp_neighbor_received_messages_total_messages": "424", "bgp_neighbor_received_messages_total_non_updates": "418", "bgp_neighbor_received_messages_total_updates": "6" } } ] ``` === "Event format after" ```json [ { "name": "sub2", "timestamp": 1615284691523204299, "tags": { "neighbor_peer-address": "2002::1:1:1:1", "network-instance_name": "default", "source": "leaf1:57400", "subscription-name": "sub2" }, "values": { "bgp_neighbor_sent_messages_queue_depth": 0, "bgp_neighbor_sent_messages_total_messages": "423", "bgp_neighbor_sent_messages_total_non_updates": "415", "bgp_neighbor_sent_messages_total_updates": "8", "bgp_neighbor_received_messages_malformed_updates": "0", "bgp_neighbor_received_messages_queue_depth": 0, "bgp_neighbor_received_messages_total_messages": "424", "bgp_neighbor_received_messages_total_non_updates": "418", "bgp_neighbor_received_messages_total_updates": "6" } } ] ``` ================================================ FILE: docs/user_guide/event_processors/event_override_ts.md ================================================ The `event-override-ts` processor overrides the message timestamp with `time.Now()`. The precision `s`, `ms`, `us` or `ns` (default) can be configured. ### Examples ```yaml processors: # processor name set-timestamp-processor: # processor type event-override-ts: # timestamp precision, s, ms, us, ns (default) precision: ms ``` ================================================ FILE: docs/user_guide/event_processors/event_plugin.md ================================================ The `event-plugin` processor initializes a processor that gNMIc loaded from the configured path under the `plugins:` section. ```yaml plugins: # path to load plugin binaries from. path: /path/to/plugin/bin # glob to match binaries against. glob: "*" # sets a start timeout for plugins. start-timeout: 0s ``` The specific configuration of an `event-plugin` processor varies from one plugin to another. But they are configured just like any other processor i.e under the `processors:` section of the config file and linked to outputs by name reference. The below configuration snippet initializes the `event-add-hostname` processor (a binary stored under `plugins.path`) and links to output `out1`. ```yaml processors: proc1: event-add-hostname: debug: true # the tag name to add with the host hostname as a tag value. hostname-tag-name: "collector-host" # read-interval controls how often the plugin runs the hostname cmd to get the host hostanme # by default it's at most every 1 minute read-interval: 1m outputs: out1: type: file format: event event-processors: - proc1 ``` ### Examples See [here](https://github.com/openconfig/gnmic/tree/main/examples/plugins). ### Writing a plugin processor Currently plugin processor can only be written in Golang. It relies on Hashicorp's [go-plugin](https://github.com/hashicorp/go-plugin) package for discovery and communication with gNMIc's main process. To write your own processor you can use the below skeleton code as a starting point. Can be found [here](https://github.com/openconfig/gnmic/tree/main/examples/plugins/minimal) as well. 1. Choose a name for your processor 2. Add struct fields to decode the processor's config into. 3. Implement your processor's logic under the `Apply` method 4. Optionally, store the `targets`,`actions` and `processors` config maps given to the processor under your processor's struct (`myProcessor`) if they are relevant to your processor's logic. ```go package main import ( "log" "os" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/formatters/event_plugin" "github.com/openconfig/gnmic/pkg/api/types" ) const ( // TODO: Choose a name for your processor processorType = "event-my-processor" ) type myProcessor struct { // TODO: Add your config struct fields here } func (p *myProcessor) Init(cfg interface{}, opts ...formatters.Option) error { // decode the plugin config err := formatters.DecodeConfig(cfg, p) if err != nil { return err } // apply options for _, o := range opts { o(p) } // TODO: Other initialization steps... return nil } func (p *myProcessor) Apply(event ...*formatters.EventMsg) []*formatters.EventMsg { // TODO: The processor's logic is applied here return event } func (p *myProcessor) WithActions(act map[string]map[string]interface{}) { } func (p *myProcessor) WithTargets(tcs map[string]*types.TargetConfig) { } func (p *myProcessor) WithProcessors(procs map[string]map[string]any) { } func (p *myProcessor) WithLogger(l *log.Logger) { } func main() { logger := hclog.New(&hclog.LoggerOptions{ Output: os.Stderr, DisableTime: true, }) logger.Info("starting plugin processor", "name", processorType) // TODO: Create and initialize your processor's struct plug := &myProcessor{} // start it plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "GNMIC_PLUGIN", MagicCookieValue: "gnmic", }, Plugins: map[string]plugin.Plugin{ processorType: &event_plugin.EventProcessorPlugin{Impl: plug}, }, Logger: logger, }) } ``` ================================================ FILE: docs/user_guide/event_processors/event_rate_limit.md ================================================ The `event-rate-limit` processor rate-limits each event with matching tags to the configured amount per-seconds. All the tags for each event is hashed, and if the hash matches a previously seen event, then the timestamp of the event itself is compared to assess if the configured limit has been exceeded. If it has, then this new event is dropped from the pipeline. The cache for comparing timestamp is an LRU cache, with a default size of 1000 that can be increased for bigger deployments. To account for cases where the device will artificially split the event into multiple chunks (with the same timestamp), the rate-limiter will ignore events with exactly the same timestamp. ### Examples ```yaml processors: # processor name rate-limit-100pps: # processor type event-rate-limit: # rate of filtering, in events per seconds per-second: 100 # set the cache size for doing the rate-limiting comparison # default value is 1000 cache-size: 10000 # debug for additionnal logging of dropped events debug: true ``` ================================================ FILE: docs/user_guide/event_processors/event_starlark.md ================================================ ### Intro The `event-starlark` processor applies a [`Starlark`](https://github.com/google/starlark-go/blob/master/doc/spec.md) function on a list of `event` messages before returning them to the processors pipeline and then to the output. `starlark` is a dialect of Python, developed initially for the [Bazel build tool](https://bazel.build/) but found multiple uses as a configuration language embedded in a larger application. There are a few differences between Python and Starlark, programs written in Starlark are supposed to be short-lived and have no external side effects, their main result is structured data or side effects on the host application. As a result, Starlark has no need for classes, exceptions, reflection, concurrency, and other such features of Python. `gNMIc` uses the [Go implementation](https://github.com/google/starlark-go/blob/master/doc/spec.md) of Starlark. A Starlark program running as a `gNMIc` processor should define an `apply` function that takes an arbitrary number of arguments of type `Event` and returns zero or more `Event`s. An [`Event`](intro.md#the-event-format) is the transformed gNMI update message as `gNMIc` processes it. ```python def apply(*events) # events transformed/augmented/filtered here return events ``` ### Configuration ```yaml processors: # processor name sample-processor: # processor type event-starlark: # the source of the starlark program. source: | def apply(*events): # processor logic here return events # path to a file containing the starlark program to run. # Mutually exclusive with `source` parameter. script: # boolean enabling extra logging debug: false ``` ### Writing a Starlark processor To write a starlark processor all that is needed is writing a function called `apply` that will read/modify/delete a list of `Event` messages. Starlark [specification](https://github.com/google/starlark-go/blob/d1966c6b9fcd/doc/spec.md) defines multiple builtin types and functions. `gNMIc` provides additional builtin functions like `Event(name)` which creates a new `Event` message and `copy_event(Event)` which duplicates a given `Event` message. The `Event` message comprises a few fields: - `name`: string - `timestamp`: int64 - `tags`: dictionary of string to string - `values`: dictionary of string to any - `deletes`: list of strings Starlark allows for the dynamic [loading of other modules](https://github.com/bazelbuild/starlark/blob/master/spec.md#load-statements). In the context of gNMIc, the following two modules are available for loading within a starlark program: - **time**: `load("time.star", "time")` loads the time library which provides the following functions to work with the `Event` message timestamp field: - `time.from_timestamp(sec, nsec)`: Converts the given Unix time corresponding to the number of seconds and (optionally) nanoseconds since January 1, 1970 UTC into an object of type Time. For more details, refer to https://pkg.go.dev/time#Unix. - `time.is_valid_timezone(loc)`: Reports whether loc is a valid time zone name. - `time.now()`: Returns the current local time. - `time.parse_duration(d)`: Parses the given duration string. For more details, refer to https://pkg.go.dev/time#ParseDuration. - `time.parse_time(x, format, location)`: Parses the given time string using a specific time format and location. The expected arguments are a time string (mandatory), a time format (optional, set to RFC3339 by default, e.g. "2021-03-22T23:20:50.52Z") and a name of location (optional, set to UTC by default). For more details, refer to https://pkg.go.dev/time#Parse and https://pkg.go.dev/time#ParseInLocation. - `time.time(year, month, day, hour, minute, second, nanosecond, location)`: Returns the Time corresponding to `yyyy-mm-dd hh:mm:ss + nsec nanoseconds` in the appropriate zone for that time in the given location. All the parameters are optional. - **math**: `load("math.star", "math")` loads the math library which provides a set of constants and math-related functions: - `ceil(x)`: Returns the ceiling of x, the smallest integer greater than or equal to x. - `copysign(x, y)`: Returns a value with the magnitude of x and the sign of y. - `fabs(x)`: Returns the absolute value of x as float. - `floor(x)`: Returns the floor of x, the largest integer less than or equal to x. - `mod(x, y)`: Returns the floating-point remainder of x/y. The magnitude of the result is less than y and its sign agrees with that of x. - `pow(x, y)`: Returns x**y, the base-x exponential of y. - `remainder(x, y)`: Returns the IEEE 754 floating-point remainder of x/y. - `round(x)`: Returns the nearest integer, rounding half away from zero. - `exp(x)`: Returns e raised to the power x, where e = 2.718281… is the base of natural logarithms. - `sqrt(x)`: Returns the square root of x. - `acos(x)`: Returns the arc cosine of x, in radians. - `asin(x)`: Returns the arc sine of x, in radians. - `atan(x)`: Returns the arc tangent of x, in radians. - `atan2(y, x)`: Returns atan(y / x), in radians. The result is between -pi and pi. The vector in the plane from the origin to point (x, y) makes this angle with the positive X axis. The point of atan2() is that the signs of both inputs are known to it, so it can compute the correct quadrant for the angle. For example, atan(1) and atan2(1, 1) are both pi/4, but atan2(-1, -1) is -3*pi/4. - `cos(x)`: Returns the cosine of x, in radians. - `hypot(x, y)`: Returns the Euclidean norm, sqrt(x*x + y*y). This is the length of the vector from the origin to point (x, y). - `sin(x)`: Returns the sine of x, in radians. - `tan(x)`: Returns the tangent of x, in radians. - `degrees(x)`: Converts angle x from radians to degrees. - `radians(x)`: Converts angle x from degrees to radians. - `acosh(x)`: Returns the inverse hyperbolic cosine of x. - `asinh(x)`: Returns the inverse hyperbolic sine of x. - `atanh(x)`: Returns the inverse hyperbolic tangent of x. - `cosh(x)`: Returns the hyperbolic cosine of x. - `sinh(x)`: Returns the hyperbolic sine of x. - `tanh(x)`: Returns the hyperbolic tangent of x. - `log(x, base)`: Returns the logarithm of x in the given base, or natural logarithm by default. - `gamma(x)`: Returns the Gamma function of x. ### Examples #### Move a value to a tag ```python def apply(*events): dels = [] for e in events: for k, v in e.values.items(): if k == "val1": e.tags[k] = str(v) dels.append(k) for d in dels: e.values.pop(d) return events ``` #### Rename values ```python val_map = { "val1": "new_val", } def apply(*events): for e in events: for k, v in e.values.items(): if k in val_map: e.values[val_map[k]] = v e.values.pop(k) return events ``` #### Convert strings to integers ```python def apply(*events): for e in events: for k, v in e.values.items(): if v.isdigit(): e.values[k] = int(v) return events ``` #### Set an interface description as a tag This script stores each interface description per target/interface in a cache and adds it to other values as a tag. ```python cache = {} def apply(*events): evs = [] # check if on the event messages contains an interface description # and store in th cache dict for e in events: if e.values.get("/interface/description"): target_if = e.tags["source"] + "_" + e.tags["interface_name"] cache[target_if] = e.values["/interface/description"] # for each event get the 'source' and 'interface_name', check # if a corresponding cache entry exists and set it as a # 'description' tag for e in events: if e.tags.get("source") and e.tags.get("interface_name"): target_if = e.tags["source"] + "_" + e.tags["interface_name"] if cache.get(target_if): e.tags["description"] = cache[target_if] evs.append(e) return evs ``` #### Calculate new values based on the received ones The below script calculates the avg, min, max of a list of values over their last N=10 values ```python cache = {} values_names = [ '/interface/statistics/out-octets', '/interface/statistics/in-octets' ] N=10 def apply(*events): for e in events: for value_name in values_names: v = e.values.get(value_name) # check if v is not None and is a digit to proceed if not v.isdigit(): continue # update cache with the latest value val_key = "_".join([e.tags["source"], e.tags["interface_name"], value_name]) if not cache.get(val_key): # initialize the cache entry if empty cache.update({val_key: []}) if len(cache[val_key]) >= N: # remove the oldest entry if the number of entries reached N cache[val_key] = cache[val_key][1:] # update cache entry cache[val_key].append(int(v)) # get the list of values val_list = cache[val_key] # calculate min, max and avg e.values[value_name+"_min"] = min(val_list) e.values[value_name+"_max"] = max(val_list) e.values[value_name+"_avg"] = avg(val_list) return events def avg(vals): sum = 0 for v in vals: sum = sum + v return sum/len(vals) ``` The below script builds on top of the previous one by adding the rate calculation to the added values. Now the cache contains a timestamp as well as the value. ```python cache = {} values_names=[ '/interface/statistics/out-octets', '/interface/statistics/in-octets' ] N=10 def apply(*events): for e in events: for value_name in values_names: v = e.values.get(value_name) # check if v is not None and is a digit to proceed if not v.isdigit(): continue # update cache with the latest value val_key = "_".join([e.tags["source"], e.tags["interface_name"], value_name]) if not cache.get(val_key): # initialize the cache entry if empty cache.update({val_key: []}) if len(cache[val_key]) >= N: # remove the oldest entry if the number of entries reached N cache[val_key] = cache[val_key][1:] # update cache entry cache[val_key].append((e.timestamp, int(v))) # get the list of values val_list = cache[val_key] # calculate min, max and avg vals = [x[1] for x in val_list] e.values[value_name+"_min"] = min(vals) e.values[value_name+"_max"] = max(vals) e.values[value_name+"_avg"] = avg(vals) if len(val_list) > 1: e.values[value_name+"_rate"] = rate(val_list[-2:]) return events def avg(vals): sum = 0 for v in vals: sum = sum + v return sum/len(vals) def rate(vals): period = (vals[1][0] - vals[0][0]) / 1000000000 change = vals[1][1] - vals[0][1] return change / period ``` #### Ungroup values The below script ungroups values part of the same event message producing an event message per value. ```python def apply(*events): ungrouped_events = [] for e in events: for k, v in e.values.items(): # create a new event without any value new_event = Event(e.name, e.timestamp, e.tags) # add a single value to the new event new_event.values[k] = v # add the new event to the array ungrouped_events.append(new_event) return ungrouped_events ``` ================================================ FILE: docs/user_guide/event_processors/event_strings.md ================================================ The `event-strings` processor exposes a few of Golang strings transformation functions, there functions can be applied to tags, tag names, values or value names. Supported functions: * `strings.Replace` * `strings.TrimPrefix` * `strings.TrimSuffix` * `strings.Title` * `strings.ToLower` * `strings.ToUpper` * `strings.Split` * `filepath.Base` ```yaml processors: # processor name sample-processor: # processor type event-strings: value-names: [] tag-names: [] values: [] tags: [] transforms: # strings function name - replace: apply-on: # apply the transformation on name or value keep: # keep the old value or not if the name changed old: # string to be replaced new: #replacement string of old - trim-prefix: apply-on: # apply the transformation on name or value prefix: # prefix to be trimmed - trim_suffix: apply-on: # apply the transformation on name or value suffix: # suffix to be trimmed - title: apply-on: # apply the transformation on name or value - to-upper: apply-on: # apply the transformation on name or value - to-lower: apply-on: # apply the transformation on name or value - split: apply-on: # apply the transformation on name or value split-on: # character to split on join-with: # character to join with ignore-first: # number of first items to ignore when joining ignore-last: # number of last items to ignore when joining - path-base: apply-on: # apply the transformation on name or value ``` ### Examples #### replace ```yaml processors: # processor name sample-processor: # processor type event-strings: value-names: - ".*" transforms: # strings function name - replace: apply-on: "name" old: "-" new: "_" ``` === "Event format before" ```json { "name": "default", "timestamp": 1607291271894072397, "tags": { "interface_name": "mgmt0", "source": "172.23.23.2:57400", "subscription-name": "default" }, "values": { "carrier-transitions": "1", "in-error-packets": "0", "in-fcs-error-packets": "0", "in-octets": "65382630", "in-unicast-packets": "107154", "out-error-packets": "0", "out-octets": "64721394", "out-unicast-packets": "105876" } } ``` === "Event format after" ```json { "name": "default", "timestamp": 1607291271894072397, "tags": { "interface_name": "mgmt0", "source": "172.23.23.2:57400", "subscription-name": "default" }, "values": { "carrier_transitions": "1", "in_error_packets": "0", "in_fcs_error_packets": "0", "in_octets": "65382630", "in_unicast_packets": "107154", "out_error_packets": "0", "out_octets": "64721394", "out_unicast_packets": "105876" } } ``` #### trim-prefix ```yaml processors: # processor name sample-processor: # processor type event-strings: value-names: - ".*" transforms: # strings function name - trim-prefix: apply-on: "name" prefix: "/srl_nokia-interfaces:interface/statistics/" ``` === "Event format before" ```json { "name": "default", "timestamp": 1607291271894072397, "tags": { "interface_name": "mgmt0", "source": "172.23.23.2:57400", "subscription-name": "default" }, "values": { "/srl_nokia-interfaces:interface/statistics/carrier-transitions": "1", "/srl_nokia-interfaces:interface/statistics/in-broadcast-packets": "3797", "/srl_nokia-interfaces:interface/statistics/in-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-multicast-packets": "288033", "/srl_nokia-interfaces:interface/statistics/in-octets": "65382630", "/srl_nokia-interfaces:interface/statistics/in-unicast-packets": "107154", "/srl_nokia-interfaces:interface/statistics/out-broadcast-packets": "614", "/srl_nokia-interfaces:interface/statistics/out-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/out-multicast-packets": "11", "/srl_nokia-interfaces:interface/statistics/out-octets": "64721394", "/srl_nokia-interfaces:interface/statistics/out-unicast-packets": "105876" } } ``` === "Event format after" ```json { "name": "default", "timestamp": 1607291271894072397, "tags": { "interface_name": "mgmt0", "source": "172.23.23.2:57400", "subscription-name": "default" }, "values": { "carrier-transitions": "1", "in-broadcast-packets": "3797", "in-error-packets": "0", "in-fcs-error-packets": "0", "in-multicast-packets": "288033", "in-octets": "65382630", "in-unicast-packets": "107154", "out-broadcast-packets": "614", "out-error-packets": "0", "out-multicast-packets": "11", "out-octets": "64721394", "out-unicast-packets": "105876" } } ``` #### to-upper ```yaml processors: # processor name sample-processor: # processor type event-strings: tag-names: - "interface_name" - "subscription-name" transforms: # strings function name - to-upper: apply-on: "value" ``` === "Event format before" ```json { "name": "default", "timestamp": 1607291271894072397, "tags": { "interface_name": "mgmt0", "source": "172.23.23.2:57400", "subscription-name": "default" }, "values": { "/srl_nokia-interfaces:interface/statistics/carrier-transitions": "1", "/srl_nokia-interfaces:interface/statistics/in-broadcast-packets": "3797", "/srl_nokia-interfaces:interface/statistics/in-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-multicast-packets": "288033", "/srl_nokia-interfaces:interface/statistics/in-octets": "65382630", "/srl_nokia-interfaces:interface/statistics/in-unicast-packets": "107154", "/srl_nokia-interfaces:interface/statistics/out-broadcast-packets": "614", "/srl_nokia-interfaces:interface/statistics/out-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/out-multicast-packets": "11", "/srl_nokia-interfaces:interface/statistics/out-octets": "64721394", "/srl_nokia-interfaces:interface/statistics/out-unicast-packets": "105876" } } ``` === "Event format after" ```json { "name": "default", "timestamp": 1607291271894072397, "tags": { "interface_name": "MGMT0", "source": "172.23.23.2:57400", "subscription-name": "DEFAULT" }, "values": { "/srl_nokia-interfaces:interface/statistics/carrier-transitions": "1", "/srl_nokia-interfaces:interface/statistics/in-broadcast-packets": "3797", "/srl_nokia-interfaces:interface/statistics/in-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-multicast-packets": "288033", "/srl_nokia-interfaces:interface/statistics/in-octets": "65382630", "/srl_nokia-interfaces:interface/statistics/in-unicast-packets": "107154", "/srl_nokia-interfaces:interface/statistics/out-broadcast-packets": "614", "/srl_nokia-interfaces:interface/statistics/out-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/out-multicast-packets": "11", "/srl_nokia-interfaces:interface/statistics/out-octets": "64721394", "/srl_nokia-interfaces:interface/statistics/out-unicast-packets": "105876" } } ``` #### path-base ```yaml processors: # processor name sample-processor: # processor type event-strings: value-names: - ".*" transforms: # strings function name - path-base: apply-on: "name" ``` === "Event format before" ```json { "name": "default", "timestamp": 1607291271894072397, "tags": { "interface_name": "mgmt0", "source": "172.23.23.2:57400", "subscription-name": "default" }, "values": { "/srl_nokia-interfaces:interface/statistics/carrier-transitions": "1", "/srl_nokia-interfaces:interface/statistics/in-broadcast-packets": "3797", "/srl_nokia-interfaces:interface/statistics/in-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-multicast-packets": "288033", "/srl_nokia-interfaces:interface/statistics/in-octets": "65382630", "/srl_nokia-interfaces:interface/statistics/in-unicast-packets": "107154", "/srl_nokia-interfaces:interface/statistics/out-broadcast-packets": "614", "/srl_nokia-interfaces:interface/statistics/out-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/out-multicast-packets": "11", "/srl_nokia-interfaces:interface/statistics/out-octets": "64721394", "/srl_nokia-interfaces:interface/statistics/out-unicast-packets": "105876" } } ``` === "Event format after" ```json { "name": "default", "timestamp": 1607291271894072397, "tags": { "interface_name": "mgmt0", "source": "172.23.23.2:57400", "subscription-name": "default" }, "values": { "carrier-transitions": "1", "in-broadcast-packets": "3797", "in-error-packets": "0", "in-fcs-error-packets": "0", "in-multicast-packets": "288033", "in-octets": "65382630", "in-unicast-packets": "107154", "out-broadcast-packets": "614", "out-error-packets": "0", "out-multicast-packets": "11", "out-octets": "64721394", "out-unicast-packets": "105876" } } ``` #### split ```yaml processors: # processor name sample-processor: # processor type event-strings: value-names: - ".*" transforms: # strings function name - split: on: "name" split-on: "/" join-with: "_" ignore-first: 1 ``` === "Event format before" ```json { "name": "default", "timestamp": 1607291271894072397, "tags": { "interface_name": "mgmt0", "source": "172.23.23.2:57400", "subscription-name": "default" }, "values": { "/srl_nokia-interfaces:interface/statistics/carrier-transitions": "1", "/srl_nokia-interfaces:interface/statistics/in-broadcast-packets": "3797", "/srl_nokia-interfaces:interface/statistics/in-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-multicast-packets": "288033", "/srl_nokia-interfaces:interface/statistics/in-octets": "65382630", "/srl_nokia-interfaces:interface/statistics/in-unicast-packets": "107154", "/srl_nokia-interfaces:interface/statistics/out-broadcast-packets": "614", "/srl_nokia-interfaces:interface/statistics/out-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/out-multicast-packets": "11", "/srl_nokia-interfaces:interface/statistics/out-octets": "64721394", "/srl_nokia-interfaces:interface/statistics/out-unicast-packets": "105876" } } ``` === "Event format after" ```json { "name": "default", "timestamp": 1607291271894072397, "tags": { "interface_name": "mgmt0", "source": "172.23.23.2:57400", "subscription-name": "default" }, "values": { "statistics_carrier-transitions": "1", "statistics_in-broadcast-packets": "3797", "statistics_in-error-packets": "0", "statistics_in-fcs-error-packets": "0", "statistics_in-multicast-packets": "288033", "statistics_in-octets": "65382630", "statistics_in-unicast-packets": "107154", "statistics_out-broadcast-packets": "614", "statistics_out-error-packets": "0", "statistics_out-multicast-packets": "11", "statistics_out-octets": "64721394", "statistics_out-unicast-packets": "105876" } } ``` #### multiple transforms ```yaml processors: # processor name sample-processor: # processor type event-strings: value-names: - ".*" transforms: # strings function name - path-base: apply-on: "name" - title: apply-on: "name" - replace: apply-on: "name" old: "-" new: "_" ``` === "Event format before" ```json { "name": "default", "timestamp": 1607291271894072397, "tags": { "interface_name": "mgmt0", "source": "172.23.23.2:57400", "subscription-name": "default" }, "values": { "/srl_nokia-interfaces:interface/statistics/carrier-transitions": "1", "/srl_nokia-interfaces:interface/statistics/in-broadcast-packets": "3797", "/srl_nokia-interfaces:interface/statistics/in-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-multicast-packets": "288033", "/srl_nokia-interfaces:interface/statistics/in-octets": "65382630", "/srl_nokia-interfaces:interface/statistics/in-unicast-packets": "107154", "/srl_nokia-interfaces:interface/statistics/out-broadcast-packets": "614", "/srl_nokia-interfaces:interface/statistics/out-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/out-multicast-packets": "11", "/srl_nokia-interfaces:interface/statistics/out-octets": "64721394", "/srl_nokia-interfaces:interface/statistics/out-unicast-packets": "105876" } } ``` === "Event format after" ```json { "name": "default", "timestamp": 1607291271894072397, "tags": { "interface_name": "mgmt0", "source": "172.23.23.2:57400", "subscription-name": "default" }, "values": { "Carrier_transitions": "1", "In_broadcast_packets": "3797", "In_error_packets": "0", "In_fcs_error_packets": "0", "In_multicast_packets": "288033", "In_octets": "65382630", "In_unicast_packets": "107154", "Out_broadcast_packets": "614", "Out_error_packets": "0", "Out_multicast_packets": "11", "Out_octets": "64721394", "Out_unicast_packets": "105876" } } ``` ================================================ FILE: docs/user_guide/event_processors/event_time_epoch.md ================================================ The event-time-epoch processor is a plugin for gNMIc that converts string-based time values in event messages into epoch timestamps. This is particularly useful when input data includes timestamps in human-readable formats (like RFC3339) and you want to normalize them for downstream systems. # Configuration ```yaml processors: convert-timestamp: event-time-epoch: value-names: - ".*timestamp" - "lastSeen" format: "2006-01-02T15:04:05Z07:00" precision: "ms" debug: true ``` | Field | Type | Description | |---------------|------------|--------------------------------------------------------------------------------------------------| | `value-names` | `[]string` | List of regular expressions to match against the event `values` keys. Only matching keys will be processed. | | `format` | `string` | [Go time layout](https://golang.org/pkg/time/#Time.Format) used to parse the input strings. Defaults to RFC3339 format. | | `precision` | `string` | Desired epoch output precision: `s`, `ms`, `us`, or `ns`. Defaults to nanoseconds. | | `debug` | `bool` | Enables verbose logging to stderr or the provided logger. | === "Event format before" ```json { "name": "default", "timestamp": 1607290633806716620, "tags": { "source": "172.17.0.100:57400", "subscription-name": "default" }, "values": { "system/timestamp": "2025-04-05T10:30:00Z" } } ``` === "Event format after" ```json { "name": "default", "timestamp": 1607290633806716620, "tags": { "source": "172.17.0.100:57400", "subscription-name": "default" }, "values": { "system.timestamp": 1743849000 } } ``` ================================================ FILE: docs/user_guide/event_processors/event_to_tag.md ================================================ The `event-to-tag` processor moves a value matching one of the regular expressions from the values section to the tags section. It's possible to keep the value under values section after moving it. ### Examples ```yaml processors: # processor name sample-processor: # processor type event-to-tag: value-names: - ".*-state$" ``` === "Event format before" ```json { "name": "default", "timestamp": 1607305284170936330, "tags": { "interface_name": "ethernet-1/1", "source": "172.23.23.2:57400", "subscription-name": "default" }, "values": { "/srl_nokia-interfaces:interface/admin-state": "disable", "/srl_nokia-interfaces:interface/ifindex": 54, "/srl_nokia-interfaces:interface/last-change": "2020-11-20T05:52:21.459Z", "/srl_nokia-interfaces:interface/oper-down-reason": "port-admin-disabled", "/srl_nokia-interfaces:interface/oper-state": "down" } } ``` === "Event format after" ```json { "name": "default", "timestamp": 1607305284170936330, "tags": { "interface_name": "ethernet-1/1", "source": "172.23.23.2:57400", "subscription-name": "default", "/srl_nokia-interfaces:interface/admin-state": "disable", "/srl_nokia-interfaces:interface/oper-state": "down" }, "values": { "/srl_nokia-interfaces:interface/ifindex": 54, "/srl_nokia-interfaces:interface/last-change": "2020-11-20T05:52:21.459Z", "/srl_nokia-interfaces:interface/oper-down-reason": "port-admin-disabled" } } ``` ================================================ FILE: docs/user_guide/event_processors/event_trigger.md ================================================ The `event-trigger` processor takes event messages as input and triggers a list of actions (sequentially) if a configured condition evaluates to `true`. The condition is evaluated using the the Golang implementation of [jq](https://github.com/itchyny/gojq) with the event message as a `json` input. `jq` [tutorial](https://stedolan.github.io/jq/tutorial/) `jq` [manual](https://stedolan.github.io/jq/manual/) `jq` [playground](https://jqplay.org/) Examples of conditions: - The below expression checks if the value named `counter1` has a value higher than 90 ```bash .values["counter1"] > 90 ``` - This expression checks if the event name is `sub1`, that the tag `source` is equal to `r1:57400` ```bash .name == "sub1" and .tags["source"] == "r1:57400" ``` The trigger can be monitored over a configurable window of time (default 1 minute), during which only a certain number of occurrences (default 1) trigger the actions execution. The action types availabe can be found [here](../actions/actions.md) ```yaml processors: # processor name my_trigger_proc: # # processor type event-trigger: # trigger condition condition: '.values["counter1"] > 90' # minimum number of condition occurrences within the configured window # required to trigger the action min-occurrences: 1 # max number of times the action is triggered within the configured window max-occurrences: 1 # window of time during which max-occurrences need to # be reached in order to trigger the action window: 60s # async, bool. default false. # If true the trigger is executed in the background and the triggering # message is passed to the next procesor. Otherwise it blocks until the trigger returns async: false # a dictionary of variables that is passed to the actions # and can be accessed in the actions templates using `.Vars` vars: # path to a file containing variables passed to the actions # the variable in the `vars` field override the ones read from the file. vars-file: # list of actions to be executed actions: - counter_alert ``` ### Examples #### Alerting when a threshold is crossed The below example triggers an HTTP GET to `http://remote-server:p8080/${router_name}` if the value of counter "counter1" crosses 90 twice within 2 minutes. ```yaml processors: my_trigger_proc: event-trigger: condition: '.values["counter1"] > 90' min-occurrences: 1 max-occurrences: 2 window: 120s async: true actions: - alert actions: alert: name: alert type: http method: POST url: http://remote-server:8080/{{ index .Tags "source" }} headers: content-type: application/text timeout: 5s body: '"counter1" crossed threshold, value={{ index .Values "counter1" }}' ``` #### Enabling backup interface The below example shows a trigger that enables a router interface if another interface's operational status changes to "down". ```yaml processors: interface_watch: # event-trigger: debug: true condition: '(.tags.interface_name == "ethernet-1/1" or .tags.interface_name == "ethernet-1/2") and .values["/srl_nokia-interfaces:interface/oper-state"] == "down"' actions: - enable_interface actions: enable_interface: name: my_gnmi_action type: gnmi rpc: set target: '{{ index .Event.Tags "source" }}' paths: - | {{ $interfaceName := "" }} {{ if eq ( index .Event.Tags "interface_name" ) "ethernet-1/1"}} {{$interfaceName = "ethernet-1/2"}} {{ else if eq ( index E.vent.Tags "interface_name" ) "ethernet-1/2"}} {{$interfaceName = "ethernet-1/1"}} {{end}} /interface[name={{$interfaceName}}]/admin-state values: - "enable" encoding: json_ietf debug: true ``` #### Clone a network topology and deploy it using containerlab Using lldp neighbor information it's possible to build a [containerlab](https://containerlab.srlinux.dev) topology using `gnmic` actions. In the below configuration file, an event processor called `clone-topology` is defined. When triggered it runs a series of actions to gather information (chassis type, lldp neighbors, configuration,...) from the defined targets. It then builds a containerlab topology from a defined template and the gathered info, writes it to a file and runs a `clab deploy` command. ```yaml username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf # log: true targets: srl1: srl2: srl3: processors: clone-topology: event-trigger: # debug: true actions: - chassis - lldp - read_config - write_config - clab_topo - deploy_topo actions: chassis: name: chassis type: gnmi target: all rpc: sub encoding: json_ietf #debug: true format: event paths: - /platform/chassis/type lldp: name: lldp type: gnmi target: all rpc: sub encoding: json_ietf #debug: true format: event paths: - /system/lldp/interface[name=ethernet-*] read_config: name: read_config type: gnmi target: all rpc: get data-type: config encoding: json_ietf #debug: true paths: - / write_config: name: write_config type: template template: | {{- range $n, $m := .Env.read_config }} {{- $filename := print $n ".json"}} {{ file.Write $filename (index $m 0 "updates" 0 "values" "" | data.ToJSONPretty " " ) }} {{- end }} #debug: true clab_topo: name: clab_topo type: template # debug: true output: gnmic.clab.yaml template: | name: gNMIc-action-generated topology: defaults: kind: srl kinds: srl: image: ghcr.io/nokia/srlinux:latest nodes: {{- range $n, $m := .Env.lldp }} {{- $type := index $.Env.chassis $n 0 0 "values" "/srl_nokia-platform:platform/srl_nokia-platform-chassis:chassis/type" }} {{- $type = $type | strings.ReplaceAll "7220 IXR-D1" "ixrd1" }} {{- $type = $type | strings.ReplaceAll "7220 IXR-D2" "ixrd2" }} {{- $type = $type | strings.ReplaceAll "7220 IXR-D3" "ixrd3" }} {{- $type = $type | strings.ReplaceAll "7250 IXR-6" "ixr6" }} {{- $type = $type | strings.ReplaceAll "7250 IXR-10" "ixr10" }} {{- $type = $type | strings.ReplaceAll "7220 IXR-H1" "ixrh1" }} {{- $type = $type | strings.ReplaceAll "7220 IXR-H2" "ixrh2" }} {{- $type = $type | strings.ReplaceAll "7220 IXR-H3" "ixrh3" }} {{ $n | strings.TrimPrefix "clab-" }}: type: {{ $type }} startup-config: {{ print $n ".json"}} {{- end }} links: {{- range $n, $m := .Env.lldp }} {{- range $rsp := $m }} {{- range $ev := $rsp }} {{- if index $ev.values "/srl_nokia-system:system/srl_nokia-lldp:lldp/interface/neighbor/system-name" }} {{- $node1 := $ev.tags.source | strings.TrimPrefix "clab-" }} {{- $iface1 := $ev.tags.interface_name | strings.ReplaceAll "ethernet-" "e" | strings.ReplaceAll "/" "-" }} {{- $node2 := index $ev.values "/srl_nokia-system:system/srl_nokia-lldp:lldp/interface/neighbor/system-name" }} {{- $iface2 := index $ev.values "/srl_nokia-system:system/srl_nokia-lldp:lldp/interface/neighbor/port-id" | strings.ReplaceAll "ethernet-" "e" | strings.ReplaceAll "/" "-" }} {{- if lt $node1 $node2 }} - endpoints: ["{{ $node1 }}:{{ $iface1 }}", "{{ $node2 }}:{{ $iface2 }}"] {{- end }} {{- end }} {{- end }} {{- end }} {{- end }} deploy_topo: name: deploy_topo type: script command: sudo clab dep -t gnmic.clab.yaml --reconfigure debug: true ``` The above described processor can be triggered with the below command: ```bash gnmic --config clone.yaml get --path /system/name --processor clone-topology ``` ================================================ FILE: docs/user_guide/event_processors/event_value_tag.md ================================================ The `event-value-tag` processor applies specific values from event messages to tags of other messages, if event tag names match. Each [gNMI subscribe Response Update](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L95) in a [gNMI subscribe Response Notification](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L79) is transformed into an [Event Message](intro.md) Additionally, if you are using an output cache, all [gNMI subscribe Response Update](https://github.com/openconfig/gnmi/blob/master/proto/gnmi/gnmi.proto#L95) messages are converted to Events on flush. The `event-value-tag` processor is used to extract Values as tags to apply to other Events that have the same K:V tag pairs from the original event message, without merging events with different timestamps. ```yaml processors: # processor name intf-description: # processor-type event-value-tag: # name of the value to match. Usually a specific gNMI path value-name: "/interfaces/interface/state/description" # if set, use instead of the value name for tag tag-name: "description" # if true, remove value from original event when copying consume: false debug: false ``` === "Event format before" ```json [ { "name": "sub1", "timestamp": 1, "tags": { "source": "leaf1:6030", "subscription-name": "sub1", "interface_name": "Ethernet1" }, "values": { "/interfaces/interface/state/counters/in-octets": 100 } }, { "name": "sub1", "timestamp": 200, "tags": { "source": "leaf1:6030", "subscription-name": "sub1", "interface_name": "Ethernet1" }, "values": { "/interfaces/interface/state/counters/out-octets": 100 } }, { "name": "sub1", "timestamp": 200, "tags": { "source": "leaf1:6030", "subscription-name": "sub1", "interface_name": "Ethernet1" }, "values": { "/interfaces/interface/state/description": "Uplink" } } ] ``` === "Event format after" ```json [ { "name": "sub1", "timestamp": 1, "tags": { "source": "leaf1:6030", "subscription-name": "sub1", "interface_name": "Ethernet1", "description": "Uplink" }, "values": { "/interfaces/interface/state/counters/in-octets": 100 } }, { "name": "sub1", "timestamp": 200, "tags": { "source": "leaf1:6030", "subscription-name": "sub1", "interface_name": "Ethernet1", "description": "Uplink" }, "values": { "/interfaces/interface/state/counters/out-octets": 100 } }, { "name": "sub1", "timestamp": 200, "tags": { "source": "leaf1:6030", "subscription-name": "sub1", "interface_name": "Ethernet1" }, "values": { "/interfaces/interface/state/description": "Uplink" } } ] ``` ```yaml bgp-description: event-value-tag: value-name: "neighbor_description" consume: true ``` === "Event format before" ```json [ { "name": "sub2", "timestamp": 1615284691523204299, "tags": { "neighbor_peer-address": "2002::1:1:1:1", "network-instance_name": "default", "source": "leaf1:57400", "subscription-name": "sub2" }, "values": { "bgp_neighbor_sent_messages_queue_depth": 0, "bgp_neighbor_sent_messages_total_messages": "423", "bgp_neighbor_sent_messages_total_non_updates": "415", "bgp_neighbor_sent_messages_total_updates": "8" } }, { "name": "sub2", "timestamp": 1615284691523204299, "tags": { "neighbor_peer-address": "2002::1:1:1:1", "network-instance_name": "default", "source": "leaf1:57400", "subscription-name": "sub2" }, "values": { "neighbor_description": "PeerRouter" } } ] ``` === "Event format after" ```json [ { "name": "sub2", "timestamp": 1615284691523204299, "tags": { "neighbor_peer-address": "2002::1:1:1:1", "network-instance_name": "default", "source": "leaf1:57400", "subscription-name": "sub2" "neighbor_description": "PeerRouter" }, "values": { "bgp_neighbor_sent_messages_queue_depth": 0, "bgp_neighbor_sent_messages_total_messages": "423", "bgp_neighbor_sent_messages_total_non_updates": "415", "bgp_neighbor_sent_messages_total_updates": "8", } }, { "name": "sub2", "timestamp": 1615284691523204299, "tags": { "neighbor_peer-address": "2002::1:1:1:1", "network-instance_name": "default", "source": "leaf1:57400", "subscription-name": "sub2" }, "values": {} } ] ``` ================================================ FILE: docs/user_guide/event_processors/event_write.md ================================================ The `event-write` processor writes a message that has a value or a tag matching one of the configured regular expressions to `stdout`, `stderr` or to a file. A custom separator (used between written messages) can be configured, it defaults to `\n` ```yaml processors: # processor name write-processor: # processor type event-write: # jq expression, if evaluated to true, the message is written to dst condition: # list of regular expressions to be matched against the tags names, if matched, the message is written to dst tag-names: # list of regular expressions to be matched against the tags values, if matched, the message is written to dst tags: # list of regular expressions to be matched against the values names, if matched, the message is written to dst value-names: # list of regular expressions to be matched against the values, if matched, the message is written to dst values: # path to the destination file dst: # separator to be written between messages separator: # indent to use when marshaling the event message to json indent: ``` ### Examples ```yaml processors: # processor name write-processor: # processor type event-write: value-names: - "." dst: file.log separator: "\n####\n" indent: " " ``` ``` bash $ cat file.log { "name": "sub1", "timestamp": 1607582483868459381, "tags": { "interface_name": "ethernet-1/1", "source": "172.20.20.5:57400", "subscription-name": "sub1" }, "values": { "/srl_nokia-interfaces:interface/statistics/carrier-transitions": "1", "/srl_nokia-interfaces:interface/statistics/in-broadcast-packets": "22", "/srl_nokia-interfaces:interface/statistics/in-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-multicast-packets": "8694", "/srl_nokia-interfaces:interface/statistics/in-octets": "1740350", "/srl_nokia-interfaces:interface/statistics/in-unicast-packets": "17", "/srl_nokia-interfaces:interface/statistics/out-broadcast-packets": "22", "/srl_nokia-interfaces:interface/statistics/out-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/out-multicast-packets": "8696", "/srl_nokia-interfaces:interface/statistics/out-octets": "1723262", "/srl_nokia-interfaces:interface/statistics/out-unicast-packets": "17" } } #### { "name": "sub1", "timestamp": 1607582483868459381, "tags": { "interface_name": "ethernet-1/1", "source": "172.20.20.5:57400", "subscription-name": "sub1" }, "values": { "/srl_nokia-interfaces:interface/statistics/carrier-transitions": "1", "/srl_nokia-interfaces:interface/statistics/in-broadcast-packets": "22", "/srl_nokia-interfaces:interface/statistics/in-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-fcs-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/in-multicast-packets": "8694", "/srl_nokia-interfaces:interface/statistics/in-octets": "1740350", "/srl_nokia-interfaces:interface/statistics/in-unicast-packets": "17", "/srl_nokia-interfaces:interface/statistics/out-broadcast-packets": "22", "/srl_nokia-interfaces:interface/statistics/out-error-packets": "0", "/srl_nokia-interfaces:interface/statistics/out-multicast-packets": "8696", "/srl_nokia-interfaces:interface/statistics/out-octets": "1723262", "/srl_nokia-interfaces:interface/statistics/out-unicast-packets": "17" } } #### ``` ================================================ FILE: docs/user_guide/event_processors/intro.md ================================================ The event processors provide an easy way to configure a set of functions in order to transform an event message that will be be written to a specific output.
While the `event` format is the de facto format used by `gNMIc` in case the output is `influxdb` or `prometheus`, it can be used with any other output type. Transforming the received gNMI message is sometimes needed to accomodate the output system ( converting types, complying with name constraints,...), or simply filtering out values that you are not interested on. ### The event format The event format is produced by `gNMIc` from the [gNMI Notification messages](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#21-reusable-notification-message-format) received within a gNMI subscribe response update, it contains 5 fields: * `name`: A `string` field populated by the subscription name, it is used as a part of the metric name in case of prometheus output or it can be used as the measurement name in case of influxdb output. * `timestamp`: An `int64` field containing the timestamp received within the gnmi Update. * `tags`: A map of string keys and string values. The keys and values are extracted from the keys in the [gNMI PathElement](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-path-conventions.md#constructing-paths) keys. `gNMIc` adds the subscription name and the target name/address. * `values`: A map of string keys and generic values. The keys are build from a xpath representation of the gNMI path without the keys, while the values are extracted from the gNMI [Node values](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#223-node-values). * `deletes`: A `string list` built from the `delete` field of the [gNMI Notification message](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#21-reusable-notification-message-format).
### Defining an event processor Event processors are defined under the section `processors` in `gNMIc` configuration file. Each processor is identified by a name, under which we specify the processor type as well as additional fields specific to each type. !!! note Processors names are case insensitive All processors support a `debug` field that enables extra debug log messages to help troubleshoot the processor transformation. Below is an example of an `event-delete` processor, which deletes all values with a name containing `multicast` or `broadcast` ```yaml processors: # processor name my-processor: # processor type event-delete: value-names: - ".*multicast.*" - ".*broadcast.*" ``` ### Linking an event processor to an output Once the needed event processors are defined under section `processors`, they can be linked to the desired output(s) in the same file. Each output can be configured with different event processors allowing flexibility in the way the same data is written to different outputs. A list of event processors names can be added under an output configuration, the processors will apply in the order they are configured. In the below example, 3 event processors are configured and linked to `output1` of type `influxdb`. The first processor converts all values type to `integer` if possible. The second deletes tags with name starting with `subscription-name`. Finally the third deletes values with name ending with `out-unicast-packets`. ```yaml outputs: output1: type: influxdb url: http://localhost:8086 bucket: telemetry token: srl:srl batch-size: 1000 flush-timer: 10s event-processors: - proc-convert-integer - proc-delete-tag-name - proc-delete-value-name processors: proc-convert-integer: event-convert: value-names: - ".*" type: int proc-delete-tag-name: event-delete: tag-names: - "^subscription-name" proc-delete-value-name: event-delete: value-names: - ".*out-unicast-packets" ``` ### Event processors with cache In the scenario where processors are configured under an output with [caching](../outputs/output_intro.md#caching) enabled, the event messages retrieved from the cache are processed as a single set by each processor. This concurrent processing facilitates the application of a logic that merges or combines messages, enabling more complex and integrated processing strategies. ### Event processors pipeline Processors under an output are applied in a strict sequential order for each group of event messages received. ### Event processors plugins gNMIc incorporates the capability to extend its functionality through the use of event processors as plugins. To integrate seamlessly with gNMIc, these plugins need to be written in Golang. The communication between gNMIc and these plugins is facilitated by HashiCorp's go-plugin package, which employs `netrpc` as the underlying protocol for this interaction. See some plugin examples [here](https://github.com/openconfig/gnmic/examples/plugins) ================================================ FILE: docs/user_guide/gnmi_server.md ================================================ # gNMI Server ## Introduction On top of acting as `gNMI` client `gNMIc` can run a `gNMI` server that supports `Get`, `Set` and `Subscribe` RPCs. The goal is to act as a caching point for the collected gNMI notifications and make them available to other collectors via the `Subscribe` RPC. Using this gNMI server feature it is possible to build `gNMI` based clusters and pipelines with `gNMIc`.
The server keeps a cache of the gNMI notifications received from the defined subscriptions and utilizes it to build the `Subscribe` RPC responses. The unary RPCs, Get and Set, are relayed to known targets based on the `Prefix.Target` field. ## Supported features - Supports gNMI RPCs, Get, Set, Subscribe - Acts as a gNMI gateway for Get and Set RPCs. - Supports Service registration with Consul server. - Supports all types of gNMI subscriptions, `once`, `poll`, `stream`. - Supports all types of `stream` subscriptions, `on-change`, `target-defined` and `sample`. - Supports `updates-only` with `stream` and `once` subscriptions. - Supports `suppress-redundant`. - Supports `heartbeat-interval` with `on-change` and `sample` stream subscriptions. ## Get RPC The server supports the gNMI `Get` RPC, it allows a client to retrieve `gNMI` notifications from multiple targets into a single `GetResponse`. It relies on the `GetRequest` `Prefix.Target` field to select the target(s) against which it will run the Get RPC. If `Prefix.Target` is left empty or is equal to `*`, the Get RPC is performed against all known targets. The received GetRequest is cloned, enriched with each target name and sent to the corresponding destination. Comma separated target names are also supported and allow to select a list of specific targets to send the Get RPC to. ```bash gnmic -a gnmic-server:57400 get --path /interfaces \ --target router1,router2,router3 ``` Once all GetResponses are received back successfully, the notifications contained in each GetResponse are combined into a single GetResponse with each notification's `Prefix.Target` populated, if empty. The resulting GetResponse is then returned to the gNMI client. If one of the RPCs fails, an error with status code `Internal(13)` is returned to the client. If the GetRequest Path has the `Origin` field set to `gnmic`, the request is performed against the internal `gNMIc` server configuration. Currently only the paths `targets` and `subscriptions` are supported. ```bash gnmic -a gnmic-server:57400 get --path gnmic:/targets gnmic -a gnmic-server:57400 get --path gnmic:/subscriptions ``` ## Set RPC This `gNMI` server supports the gNMI `Set` RPC, it allows a client to run a single `Set` RPC against multiple targets. Just like in the case of `Get` RPC, the server relies on the `Prefix.Target` field to select the target(s) against which it will run the `Set` RPC. If `Prefix.Target` is left empty or is equal to `*`, a Set RPC is performed against all known targets. The received SetRequest is cloned, enriched with each target name and sent to the corresponding destination. Comma separated target names are also supported and allow to select a list of specific targets to send the Set RPC to. ```bash gnmic -a gnmic-server:57400 set \ --update /system/ssh-server/admin-state:::json:::disable \ --target router1,router2,router3 ``` Once all SetResponses are received back successfully, the `UpdateResult`s from each response are merged into a single SetResponse, with the addition of the target name set in `Path.Target`. !!! note Adding a target value to a non prefix path is not compliant with the gNMI specification which stipulates that the `Target` field should only be present in [Prefix Paths](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#2221-path-target) The resulting SetResponse is then returned to the gNMI client. If one of the RPCs fails, an error with status code `Internal(13)` is returned to the client. ## Subscribe RPC The `gNMIc` server keeps a cache of gNMI notifications synched with the configured targets based on the configured subscriptions. The Subscribe requests received from a client are run against the afore mentioned cache, this means that a client cannot get updates about a leaf that `gNMIc` did not subscribe to as a client. Clients can subscribe to specific target using the gNMI `Prefix.Target` field, while leaving the `Prefix.Target` field empty or setting it to `*` is equivalent to subscribing to all known targets. ### Subscription Mode `gNMIc` gNMI Server supports the 3 gNMI specified subscription modes: `Once`, `Poll` and `Stream`. It also supports some subscription behavior modifiers: - `updates-only` with `stream` and `once` subscriptions. - `suppress-redundant`. - `heartbeat-interval` with `on-change` and `sample` stream subscriptions. #### [Once](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#35151-once-subscriptions) A subscription operating in the `ONCE` mode acts as a single request/response channel. The target creates the relevant update messages, transmits them, and subsequently closes the RPC. In this subscription mode, `gNMIc` server supports the `updates-only` knob. #### [Poll](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#35153-poll-subscriptions) Polling subscriptions are used for on-demand retrieval of data items via long-lived RPCs. A poll subscription relates to a certain set of subscribed paths, and is initiated by sending a SubscribeRequest message with encapsulated SubscriptionList. Subscription messages contained within the SubscriptionList indicate the set of paths that are of interest to the polling client. #### [Stream](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#35152-stream-subscriptions) Stream subscriptions are long-lived subscriptions which continue to transmit updates relating to the set of paths that are covered within the subscription indefinitely. In this subscription mode, `gNMIc` server supports the `updates-only` knob. ##### On Change When a subscription is defined to be `on-change`, data updates are only sent to the client when the value of the data item changes. In the case of `gNMIc` gNMI server, `on-change` subscriptions depend on the subscription writing data to the local cache, if it is a `sample` subscription, each update from a target will trigger an `on-change` update to the server client. `gNMIc` gNMI server supports `on-change` subscriptions with `heartbeat-interval`. If the `heartbeat-interval` value is set to a non zero value, the value of the data item(s) MUST be re-sent once per heartbeat interval regardless of whether the value has changed or not. !!! note The minimum heartbeat-interval is configurable using the field `min-heartbeat-interval`. It defaults to `1s` If the received `heartbeat-interval` value is greater than zero but lower than `min-heartbeat-interval`, the `min-heartbeat-interval` value is used instead. ##### Target Defined When a client creates a subscription specifying the target defined mode, the target MUST determine the best type of subscription to be created on a per-leaf basis. In the case of `gNMIc` gNMI server, a `target-defined` stream subscription, is treated as an `on-change` subscription. Note that this does not mean that `gNMIc` will filter out the unchanged values received from a sample subscription to the actual targets. ##### Sample A `sample` subscription is one where data items are sent to the client once per `sample-interval`. The minimum supported `sample-interval` is configurable using the field `min-sample-interval`, defaults to `1ms`. If within a `SubscribeRequest` the received `sample-interval` is zero, the `default-sample-interval` is used, defaults to `1s`. ## Configuration ```yaml gnmi-server: # the address the gNMI server will listen to address: :57400 # tls config tls: # string, path to the CA certificate file, # this certificate is used to verify the clients certificates. ca-file: # string, server certificate file. cert-file: # string, server key file. key-file: # string, one of `"", "request", "require", "verify-if-given", or "require-verify" # - request: The server requests a certificate from the client but does not # require the client to send a certificate. # If the client sends a certificate, it is not required to be valid. # - require: The server requires the client to send a certificate and does not # fail if the client certificate is not valid. # - verify-if-given: The server requests a certificate, # does not fail if no certificate is sent. # If a certificate is sent it is required to be valid. # - require-verify: The server requires the client to send a valid certificate. # # if no ca-file is present, `client-auth` defaults to ""` # if a ca-file is set, `client-auth` defaults to "require-verify"` client-auth: "" max-subscriptions: 64 # maximum number of active Get/Set RPCs max-unary-rpc: 64 # Unary RPC request timeout unary-rpc-timeout: 120s # defines the maximum msg size (in bytes) the server can receive, # defaults to 4MB max-recv-msg-size: # defines the maximum msg size (in bytes) the server can send, # default to MaxInt32 (2147483647 bytes or 2.147483647 Gb) max-send-msg-size: # defines the maximum number of streams per streaming RPC. max-concurrent-streams: # defines the TCP keepalive tiem and interval for client connections, # if unset it is enabled based on the OS. If negative it is disabled. tcp-keepalive: # set keepalive and max-age parameters on the server-side. keepalive: # MaxConnectionIdle is a duration for the amount of time after which an # idle connection would be closed by sending a GoAway. Idleness duration is # defined since the most recent time the number of outstanding RPCs became # zero or the connection establishment. # The current default value is infinity. max-connection-idle: # MaxConnectionAge is a duration for the maximum amount of time a # connection may exist before it will be closed by sending a GoAway. A # random jitter of +/-10% will be added to MaxConnectionAge to spread out # connection storms. # The current default value is infinity. max-connection-age: # MaxConnectionAgeGrace is an additive period after MaxConnectionAge after # which the connection will be forcibly closed. # The current default value is infinity. max-connection-age-grace: # After a duration of this time if the server doesn't see any activity it # pings the client to see if the transport is still alive. # If set below 1s, a minimum value of 1s will be used instead. # The current default value is 2 hours. time: 120m # After having pinged for keepalive check, the server waits for a duration # of Timeout and if no activity is seen even after that the connection is # closed. # The current default value is 20 seconds. timeout: 20s # defines the minimum allowed sample interval, this value is used when the received sample-interval # is greater than zero but lower than this minimum value. min-sample-interval: 1ms # defines the default sample interval, # this value is used when the received sample-interval is zero within a stream/sample subscription. default-sample-interval: 1s # defines the minimum heartbeat-interval # this value is used when the received heartbeat-interval is greater than zero but # lower than this minimum value min-heartbeat-interval: 1s # enables the collection of Prometheus gRPC server metrics enable-metrics: false # enable additional debug logs debug: false # Enables Consul service registration service-registration: # Consul server address, default to localhost:8500 address: # Consul Data center, defaults to dc1 datacenter: # Consul username, to be used as part of HTTP basicAuth username: # Consul password, to be used as part of HTTP basicAuth password: # Consul Token, is used to provide a per-request ACL token # which overrides the agent's default token token: # gnmi server service check interval, only TTL Consul check is enabled # defaults to 5s check-interval: # Maximum number of failed checks before the service is deleted by Consul # defaults to 3 max-fail: # Consul service name name: # List of tags to be added to the service registration, # if available, the instance-name and cluster-name will be added as tags, # in the format: gnmic-instance=$instance-name and gnmic-cluster=$cluster-name tags: # cache configuration cache: # cache type, defaults to `oc` type: oc # string, address of the remote cache server, # irrelevant if type is `oc` address: # string, the remote server username. username: # string, the remote server password. password: # string, expiration period of received messages. expiration: 60s # enable extra logging debug: false # int64, default: 1073741824 (1 GiB). # Max number of bytes stored in the cache per subscription. max-bytes: # int64, default: 1048576. # Max number of messages stored per subscription. max-msgs-per-subscription: # int, default 100. # Batch size used by the JetStream pull subscriber. fetch-batch-size: # duration, default 100ms. # Wait time used by the JetStream pull subscriber. fetch-wait-time: ``` ### Secure vs Insecure Server #### Insecure Mode By default, the server runs in insecure mode, as long as `skip-verify` is false and none of `ca-file`, `cert-file` and `key-file` are set. #### Secure Mode To run this gNMI server in secure mode, there are a few options: - **Using self signed certificates, without client certificate verification:** ```yaml gnmi-server: skip-verify: true ``` - **Using self signed certificates, with client certificate verification:** ```yaml gnmi-server: # a valid CA certificate to verify the client provided certificates ca-file: /path/to/caFile ``` - **Using CA provided certificates, without client certificate verification:** ```yaml gnmi-server: skip-verify: true # a valid server certificate cert-file: /path/to/server-cert # a valid server key key-file: /path/to/server-key ``` - **Using CA provided certificates, with client certificate verification:** ```yaml gnmi-server: # a valid CA certificate to verify the client provided certificates ca-file: /path/to/caFile # a valid server certificate cert-file: /path/to/server-cert # a valid server key key-file: /path/to/server-key ``` ### Fields #### address Defines the address the gNMI server will listen to. This can be a tcp socket in the format `` or a unix socket starting with `unix:///` #### skip-verify If true, the server will not verify the client's certificates. #### ca-file Defines the path to the CA certificate file to be used, irrelevant if `skip-verify` is true #### cert-file Defines the path to the server certificate file to be used. #### key-file Defines the path to the server key file to be used. #### max-subscriptions Defines the maximum number of allowed subscriptions. Defaults to `64`. #### max-unary-rpc Defines the maximum number of active Get/Set RPCs. Defaults to `64`. #### min-sample-interval Defines the minimum allowed sample interval, this value is used when the received sample-interval is greater than zero but lower than this minimum value. Defaults to `1ms` #### default-sample-interval Defines the default sample interval, this value is used when the received sample-interval is zero within a stream/sample subscription. Defaults to `1s` #### min-heartbeat-interval Defines the minimum heartbeat-interval, this value is used when the received heartbeat-interval is greater than zero but lower than this minimum value. Defaults to `1s` #### enable-metrics Enables the collection of Prometheus gRPC server metrics. #### debug Enables additional debug logging. ## Caching By default, the gNMI server uses Openconfig's gNMI cache as a backend. Distributed caching is supported using any of the other cache types specified [here](caching.md#cache-types).
When a distributed cache is used together with the gNMI server feature, a gNMI client can subscribe to any of the gNMI servers to get gNMI updates collected from all the targets. On the other hand, if the gNMI client sends a unary RPC (Get, Set), it will have be directed to the gNMI server directly connected to the target. ```yaml gnmi-server: # # other gnmi-server attributes # cache: # cache type, defaults to `oc` type: oc # redis, nats or jetstream # string, address of the remote cache server, # irrelevant if type is `oc` address: # string, the remote server username. username: # string, the remote server password. password: # string, expiration period of received messages. expiration: 60s # enable extra logging. debug: false # int64, default: 1073741824 (1 GiB). # Max number of bytes stored in the cache per subscription. max-bytes: # int64, default: 1048576. # Max number of messages stored per subscription. max-msgs-per-subscription: # int, default 100. # Batch size used by the JetStream pull subscriber. fetch-batch-size: # duration, default 100ms. # Wait time used by the JetStream pull subscriber. fetch-wait-time: ``` ================================================ FILE: docs/user_guide/golang_package/examples/capabilities.md ================================================ The below snippet shows how to create a target, send a Capabilities Request and print the response. ```golang package main import ( "context" "fmt" "log" "github.com/openconfig/gnmic/pkg/api" "google.golang.org/protobuf/encoding/prototext" ) func main() { // create a target tg, err := api.NewTarget( api.Name("srl1"), api.Address("10.0.0.1:57400"), api.Username("admin"), api.Password("admin"), api.SkipVerify(true), ) if err != nil { log.Fatal(err) } ctx, cancel := context.WithCancel(context.Background()) defer cancel() // create a gNMI client err = tg.CreateGNMIClient(ctx) if err != nil { log.Fatal(err) } defer tg.Close() // send a gNMI capabilities request to the created target capResp, err := tg.Capabilities(ctx) if err != nil { log.Fatal(err) } fmt.Println(prototext.Format(capResp)) } ``` ================================================ FILE: docs/user_guide/golang_package/examples/get.md ================================================ The below snippet shows how to create a target, send a Get Request and print the response. ```golang package main import ( "context" "fmt" "log" "github.com/openconfig/gnmic/pkg/api" "google.golang.org/protobuf/encoding/prototext" ) func main() { // create a target tg, err := api.NewTarget( api.Name("srl1"), api.Address("10.0.0.1:57400"), api.Username("admin"), api.Password("admin"), api.SkipVerify(true), ) if err != nil { log.Fatal(err) } ctx, cancel := context.WithCancel(context.Background()) defer cancel() // create a gNMI client err = tg.CreateGNMIClient(ctx) if err != nil { log.Fatal(err) } defer tg.Close() // create a GetRequest getReq, err := api.NewGetRequest( api.Path("/system/name"), api.Encoding("json_ietf")) if err != nil { log.Fatal(err) } fmt.Println(prototext.Format(getReq)) // send the created gNMI GetRequest to the created target getResp, err := tg.Get(ctx, getReq) if err != nil { log.Fatal(err) } fmt.Println(prototext.Format(getResp)) } ``` ================================================ FILE: docs/user_guide/golang_package/examples/set.md ================================================ The below snippet shows how to create a target, send a Set Request and print the reponse. ```golang package main import ( "context" "fmt" "log" "github.com/openconfig/gnmic/pkg/api" "google.golang.org/protobuf/encoding/prototext" ) func main() { // create a target tg, err := api.NewTarget( api.Name("srl1"), api.Address("10.0.0.1:57400"), api.Username("admin"), api.Password("admin"), api.SkipVerify(true), ) if err != nil { log.Fatal(err) } ctx, cancel := context.WithCancel(context.Background()) defer cancel() err = tg.CreateGNMIClient(ctx) if err != nil { log.Fatal(err) } defer tg.Close() // create a gNMI SetRequest setReq, err := api.NewSetRequest( api.Update( api.Path("/system/name/host-name"), api.Value("srl2", "json_ietf")), ) if err != nil { log.Fatal(err) } fmt.Println(prototext.Format(setReq)) // send the created gNMI SetRequest to the created target setResp, err := tg.Set(ctx, setReq) if err != nil { log.Fatal(err) } fmt.Println(prototext.Format(setResp)) } ``` ================================================ FILE: docs/user_guide/golang_package/examples/subscribe.md ================================================ The below snippet shows how to create a target and a Subscribe Request. It then starts a Stream subscription with 10s interval and listens to Responses and errors. ```golang package main import ( "context" "fmt" "log" "time" "github.com/openconfig/gnmic/pkg/api" "google.golang.org/protobuf/encoding/prototext" ) func main() { // create a target tg, err := api.NewTarget( api.Name("srl1"), api.Address("srl1:57400"), api.Username("admin"), api.Password("admin"), api.SkipVerify(true), ) if err != nil { log.Fatal(err) } ctx, cancel := context.WithCancel(context.Background()) defer cancel() err = tg.CreateGNMIClient(ctx) if err != nil { log.Fatal(err) } defer tg.Close() // create a gNMI subscribeRequest subReq, err := api.NewSubscribeRequest( api.Encoding("json_ietf"), api.SubscriptionListMode("stream"), api.Subscription( api.Path("system/name"), api.SubscriptionMode("sample"), api.SampleInterval(10*time.Second), )) if err != nil { log.Fatal(err) } fmt.Println(prototext.Format(subReq)) // start the subscription go tg.Subscribe(ctx, subReq, "sub1") // start a goroutine that will stop the subscription after x seconds go func() { select { case <-ctx.Done(): return case <-time.After(42 * time.Second): tg.StopSubscription("sub1") } }() subRspChan, subErrChan := tg.ReadSubscriptions() for { select { case rsp := <-subRspChan: fmt.Println(prototext.Format(rsp.Response)) case tgErr := <-subErrChan: log.Fatalf("subscription %q stopped: %v", tgErr.SubscriptionName, tgErr.Err) } } } ``` ================================================ FILE: docs/user_guide/golang_package/gnmi_options.md ================================================ The package `github.com/openconfig/gnmic/pkg/api` exposes a set of `api.GNMIOption` that can be used with `api.NewGetRequest(...api.GNMIOption) GNMIOption`, `api.NewSetRequest(...api.GNMIOption) GNMIOption` or `api.NewSubscribeRequest(...api.GNMIOption) GNMIOption` to create a gNMI Request. ```golang // Version sets the provided gNMI version string in a gnmi.CapabilityResponse message. func Version(v string) func(msg proto.Message) error ``` ```golang // SupportedEncoding creates an GNMIOption that sets the provided encodings as supported encodings in a gnmi.CapabilitiesResponse func SupportedEncoding(encodings ...string) func(msg proto.Message) error ``` ```golang // SupportedModel creates an GNMIOption that sets the provided name, org and version as a supported model in a gnmi.CapabilitiesResponse. func SupportedModel(name, org, version string) func(msg proto.Message) error ``` ```golang // Extension creates a GNMIOption that applies the supplied gnmi_ext.Extension to the provided // proto.Message. func Extension(ext *gnmi_ext.Extension) func(msg proto.Message) error ``` ```golang // Prefix creates a GNMIOption that creates a *gnmi.Path and adds it to the supplied // proto.Message (as a Path Prefix). // The proto.Message can be a *gnmi.GetRequest, *gnmi.SetRequest or a *gnmi.SubscribeRequest with RequestType Subscribe. func Prefix(prefix string) func(msg proto.Message) error ``` ```golang // Target creates a GNMIOption that set the gnmi Prefix target to the supplied string value. // The proto.Message can be a *gnmi.GetRequest, *gnmi.SetRequest or a *gnmi.SubscribeRequest with RequestType Subscribe. func Target(target string) func(msg proto.Message) error ``` ```golang // Path creates a GNMIOption that creates a *gnmi.Path and adds it to the supplied proto.Message // which can be a *gnmi.GetRequest, *gnmi.SetRequest or a *gnmi.Subscription. func Path(path string) func(msg proto.Message) error ``` ```golang // Encoding creates a GNMIOption that adds the encoding type to the supplied proto.Message // which can be a *gnmi.GetRequest, *gnmi.SetRequest or a *gnmi.SubscribeRequest with RequestType Subscribe. func Encoding(encoding string) func(msg proto.Message) error ``` ```golang // EncodingJSON creates a GNMIOption that sets the encoding type to JSON in a gnmi.GetRequest or // gnmi.SubscribeRequest. func EncodingJSON() func(msg proto.Message) error ``` ```golang // EncodingBytes creates a GNMIOption that sets the encoding type to BYTES in a gnmi.GetRequest or // gnmi.SubscribeRequest. func EncodingBytes() func(msg proto.Message) error ``` ```golang // EncodingPROTO creates a GNMIOption that sets the encoding type to PROTO in a gnmi.GetRequest or // gnmi.SubscribeRequest. func EncodingPROTO() func(msg proto.Message) error ``` ```golang // EncodingASCII creates a GNMIOption that sets the encoding type to ASCII in a gnmi.GetRequest or // gnmi.SubscribeRequest. func EncodingASCII() func(msg proto.Message) error ``` ```golang // EncodingJSON_IETF creates a GNMIOption that sets the encoding type to JSON_IETF in a gnmi.GetRequest or // gnmi.SubscribeRequest. func EncodingJSON_IETF() func(msg proto.Message) error ``` ```golang // EncodingCustom creates a GNMIOption that adds the encoding type to the supplied proto.Message // which can be a *gnmi.GetRequest, *gnmi.SetRequest or a *gnmi.SubscribeRequest with RequestType Subscribe. // Unlike Encoding, this GNMIOption does not validate if the provided encoding is defined by the gNMI spec. func EncodingCustom(enc int) func(msg proto.Message) error ``` ```golang // DataType creates a GNMIOption that adds the data type to the supplied proto.Message // which must be a *gnmi.GetRequest. func DataType(datat string) func(msg proto.Message) error ``` ```golang // DataTypeALL creates a GNMIOption that sets the gnmi.GetRequest data type to ALL func DataTypeALL() func(msg proto.Message) error ``` ```golang // DataTypeCONFIG creates a GNMIOption that sets the gnmi.GetRequest data type to CONFIG func DataTypeCONFIG() func(msg proto.Message) error ``` ```golang // DataTypeSTATE creates a GNMIOption that sets the gnmi.GetRequest data type to STATE func DataTypeSTATE() func(msg proto.Message) error ``` ```golang // DataTypeOPERATIONAL creates a GNMIOption that sets the gnmi.GetRequest data type to OPERATIONAL func DataTypeOPERATIONAL() func(msg proto.Message) error ``` ```golang // UseModel creates a GNMIOption that add a gnmi.DataModel to a gnmi.GetRequest or gnmi.SubscribeRequest // based on the name, org and version strings provided. func UseModel(name, org, version string) func(msg proto.Message) error ``` ```golang // Update creates a GNMIOption that creates a *gnmi.Update message and adds it to the supplied proto.Message, // the supplied message must be a *gnmi.SetRequest. func Update(opts ...GNMIOption) func(msg proto.Message) error ``` ```golang // Replace creates a GNMIOption that creates a *gnmi.Update message and adds it to the supplied proto.Message. // the supplied message must be a *gnmi.SetRequest. func Replace(opts ...GNMIOption) func(msg proto.Message) error ``` ```golang // Value creates a GNMIOption that creates a *gnmi.TypedValue and adds it to the supplied proto.Message. // the supplied message must be a *gnmi.Update. // If a map is supplied as `data interface{}` it has to be a map[string]interface{}. func Value(data interface{}, encoding string) func(msg proto.Message) error ``` ```golang // Delete creates a GNMIOption that creates a *gnmi.Path and adds it to the supplied proto.Message. // the supplied message must be a *gnmi.SetRequest. The *gnmi.Path is added the .Delete list. func Delete(path string) func(msg proto.Message) error ``` ```golang // SubscriptionListMode creates a GNMIOption that sets the SubscribeRequest Mode. // The variable mode must be one of "once", "poll" or "stream". // The supplied proto.Message must be a *gnmi.SubscribeRequest with RequestType Subscribe. func SubscriptionListMode(mode string) func(msg proto.Message) error ``` ```golang // SubscriptionListModeSTREAM creates a GNMIOption that sets the Subscription List Mode to STREAM func SubscriptionListModeSTREAM() func(msg proto.Message) error ``` ```golang // SubscriptionListModeONCE creates a GNMIOption that sets the Subscription List Mode to ONCE func SubscriptionListModeONCE() func(msg proto.Message) error ``` ```golang // SubscriptionListModePOLL creates a GNMIOption that sets the Subscription List Mode to POLL func SubscriptionListModePOLL() func(msg proto.Message) error ``` ```golang // Qos creates a GNMIOption that sets the QosMarking field in a *gnmi.SubscribeRequest with RequestType Subscribe. func Qos(qos uint32) func(msg proto.Message) error ``` ```golang // UseAliases creates a GNMIOption that sets the UsesAliases field in a *gnmi.SubscribeRequest with RequestType Subscribe. func UseAliases(b bool) func(msg proto.Message) error ``` ```golang // AllowAggregation creates a GNMIOption that sets the AllowAggregation field in a *gnmi.SubscribeRequest with RequestType Subscribe. func AllowAggregation(b bool) func(msg proto.Message) error ``` ```golang // UpdatesOnly creates a GNMIOption that sets the UpdatesOnly field in a *gnmi.SubscribeRequest with RequestType Subscribe. func UpdatesOnly(b bool) func(msg proto.Message) error ``` ```golang // UpdatesOnly creates a GNMIOption that creates a *gnmi.Subscription based on the supplied GNMIOption(s) and adds it the // supplied proto.Message which must be of type *gnmi.SubscribeRequest with RequestType Subscribe. func Subscription(opts ...GNMIOption) func(msg proto.Message) error ``` ```golang // SubscriptionMode creates a GNMIOption that sets the Subscription mode in a proto.Message of type *gnmi.Subscription. func SubscriptionMode(mode string) func(msg proto.Message) error ``` ```golang // SubscriptionModeTARGET_DEFINED creates a GNMIOption that sets the subscription mode to TARGET_DEFINED func SubscriptionModeTARGET_DEFINED() func(msg proto.Message) error ``` ```golang // SubscriptionModeON_CHANGE creates a GNMIOption that sets the subscription mode to ON_CHANGE func SubscriptionModeON_CHANGE() func(msg proto.Message) error ``` ```golang // SubscriptionModeSAMPLE creates a GNMIOption that sets the subscription mode to SAMPLE func SubscriptionModeSAMPLE() func(msg proto.Message) error ``` ```golang // SampleInterval creates a GNMIOption that sets the SampleInterval in a proto.Message of type *gnmi.Subscription. func SampleInterval(d time.Duration) func(msg proto.Message) error ``` ```golang // HeartbeatInterval creates a GNMIOption that sets the HeartbeatInterval in a proto.Message of type *gnmi.Subscription. func HeartbeatInterval(d time.Duration) func(msg proto.Message) error ``` ```golang // SuppressRedundant creates a GNMIOption that sets the SuppressRedundant in a proto.Message of type *gnmi.Subscription. func SuppressRedundant(s bool) func(msg proto.Message) error ``` ```golang // Notification creates a GNMIOption that builds a gnmi.Notification from the supplied GNMIOptions and adds it // to the supplied proto.Message func Notification(opts ...GNMIOption) func(msg proto.Message) error ``` ```golang // Timestamp sets the supplied timestamp in a gnmi.Notification message func Timestamp(t int64) func(msg proto.Message) error ``` ```golang // TimestampNow is the same as Timestamp(time.Now().UnixNano()) func TimestampNow() func(msg proto.Message) error ``` ```golang // Alias sets the supplied alias value in a gnmi.Notification message func Alias(alias string) func(msg proto.Message) error ``` ```golang // Atomic sets the .Atomic field in a gnmi.Notification message func Atomic(b bool) func(msg proto.Message) error ``` ```golang // UpdateResult creates a GNMIOption that creates a gnmi.UpdateResult and adds it to // a proto.Message of type gnmi.SetResponse. func UpdateResult(opts ...GNMIOption) func(msg proto.Message) error ``` ```golang // Operation creates a GNMIOption that sets the gnmi.UpdateResult_Operation // value in a gnmi.UpdateResult. func Operation(oper string) func(msg proto.Message) error ``` ```golang // OperationINVALID creates a GNMIOption that sets the gnmi.SetResponse Operation to INVALID func OperationINVALID() func(msg proto.Message) error ``` ```golang // OperationDELETE creates a GNMIOption that sets the gnmi.SetResponse Operation to DELETE func OperationDELETE() func(msg proto.Message) error ``` ```golang // OperationREPLACE creates a GNMIOption that sets the gnmi.SetResponse Operation to REPLACE func OperationREPLACE() func(msg proto.Message) error ``` ```golang // OperationUPDATE creates a GNMIOption that sets the gnmi.SetResponse Operation to UPDATE func OperationUPDATE() func(msg proto.Message) error ``` ================================================ FILE: docs/user_guide/golang_package/intro.md ================================================ `gnmic` (`github.com/openconfig/gnmic/pkg/api`) can be imported as a dependency in your Golang programs. It acts as a wrapper around the `openconfig/gnmi` package providing a user friendly API to create a target and easily craft gNMI requests. ## Creating gNMI requests ### Get Request ```golang func NewGetRequest(opts ...GNMIOption) (*gnmi.GetRequest, error) ``` The below 2 snippets create a Get Request with 2 paths, `json_ietf` encoding and data type `STATE` Using `github.com/openconfig/gnmic/pkg/api` ```golang getReq, err := api.NewGetRequest( api.Encoding("json_ietf"), api.DataType("state"), api.Path("interface/statistics"), api.Path("interface/subinterface/statistics"), ) // check error ``` Using `github.com/openconfig/gnmi` ```golang getReq := &gnmi.GetRequest{ Path: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ {Name: "interface"}, {Name: "statistics"}, }, }, { Elem: []*gnmi.PathElem{ {Name: "interface"}, {Name: "subinterface"}, {Name: "statistics"}, }, }, }, Type: gnmi.GetRequest_STATE, Encoding: gnmi.Encoding_JSON_IETF, } ``` ### Set Request ```golang func NewSetRequest(opts ...GNMIOption) (*gnmi.SetRequest, error) ``` The below 2 snippets create a Set Request with one two updates, one replace and one delete messages: Using `github.com/openconfig/gnmic/pkg/api` ```golang setReq, err := api.NewSetRequest( api.Update( api.Path("/system/name/host-name"), api.Value("srl2", "json_ietf"), ), api.Update( api.Path("/system/gnmi-server/unix-socket/admin-state"), api.Value("enable", "json_ietf"), ), api.Replace( api.Path("/network-instance[name=default]/admin-state"), api.Value("enable", "json_ietf"), ), api.Delete("/interface[name=ethernet-1/1]/admin-state"), ) // check error ``` Using `github.com/openconfig/gnmi` ```golang setReq := &gnmi.SetRequest{ Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, {Name: "host-name"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{ JsonIetfVal: []byte("\"srl2\""), }, }, }, { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "gnmi-server"}, {Name: "unix-socket"}, {Name: "admin-state"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{ JsonIetfVal: []byte("\"enable\""), }, }, }, }, Replace: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "network-instance", Key: map[string]string{ "name": "default", }, }, { Name: "admin-state", }, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{ JsonIetfVal: []byte("\"enable\""), }, }, }, }, Delete: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ { Name: "interface", Key: map[string]string{ "name": "ethernet-1/1", }, }, { Name: "admin-state", }, }, }, }, } ``` ### Subscribe Request Create a Subscribe Request ```golang func NewSubscribeRequest(opts ...GNMIOption) (*gnmi.SubscribeRequest, error) ``` Create a Subscribe Poll Request ```golang func NewSubscribePollRequest(opts ...GNMIOption) *gnmi.SubscribeRequest ``` The below 2 snippets create a `stream` subscribe request with 2 paths, `json_ietf` encoding and a sample interval of 10 seconds: Using `github.com/openconfig/gnmic/pkg/api` ```golang subReq, err := api.NewSubscribeRequest( api.Encoding("json_ietf"), api.SubscriptionListMode("stream"), api.Subscription( api.Path("interface/statistics"), api.SubscriptionMode("sample"), api.SampleInterval("10s"), ), api.Subscription( api.Path("interface/subinterface/statistics"), api.SubscriptionMode("sample"), api.SampleInterval("10s"), ), ) // check error ``` Using `github.com/openconfig/gnmi` ```golang subReq := &gnmi.SubscribeRequest_Subscribe{ Subscribe: &gnmi.SubscriptionList{ Subscription: []*gnmi.Subscription{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "interface"}, {Name: "statistics"}, }, }, Mode: gnmi.SubscriptionMode_SAMPLE, SampleInterval: uint64(10 * time.Second), }, { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "interface"}, {Name: "subinterface"}, {Name: "statistics"}, }, }, Mode: gnmi.SubscriptionMode_SAMPLE, SampleInterval: uint64(10 * time.Second), }, }, Mode: gnmi.SubscriptionList_STREAM, Encoding: gnmi.Encoding_JSON_IETF, }, } ``` ## Creating Targets A target can be created using `func NewTarget(opts ...TargetOption) (*target.Target, error)`. The full list of `api.TargetOption` can be found [here](target_options.md) ```golang tg, err := api.NewTarget( api.Name("srl1"), api.Address("10.0.0.1:57400"), api.Username("admin"), api.Password("admin"), api.SkipVerify(true), ) // check error ``` Once a Target is created, Multiple functions are available to run the desired RPCs, check the examples [here](examples/capabilities.md) ================================================ FILE: docs/user_guide/golang_package/target_options.md ================================================ The package `github.com/openconfig/gnmic/pkg/api` exposes a set of `api.TargetOption` that can be used with `api.NewTarget(...api.TargetOption) TargetOption` to create `target.Target`. ```golang // Name sets the target name. func Name(name string) TargetOption // Address sets the target address. // This Option can be set multiple times. func Address(addr string) TargetOption // Username sets the target Username. func Username(username string) TargetOption // Password sets the target Password. func Password(password string) TargetOption // Timeout sets the gNMI client creation timeout. func Timeout(timeout time.Duration) TargetOption // Insecure sets the option to create a gNMI client with an // insecure gRPC connection func Insecure(i bool) TargetOption // SkipVerify sets the option to create a gNMI client with a // secure gRPC connection without verifying the target's certificates. func SkipVerify(i bool) TargetOption // TLSCA sets that path towards the TLS certificate authority file. func TLSCA(tlsca string) TargetOption // TLSCert sets that path towards the TLS certificate file. func TLSCert(cert string) TargetOption // TLSKey sets that path towards the TLS key file. func TLSKey(key string) TargetOption // TLSMinVersion sets the TLS minimum version used during the TLS handshake. func TLSMinVersion(v string) TargetOption // TLSMaxVersion sets the TLS maximum version used during the TLS handshake. func TLSMaxVersion(v string) TargetOption // TLSVersion sets the desired TLS version used during the TLS handshake. func TLSVersion(v string) TargetOption // LogTLSSecret, if set to true, // enables logging of the TLS master key. func LogTLSSecret(b bool) TargetOption // Gzip, if set to true, // adds gzip compression to the gRPC connection. func Gzip(b bool) TargetOption // Token sets the per RPC credentials for all RPC calls. func Token(token string) TargetOption ``` ================================================ FILE: docs/user_guide/inputs/input_intro.md ================================================ `gnmic` supports various Inputs to consume gnmi data, transform it and ultimately export it to one or multiple Outputs. The purpose of `gnmic`'s Inputs is to build a gnmi data pipeline by enabling the ingestion and export of gnmi data that was exported by `gnmic`'s outputs upstream.
Currently supported input types: * [NATS messaging system](nats_input.md) * [NATS Streaming messaging bus (STAN)](stan_input.md) * [Kafka messaging bus](kafka_input.md) ### Defining Inputs and matching Outputs To define an Input a user needs to fill in the `inputs` section in the configuration file. Each Input is defined by its name (`input1` in the example below), a `type` field which determines the type of input to be created (`nats`, `stan`, `kafka`) and various other configuration fields which depend on the Input type. !!! note Inputs names are case insensitive All Input types have an `outputs` field, under which the user can defined the downstream destination(s) of the consumed data. This way, data consumed once, can be exported multiple times. !!!info The same `gnmic` instance can act as gNMI collector, input and output simultaneously. Example: ```yaml # part of gnmic config file inputs: input1: type: nats # input type # # other config fields depending on the input type # outputs: - output1 - output2 ``` ### Inputs use cases #### Clustering Using `gnmic` Inputs, the user can aggregate all the collected data into one instance of `gnmic` that can make it available to a downstream off the shelf tool,typically Prometheus.
#### Data reuse Collect data once and use it multiple times. By chaining multiple instances of `gnmic` the user can process the same stream of data in different ways. A different set of event processors can be applied on the data stream before being exported to its intended outputs.
================================================ FILE: docs/user_guide/inputs/jetstream_input.md ================================================ When using jetstream as an input, `gnmic` consumes data from a specified NATS JetStream stream using a durable consumer. Messages are fetched in batches and delivered to `gnmic` in either `event` or `proto` format. Each gNMIc instance creates one durable consumer using the configured `subjects` field. Multiple workers (subscribers) can be spawned using the `num-workers` option to increase processing throughput. All workers within a single gNMIc instance share the same durable consumer to ensure coordinated message processing. For scaling across multiple consumers, deploy multiple gNMIc instances with different consumer names. Each instance will create its own durable consumer on the stream. The `jetstream` input will export received messages to the configured `outputs`. Optionally, `event-processors` can be applied when using event format. ```yaml inputs: js-input: # required string, type of input type: jetstream # optional string, input instance name # defaults to a generated name if empty name: js-consumer # string, NATS server address # default: "localhost:4222" address: nats.example.com:4222 # string, name of the JetStream stream to consume from stream: telemetry-stream # list of subject filters within the stream to consume from # the consumer will receive messages matching any of these subjects subjects: - telemetry.device.* # enum string, format of consumed message: "event" or "proto" # default: "event" format: event # enum string, delivery policy for JetStream: # one of: all, last, new, last-per-subject # default: all deliver-policy: last # optional string, subject format used to extract metadata # one of: static, subscription.target, target.subscription # affects proto messages only subject-format: target.subscription # optional string, NATS username username: nats-user # optional string, NATS password password: secret # optional duration, reconnect wait time # default: 2s connect-time-wait: 3s # optional bool, enables debug logging debug: true # integer, number of workers to start (parallel consumers) # default: 1 num-workers: 2 # integer, internal per-worker buffer size # default: 500 buffer-size: 1000 # integer, batch size when fetching messages from JetStream # default: 500 fetch-batch-size: 200 # integer, maximum number of allowed pending ack on the stream # default: 1000 max-ack-pending: 5000 # optional list of output names this input writes to # outputs must be configured at the root `outputs:` section outputs: - file - kafka # optional list of event processors # only applies when format is "event" event-processors: - add-tags # optional TLS configuration for secure NATS connection tls: ca-file: /etc/ssl/certs/ca.pem cert-file: /etc/ssl/certs/cert.pem key-file: /etc/ssl/certs/key.pem skip-verify: false ``` ## Message Formats - `event`: Expects JSON-encoded array of `EventMsg`. Supports processing pipelines and exporting to multiple outputs. - `proto`: Expects binary-encoded `gnmi.SubscribeResponse` messages. Metadata such as `source` and `subscription-name` is extracted from the subject based on subject-format. ## Delivery Policies - `all`: Delivers all messages from the stream history. - `last`: Delivers only the most recent message. - `new`: Starts delivery from new messages only. - `last-per-subject`: Delivers the latest message for each subject. ## Subject Format Behavior When using proto format, gnmic uses the subject name to extract metadata: - `subscription.target` → subscription-name = first, source = second - `target.subscription` → subscription-name = second, source = first - `static` → no parsing; no additional metadata is extracted ## Scaling with Multiple Consumers Each gNMIc instance creates a single durable consumer on the stream. To scale message processing across multiple consumers: 1. Deploy multiple gNMIc instances 2. Give each instance a different consumer `name` in its configuration 3. Configure each instance with appropriate `subjects` filters to partition work **Example - Two instances consuming different subjects:** Instance 1: ```yaml inputs: js-consumer-1: type: jetstream name: consumer-router-metrics address: localhost:4222 stream: telemetry-stream subjects: - telemetry.router.* num-workers: 2 format: event ``` Instance 2: ```yaml inputs: js-consumer-2: type: jetstream name: consumer-switch-metrics address: localhost:4222 stream: telemetry-stream subjects: - telemetry.switch.* num-workers: 2 format: event ``` Each instance creates its own durable consumer with its configured subject filters. Within each instance, multiple workers share the same consumer for parallel processing. ## Usage Notes - A durable consumer is created on the stream using the provided name as the durable name. - All workers use the same durable name to share state and resume progress across reconnects. - TLS can be configured if the NATS server uses secure connections. ================================================ FILE: docs/user_guide/inputs/kafka_input.md ================================================ When using Kafka as input, `gnmic` consumes data from a specific Kafka topic in `event` or `proto` format. Multiple consumers can be created per `gnmic` instance (`num-workers`). All the workers join the same [Kafka consumer group](https://docs.confluent.io/platform/current/clients/consumer.html#consumer-groups) (`group-id`) in order to load share the messages between the workers. Multiple instances of `gnmic` with the same Kafka input can be used to effectively consume the exported messages in parallel The Kafka input will export the received messages to the list of outputs configured under its `outputs` section. ```yaml inputs: input1: # string, required, specifies the type of input type: kafka # Kafka subscriber name # If left empty, it will be populated with the string from flag --instance-name appended with `--kafka-cons`. # If --instance-name is also empty, a random name is generated in the format `gnmic-$uuid` # note that each kafka worker (consumer) will get name=$name-$index name: "" # Kafka SASL configuration sasl: # SASL user name user: # SASL password password: # SASL mechanism: PLAIN, SCRAM-SHA-256, SCRAM-SHA-512 and OAUTHBEARER are supported mechanism: # token url for OAUTHBEARER SASL mechanism token-url: # string, comma separated Kafka servers addresses address: localhost:9092 # string, comma separated topics the Kafka consumer group consumes messages from. topics: telemetry # consumer group all gnmic Kafka input workers join, # so that Kafka server can load share the messages between them. Defaults to `gnmic-consumers` group-id: gnmic-consumers # duration, the timeout used to detect consumer failures when using Kafka's group management facility. # If no heartbeats are received by the broker before the expiration of this session timeout, # then the broker will remove this consumer from the group and initiate a rebalance. session-timeout: 10s # duration, the expected time between heartbeats to the consumer coordinator when using Kafka's group # management facilities. heartbeat-interval: 3s # duration, wait time before reconnection attempts after any error recovery-wait-time: 2s # string, kafka version, defaults to 2.5.0 version: # string, consumed message expected format, one of: proto, event format: event # bool, enables extra logging debug: false # integer, number of kafka consumers to be created num-workers: 1 # list of processors to apply on the message when received, # only applies if format is 'event' event-processors: # []string, list of named outputs to export data to. # Must be configured under root level `outputs` section outputs: ``` ================================================ FILE: docs/user_guide/inputs/nats_input.md ================================================ When using NATS as input, `gnmic` consumes data from a specific NATS subject in `event` or `proto` format. Multiple consumers can be created per `gnmic` instance (`num-workers`). All the workers join the same [NATS queue group](https://docs.nats.io/nats-concepts/queue) (`queue`) in order to load share the messages between the workers. Multiple instances of `gnmic` with the same NATS input can be used to effectively consume the exported messages in parallel The NATS input will export the received messages to the list of outputs configured under its `outputs` section. ```yaml inputs: input1: # string, required, specifies the type of input type: nats # NATS subscriber name # If left empty, it will be populated with the string from flag --instance-name appended with `--nats-sub`. # If --instance-name is also empty, a random name is generated in the format `gnmic-$uuid` # note that each nats worker (subscriber) will get name=$name-$index name: "" # string, comma separated NATS servers addresses address: localhost:4222 # The subject name gnmic NATS consumers subscribe to. subject: telemetry # subscribe queue group all gnmic NATS input workers join, # so that NATS server can load share the messages between them. queue: # string, NATS username username: # string, NATS password password: # duration, wait time before reconnection attempts connect-time-wait: 2s # string, consumed message expected format, one of: proto, event format: event # bool, enables extra logging debug: false # integer, number of nats consumers to be created num-workers: 1 # integer, sets the size of the local buffer where received # NATS messages are stored before being sent to outputs. # This value is set per worker. Defaults to 100 messages buffer-size: 100 # list of processors to apply on the message when received, # only applies if format is 'event' event-processors: # []string, list of named outputs to export data to. # Must be configured under root level `outputs` section outputs: ``` ================================================ FILE: docs/user_guide/inputs/stan_input.md ================================================ When using STAN as input, `gnmic` consumes data from a specific STAN subject in `event` or `proto` format. Multiple consumers can be created per `gnmic` instance (`num-workers`). All the workers join the same [STAN queue group](https://docs.stan.io/nats-concepts/queue) (`queue`) in order to load share the messages between the workers. Multiple instances of `gnmic` with the same STAN input can be used to effectively consume the exported messages in parallel The STAN input will export the received messages to the list of outputs configured under its `outputs` section. ```yaml inputs: input1: # string, required, specifies the type of input type: stan # STAN subscriber name # If left empty, it will be populated with the string from flag --instance-name appended with `--stan-sub`. # If --instance-name is also empty, a random name is generated in the format `gnmic-$uuid` # note that each stan worker (subscriber) will get name=$name-$index name: "" # string, comma separated STAN servers addresses address: localhost:4222 # The subject name gnmic STAN consumers subscribe to. subject: telemetry # subscribe queue group all gnmic STAN input workers join, # so that STAN server can load share the messages between them. queue: # string, STAN username username: # string, STAN password password: # duration, wait time before reconnection attempts connect-time-wait: 2s # string, the STAN cluster name. defaults to test-cluster cluster-name: # integer, interval (in seconds) at which # a connection sends a PING to the server. min=1 ping-interval: # integer, number of PINGs without a response # before the connection is considered lost. min=2 ping-retry: # string, consumed message expected format, one of: proto, event format: event # bool, enables extra logging debug: false # integer, number of stan consumers to be created num-workers: 1 # list of processors to apply on the message when received, # only applies if format is 'event' event-processors: # []string, list of named outputs to export data to. # Must be configured under root level `outputs` section outputs: ``` ================================================ FILE: docs/user_guide/outputs/asciigraph_output.md ================================================ `gnmic` supports displaying collected metrics as an ASCII graph on the terminal. The graph is generated using the [asciigraph](https://github.com/guptarohit/asciigraph) package. ### Configuration sample ```yaml outputs: output1: # required type: asciigraph # string, the graph caption caption: # integer, the graph height. If unset, defaults to the terminal height height: # integer, the graph width. If unset, defaults to the terminal width width: # float, the graph minimum value for the vertical axis. lower-bound: # float, the graph minimum value for the vertical axis. upper-bound: # integer, the graph left offset. offset: # integer, the decimal point precision of the label values. precision: # string, the caption color. one of ANSI colors. caption-color: # string, the axis color. one of ANSI colors. axis-color: # string, the label color. one of ANSI colors. label-color: # duration, the graph refresh timer. refresh-timer: 1s # string, one of `overwrite`, `if-not-present`, `` # This field allows populating/changing the value of Prefix.Target in the received message. # if set to ``, nothing changes # if set to `overwrite`, the target value is overwritten using the template configured under `target-template` # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template` add-target: # string, a GoTemplate that allows for the customization of the target field in Prefix.Target. # it applies only if the previous field `add-target` is not empty. # if left empty, it defaults to: # {{- if index . "subscription-target" -}} # {{ index . "subscription-target" }} # {{- else -}} # {{ index . "source" | host }} # {{- end -}}` # which will set the target to the value configured under `subscription.$subscription-name.target` if any, # otherwise it will set it to the target name stripped of the port number (if present) target-template: # list of processors to apply on the message before writing event-processors: # bool enable debug debug: false ``` ### Example This example shows how to use the `asciigraph` output. gNMIc config ```shell cat gnmic_asciiout.yaml ``` ```yaml targets: clab-nfd33-spine1-1: username: admin password: NokiaSrl1! skip-verify: true subscriptions: sub1: paths: - /interface[name=ethernet-1/3]/statistics/out-octets - /interface[name=ethernet-1/3]/statistics/in-octets stream-mode: sample sample-interval: 1s encoding: ascii outputs: out1: type: asciigraph caption: in/out octets per second event-processors: - rate processors: rate: event-starlark: script: rate.star ``` Starlark processor ```shell cat rate.star ``` ```python cache = {} values_names = [ '/interface/statistics/out-octets', '/interface/statistics/in-octets' ] N=2 def apply(*events): for e in events: for value_name in values_names: v = e.values.get(value_name) # check if v is not None and is a digit to proceed if not v: continue if not v.isdigit(): continue # update cache with the latest value val_key = "_".join([e.tags["source"], e.tags["interface_name"], value_name]) if not cache.get(val_key): # initialize the cache entry if empty cache.update({val_key: []}) if len(cache[val_key]) > N: # remove the oldest entry if the number of entries reached N cache[val_key] = cache[val_key][1:] # update cache entry cache[val_key].append((int(v), e.timestamp)) # get the list of values val_list = cache[val_key] # calculate rate e.values[value_name+"_rate"] = rate(val_list) e.values.pop(value_name) return events def rate(vals): previous_value, previous_timestamp = None, None for value, timestamp in vals: if previous_value != None and previous_timestamp != None: time_diff = (timestamp - previous_timestamp) / 1000000000 # 1 000 000 000 if time_diff > 0: value_diff = value - previous_value rate = value_diff / time_diff return rate previous_value = value previous_timestamp = timestamp return 0 ``` ================================================ FILE: docs/user_guide/outputs/file_output.md ================================================ `gnmic` supports exporting subscription updates to multiple local files A file output can be defined using the below format in `gnmic` config file under `outputs` section: ```yaml outputs: output1: # required type: file # filename to write telemetry data to. # will be ignored if `file-type` is set filename: /path/to/filename # file-type, stdout or stderr. # overwrites `filename` file-type: # stdout or stderr # string, message formatting, json, protojson, prototext, event format: # string, one of `overwrite`, `if-not-present`, `` # This field allows populating/changing the value of Prefix.Target in the received message. # if set to ``, nothing changes # if set to `overwrite`, the target value is overwritten using the template configured under `target-template` # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template` add-target: # string, a GoTemplate that allow for the customization of the target field in Prefix.Target. # it applies only if the previous field `add-target` is not empty. # if left empty, it defaults to: # {{- if index . "subscription-target" -}} # {{ index . "subscription-target" }} # {{- else -}} # {{ index . "source" | host }} # {{- end -}}` # which will set the target to the value configured under `subscription.$subscription-name.target` if any, # otherwise it will set it to the target name stripped of the port number (if present) target-template: # boolean, valid only if format is `event`. # if true, arrays of events are split and marshaled as JSON objects instead of an array of dicts. split-events: false # string, a GoTemplate that is executed using the received gNMI message as input. # the template execution is the last step before the data is written to the file, # First the received message is formatted according to the `format` field above, then the `event-processors` are applied if any # then finally the msg-template is executed. msg-template: # boolean, if true the message timestamp is changed to current time override-timestamps: # boolean, format the output in indented form with every element on a new line. multiline: # string, indent specifies the set of indentation characters to use in a multiline formatted output indent: # string, separator is the set of characters to write between messages, defaults to new line separator: # integer, specifies the maximum number of allowed concurrent file writes concurrency-limit: 1000 # boolean, enables the collection and export (via prometheus) of output specific metrics enable-metrics: false # list of processors to apply on the message before writing event-processors: # file rotation configuration rotation: max-size: 100 # size in megabytes max-age: 30 # max age in days max-backups: 3 # maximum number of old files to store, not counting the current file compress: false # whether or not to enable compression ``` The file output can be used to write to file on the disk, to stdout or to stderr. Also includes support for rotating files to control disk utilization and maximum age using the `rotation` configuration section. For a disk file, a file name is required. For stdout or stderr, only file-type is required. ================================================ FILE: docs/user_guide/outputs/gnmi_output.md ================================================ `gnmic` supports acting as a `gNMI Server` to expose the subscribed telemetry data to a `gNMI Client` using the `Subcribe` RPC, or to act as a gateway for `Get` and `Set` RPCs.
### Configuration ```yaml outputs: output1: # required type: gnmi # gNMI server address, either a TCP socket or UNIX socket. # In the latter case, the prefix `unix:///` should be present. address: ":57400" # maximum number of active subscriptions. max-subscriptions: 64 # maximum number of ongoing Get/Set RPCs. max-unary-rpc: 64 # tls config tls: # string, path to the CA certificate file, # this certificate is used to verify the clients certificates. ca-file: # string, server certificate file. cert-file: # string, server key file. key-file: # string, one of `"", "request", "require", "verify-if-given", or "require-verify" # - request: The server requests a certificate from the client but does not # require the client to send a certificate. # If the client sends a certificate, it is not required to be valid. # - require: The server requires the client to send a certificate and does not # fail if the client certificate is not valid. # - verify-if-given: The server requests a certificate, # does not fail if no certificate is sent. # If a certificate is sent it is required to be valid. # - require-verify: The server requires the client to send a valid certificate. # # if no ca-file is present, `client-auth` defaults to ""` # if a ca-file is set, `client-auth` defaults to "require-verify"` client-auth: "" # string, a GoTemplate that allow for the customization of the target field in Prefix.Target. # it applies only if the returned Prefix.Target is empty. # if left empty, it defaults to: # `{{- if index . "subscription-target" -}} # {{ index . "subscription-target" }} # {{- else -}} # {{ index . "source" | host }} # {{- end -}}` # which will set the target to the value configured under `subscription.$subscription-name.target` if any, # otherwise it will set it to the target name stripped of the port number (if present). target-template: # boolean, enables extra logging for the gNMI Server debug: false # boolean, enables the collection and export (via prometheus) of output specific metrics enable-metrics: false ``` #### Insecure Mode By default, the server runs in insecure mode, as long as `skip-verify` is false and none of `ca-file`, `cert-file` and `key-file` are set. #### Secure Mode To run this gNMI server in secure mode, there are a few options: - **Using self signed certificates, without client certificate verification:** ```yaml skip-verify: true ``` - **Using self signed certificates, with client certificate verification:** ```yaml # a valid CA certificate to verify the client provided certificates ca-file: /path/to/caFile ``` - **Using CA provided certificates, without client certificate verification:** ```yaml skip-verify: true # a valid server certificate cert-file: /path/to/server-cert # a valid server key key-file: /path/to/server-key ``` - **Using CA provided certificates, with client certificate verification:** ```yaml # a valid CA certificate to verify the client provided certificates ca-file: /path/to/caFile # a valid server certificate cert-file: /path/to/server-cert # a valid server key key-file: /path/to/server-key ``` ### Supported RPCs This `gNMI Server` supports `Get`, `Set` and `Subscribe` RPCs. #### gNMI Subscribe RPC The server keeps a cache of gNMI notifications synched with the configured targets based on the configured subscriptions. This means that a client cannot get updates about a leaf that `gNMIc` did not subscribe to upstream. As soon as there is an update to the cache, the added gNMI notification is sent to all the client which subscription matches the new notification. Clients can subscribe to specific target using the gNMI Prefix Target field, leaving the Target field empty or setting it to `*` is equivalent to subscribing to all known targets. #### gNMI Get RPC
The server supports the gNMI `Get` RPC. It relies on the Prefix.Target field to select the target(s) to relay the received GetRequest to. If Prefix.Target is empty or is equal to `*`, a Get RPC is performed for all known targets. The received GetRequest is cloned, enriched with each target name and sent to the corresponding destination. Comma separated target names are also supported and allow to select a list of specific targets to send the Get RPC to. Once all GetResponses are received back successfully, the notifications contained in each GetResponse are combined into a single GetResponse with their Prefix.Target populated, if empty. The resulting GetResponse is then returned to the gNMI client. If one of the RPCs fails, an error with status code `Internal(13)` is returned to the client. If the Get Request has the origin field set to `gnmic`, the request is performed against the internal server configuration. Currently only the path `targets` is supported. ```bash gnmic -a localhost:57400 --skip-verify get --path gnmic:/targets ``` ```json [ { "timestamp": 1626759382486891218, "time": "2021-07-20T13:36:22.486891218+08:00", "prefix": "gnmic:targets[name=clab-gw-srl1:57400]", "updates": [ { "Path": "address", "values": { "address": "clab-gw-srl1:57400" } }, { "Path": "username", "values": { "username": "admin" } }, { "Path": "insecure", "values": { "insecure": "false" } }, { "Path": "skip-verify", "values": { "skip-verify": "true" } }, { "Path": "timeout", "values": { "timeout": "10s" } } ] }, { "timestamp": 1626759382486900697, "time": "2021-07-20T13:36:22.486900697+08:00", "prefix": "gnmic:targets[name=clab-gw-srl2:57400]", "updates": [ { "Path": "address", "values": { "address": "clab-gw-srl2:57400" } }, { "Path": "username", "values": { "username": "admin" } }, { "Path": "insecure", "values": { "insecure": "false" } }, { "Path": "skip-verify", "values": { "skip-verify": "true" } }, { "Path": "timeout", "values": { "timeout": "10s" } } ] } ] ``` #### gNMI Set RPC The gNMI server supports the gNMI `Set` RPC. Just like in the case of `Get` RPC, the server relies on the `Prefix.Target` field to select the target(s) to relay the received SetRequest to. If Prefix.Target is empty or is equal to `*`, a Set RPC is performed for all known targets. The received SetRequest is cloned, enriched with each target name and sent to the corresponding destination. Comma separated target names are also supported and allow to select a list of specific targets to send the Set RPC to. Once all SetResponses are received back successfully, the `UpdateResult`s from each response are merged into a single SetResponse, with the addition of the target name set in `Path.Target`. This is not compliant with the gNMI specification which stipulates that the `Target` field should only be present in [Prefix Paths](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#2221-path-target) The resulting SetResponse is then returned to the gNMI client. If one of the RPCs fails, an error with status code `Internal(13)` is returned to the client. ================================================ FILE: docs/user_guide/outputs/influxdb_output.md ================================================ `gnmic` supports exporting subscription updates to [influxDB](https://www.influxdata.com/products/influxdb-overview/) time series database ## Configuration An influxdb output can be defined using the below format in `gnmic` config file under `outputs` section: ```yaml outputs: output1: # required type: influxdb # influxDB server address url: http://localhost:8086 # empty if using influxdb1.8.x org: myOrg # string in the form database/retention-policy. Skip retention policy for the default on bucket: telemetry # influxdb 1.8.x use a string in the form: "username:password" token: # number of points to buffer before writing to the server batch-size: 1000 # flush period after which the buffer is written to the server whether the batch_size is reached or not flush-timer: 10s # if true, the influxdb client will use gzip compression in write requests. use-gzip: false # (deprecated, use tls.skip-verify: true) #if true, the influxdb client will use a secure connection to the server. enable-tls: false # tls config tls: # string, path to the CA certificate file, # this will be used to verify the clients certificates when `skip-verify` is false ca-file: # string, client certificate file. cert-file: # string, client key file. key-file: # boolean, if true, the client will not verify the server # certificate against the available certificate chain. skip-verify: false # boolean, if true the message timestamp is changed to current time override-timestamps: false # server health check period, used to recover from server connectivity failure. # health check is disabled by default, can be enabled by setting the below field to any value other that zero. # with a minimum allowed period of 30s. health-check-period: 0s # defines the write timestamp precision, # one of `s` for second, `ms` for millisecond, `us` for microsecond and `ns` for nanoseconds # any other value defaults to `ns`. timestamp-precision: ns # server health check period, used to recover from server connectivity failure health-check-period: 30s # enable debug debug: false # string, one of `overwrite`, `if-not-present`, `` # This field allows populating/changing the value of Prefix.Target in the received message. # if set to ``, nothing changes # if set to `overwrite`, the target value is overwritten using the template configured under `target-template` # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template` add-target: # string, a GoTemplate that allow for the customization of the target field in Prefix.Target. # it applies only if the previous field `add-target` is not empty. # if left empty, it defaults to: # {{- if index . "subscription-target" -}} # {{ index . "subscription-target" }} # {{- else -}} # {{ index . "source" | host }} # {{- end -}}` # which will set the target to the value configured under `subscription.$subscription-name.target` if any, # otherwise it will set it to the target name stripped of the port number (if present) target-template: # NOT IMPLEMENTED boolean, enables the collection and export (via prometheus) of output specific metrics enable-metrics: false # list of processors to apply on the message before writing event-processors: [] # cache, if present enables the influxdb output to cache received updates and write them all together # at `cache-flush-timer` expiry. cache: # duration, if > 0, enables the expiry of values written to the cache. expiration: 0s # debug, if true enable extra logging debug: false # cache-flush-timer cache-flush-timer: 5s ``` `gnmic` uses the [`event`](../event_processors/intro.md#the-event-format) format to generate the measurements written to InfluxDB. When an event has been processed through `gnmic` processors, the final value of the `subscription-name` tag will be used as an InfluxDB measurement name and the tag will be removed. If the `subscription-name` tag does not exist in the event, the event's `Name` will be used as InfluxDB measurement. ## Caching When caching is enabled, the received messages are not written directly to InfluxDB, they are first cached as gNMI updates and written in batch when the `cache-flush-timer` is reached. The below diagram shows how an InfluxDB output works with and without cache enabled:
When caching is enabled, the cached gNMI updates are periodically retrieved in batch, converted to [events](../event_processors/intro.md#the-event-format). If [processors](../event_processors/intro.md) are defined under the output, they are applied to the whole list of events at once. This allows augmenting some messages with values from other messages even if they where collected from a different target/subscription. ================================================ FILE: docs/user_guide/outputs/jetstream_output.md ================================================ `gnmic` supports exporting subscription updates [NATS Jetstream](https://docs.nats.io/nats-concepts/jetstream) servers. A [Jetstream](https://docs.nats.io/nats-concepts/jetstream) output can be defined using the below format in `gnmic` config file under `outputs` section: ### configuration ```yaml outputs: output1: # required type: jetstream # NATS publisher name # if left empty, this field is populated with the output name used as output ID (output1 in this example). # If the flag --instance-name is not empty, the full name will be '$(instance-name)-$(name). # note that each jetstream worker (publisher) will get a client name=$name-$index name: "" # Comma separated NATS servers address: localhost:4222 # string, stream name to write update to, # if `create-stream` is set, it will be created # if `create-stream` is omitted, an existing stream with this name must be available # may not contain spaces, tabs, period (.), greater than (>) or asterisk (*) stream: # defines stream parameters that gNMIc will create on the target jetstream server(s) # if omitted, gnmic will use an existing stream and will not attempt to create or modify it create-stream: # string, stream description description: created by gNMIc # string list, list of subjects allowed on the stream # defaults to `.create-stream.$name.>` subjects: # string, one of `memory`, `file`. # defines the storage type to use for the stream. # defaults to `memory` storage: # int64, max number of messages in the stream. max-msgs: # int64, max bytes the stream may contain. max-bytes: # duration, max age of any message in the stream. max-age: # int32, maximum message size max-msg-size: # string, retention policy for the stream: `limits` or `workqueue` # `limits`: messages are retained based on size, count, or age limits # `workqueue`: messages are removed after being acknowledged by all consumers # defaults to `limits` retention-policy: limits # string, one of `static`, `subscription.target`, `subscription.target.path` # or `subscription.target.pathKeys`. # Defines the subject format. # `static`: # all updates will be written to the subject name set under `outputs.$output_name.subject` # `subscription.target`: # updates from each subscription, target will be written # to subject $subscription_name.$target_name # `subscription.target.path`: # updates from a certain subscription, target and path # will be written to subject $subscription_name.$target_name.$path. # The path is built by joining the gNMI path pathElements with a dot (.). # e.g: /interface[name=ethernet-1/1]/statistics/in-octets # --> interface.statistics.in-octets # `subscription.target.pathKeys`: # updates from a certain subscription, a certain target and a certain path # will be written to subject $subscription_name.$target_name.$path. # The path is built by joining the gNMI path pathElements and Keys with a dot (.). # e.g: /interface[name=ethernet-1/1]/statistics/in-octets # --> interface.{name=ethernet-1/1}.statistics.in-octets # `target.subscription`: # updates from each subscription, target will be written with a prefix of the `subject` # to subject $subject.$target_name.$subscription_name if `subject` is present. If not, # it will write to $target_name.$subscription_name. subject-format: static # If a subject-format is `static`, gnmic will publish all subscriptions updates # to a single subject configured under this field. Defaults to 'telemetry' # If a subject-format is `target.subscription`, gnmic will publish subscripion # updates prefixed with this subject. subject: telemetry # tls config tls: # string, path to the CA certificate file, # this will be used to verify the clients certificates when `skip-verify` is false ca-file: # string, client certificate file. cert-file: # string, client key file. key-file: # boolean, if true, the client will not verify the server # certificate against the available certificate chain. skip-verify: false # NATS username username: # NATS password password: # wait time before reconnection attempts connect-time-wait: 2s # Exported message format, one of: proto, prototext, protojson, json, event format: event # string, one of `overwrite`, `if-not-present`, `` # This field allows populating/changing the value of Prefix.Target in the received message. # if set to ``, nothing changes # if set to `overwrite`, the target value is overwritten using the template configured under `target-template` # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template` add-target: # string, a GoTemplate that allow for the customization of the target field in Prefix.Target. # it applies only if the previous field `add-target` is not empty. # if left empty, it defaults to: # {{- if index . "subscription-target" -}} # {{ index . "subscription-target" }} # {{- else -}} # {{ index . "source" | host }} # {{- end -}}` # which will set the target to the value configured under `subscription.$subscription-name.target` if any, # otherwise it will set it to the target name stripped of the port number (if present) target-template: # boolean, valid only if format is `event`. # if true, arrays of events are split and marshaled as JSON objects instead of an array of dicts. split-events: false # string, a GoTemplate that is executed using the received gNMI message as input. # the template execution is the last step before the data is written to the file. # First the received message is formatted according to the `format` field above, then the `event-processors` are applied if any # then finally the msg-template is executed. msg-template: # boolean, if true the message timestamp is changed to current time override-timestamps: false # integer, number of nats publishers to be created num-workers: 1 # duration after which a message waiting to be handled by a worker gets discarded write-timeout: 5s # boolean, enables extra logging for the nats output debug: false # integer, sets the size of the local buffer where received # NATS messages are stored before being sent to outputs. # This value is set per worker. Defaults to 0 messages buffer-size: 0 # boolean, enables the collection and export (via prometheus) of output specific metrics enable-metrics: false # list of processors to apply to the message before writing event-processors: ``` ### subject-format The `subject-format` field is used to control how the received gNMI notifications are written into the configured stream. #### static All notifications will be written to the subject name set under `outputs.$output_name.subject` #### subscription.target Notifications from each subscription and target pair will be written to subject `$subscription_name.$target_name` #### subscription.target.path Notifications from a subscription, target and path tuple will be written to subject $subscription_name.$target_name.$path. The path is built by joining the gNMI path pathElements with a period `(.)`. Notifications containing more than one update, will be expanded into multiple notifications with one update each. E.g: An update from target `target1` and subscription `sub1` containing path `/interface[name=ethernet-1/1]/statistics/in-octets`, will be written to subject: ```text $stream_name.sub1.target1.interface.statistics.in-octets ``` #### subscription.target.pathKeys Updates from a certain subscription, a certain target and a certain path will be written to subject `$subscription_name.$target_name.$path`. The path is built by joining the gNMI path pathElements and Keys with a period `(.)`. Notifications containing more than one update, will be expanded into multiple notifications with one update each. E.g: An update from target `target1` and subscription `sub1` containing path `/interface[name=ethernet-1/1]/statistics/in-octets`, will be written to subject: ```text $stream_name.sub1.target1.interface.{name=ethernet-1/1}.statistics.in-octets ``` ### JetStream Queue Patterns JetStream streams support three retention policies that enable different message processing patterns: Limits, Workqueue and Interest-Based. Gnmic supports Limits by default, and optionally supports workqueue based. It does not currently support Interest-based. #### Limits Retention (Default) Messages are retained based on configured limits (max-msgs, max-bytes, max-age). When these limits are exceeded, older messages are automatically removed, regardless of whether they have been consumed. **Use limits retention when:** - You don't have an better ideas :) - You want to have multiple consumers fetch the same metric asyncronously within the configured retention time. - You can accept losing messages that overfill the retention time, even when they were not acknoledged yet. **Example configuration:** ```yaml outputs: telemetry-output: type: jetstream address: localhost:4222 stream: telemetry-stream create-stream: retention-policy: limits # default storage: memory max-msgs: 100000 max-bytes: 10737418240 # 10GB max-age: 24h subject-format: subscription.target ``` #### Workqueue Retention Messages are automatically removed from the stream after being acknowledged by the consumer of that subject. This enables exactly-once message processing, where each message is processed by only one consumer and then deleted. **Use workqueue retention when:** - You have 1:1 producer/consumer configuration - You want automatic cleanup of messages after successful processing - Message loss is unacceptable (e.g. using a file persistence) **Example configuration:** ```yaml outputs: task-output: type: jetstream address: localhost:4222 stream: telemetry-stream create-stream: retention-policy: workqueue storage: file max-msgs: 100000 max-bytes: 10737418240 # 10GB subject-format: subscription.target ``` #### Using Existing Streams If a stream has already been created (e.g., by administrators or other applications), you can configure gnmic to use it by omitting the `create-stream` configuration: ```yaml outputs: existing-output: type: jetstream address: localhost:4222 stream: existing-stream subject-format: static subject: telemetry ``` When `create-stream` is omitted, gnmic will not attempt to create or modify the stream configuration. The stream with the specified name must already exist on the JetStream server. This is useful when: - Stream configuration is managed centrally - You don't have permissions to create streams - You want to ensure stream settings remain unchanged **Important:** When using an existing stream, ensure the stream's subjects configuration is compatible with your chosen `subject-format` and `subject` settings, otherwise messages may fail to publish. ================================================ FILE: docs/user_guide/outputs/kafka_output.md ================================================ `gnmic` supports exporting subscription updates to multiple Apache Kafka brokers/clusters simultaneously ### Configuration sample A Kafka output can be defined using the below format in `gnmic` config file under `outputs` section: ```yaml outputs: output1: # required type: kafka # kafka client name. # if left empty, this field is populated with the output name used as output ID (output1 in this example). # the full name will be '$(name)-kafka-prod'. # If the flag --instance-name is not empty, the full name will be '$(instance-name)-$(name)-kafka-prod. # note that each kafka worker (producer) will get client name=$name-$index name: "" # Comma separated brokers addresses address: localhost:9092 # Kafka topic name topic: telemetry # Kafka topic prefix # If supplied, overrides the `topic` key and outputs to a separate topic per source # named like `$topic_$subscriptionName_$targetName`. If `source` contains a port number separated with a colon, # the colon will be replaced with an underscore due to restrictions on the naming of kafka topics. # ex: telemetry_bgp_neighbor_state_device1_6030 topic-prefix: telemetry # starts a sync-producer if set to true. sync-producer: false # required-acks is used in Produce Requests to tell the broker how many replica acknowledgements # it must see before responding. One of `no-response`, `wait-for-local`, `wait-for-all`. required-acks: wait-for-local # Kafka SASL configuration sasl: # SASL user name user: # SASL password password: # SASL mechanism: PLAIN, SCRAM-SHA-256, SCRAM-SHA-512 and OAUTHBEARER are supported mechanism: # token url for OAUTHBEARER SASL mechanism token-url: # tls config tls: # string, path to the CA certificate file, # this will be used to verify the clients certificates when `skip-verify` is false ca-file: # string, client certificate file. cert-file: # string, client key file. key-file: # boolean, if true, the client will not verify the server # certificate against the available certificate chain. skip-verify: false # The total number of times to retry sending a message max-retry: 2 # Kafka connection timeout timeout: 5s # Wait time to reestablish the kafka producer connection after a failure recovery-wait-time: 10s # Exported msg format, json, protojson, prototext, proto, event format: event # boolean, if true the kafka producer will add a key to # the message written to the broker. The key value is ${source}_${subscription-name}. # this is useful for Kafka topics with multiple partitions, it allows to keep messages from the same source and subscription in sequence. insert-key: false # string, one of `overwrite`, `if-not-present`, `` # This field allows populating/changing the value of Prefix.Target in the received message. # if set to ``, nothing changes # if set to `overwrite`, the target value is overwritten using the template configured under `target-template` # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template` add-target: # string, a GoTemplate that allows for the customization of the target field in Prefix.Target. # it applies only if the previous field `add-target` is not empty. # if left empty, it defaults to: # {{- if index . "subscription-target" -}} # {{ index . "subscription-target" }} # {{- else -}} # {{ index . "source" | host }} # {{- end -}}` # which will set the target to the value configured under `subscription.$subscription-name.target` if any, # otherwise it will set it to the target name stripped of the port number (if present) target-template: # boolean, valid only if format is `event`. # if true, arrays of events are split and marshaled as JSON objects instead of an array of dicts. split-events: false # string, a GoTemplate that is executed using the received gNMI message as input. # the template execution is the last step before the data is written to the file, # First the received message is formatted according to the `format` field above, then the `event-processors` are applied if any # then finally the msg-template is executed. msg-template: # boolean, if true the message timestamp is changed to current time override-timestamps: false # Number of kafka producers to be created num-workers: 1 # (bool) enable debug debug: false # (int) number of messages to buffer before being picked up by the workers buffer-size: 0 # (string) enables compression of produced message. One of gzip, snappy, zstd, lz4 compression-codec: gzip # (bool) enables the collection and export (via prometheus) of output specific metrics enable-metrics: false # list of processors to apply on the message before writing event-processors: ``` Currently all subscriptions updates (all targets and all subscriptions) are published to the defined topic name unless the `topic-prefix` configuration option is set. ### Kafka Security protocol Kafka clients can operate with 4 [security protocols](https://kafka.apache.org/24/javadoc/org/apache/kafka/common/security/auth/SecurityProtocol.html), their configuration is controlled via both `.tls` and `.sasl` fields under the output config. **Security Protocol** | **Description** | **Configuration** | -----------------------|-------------------------------------------|-----------------------------------------| `PLAINTEXT` | Un-authenticated, non-encrypted channel | `.tls` and `.sasl` are **NOT** present | `SASL_PLAINTEXT` | SASL authenticated, non-encrypted channel | only `.sasl` is present | `SASL_SSL` | SASL authenticated, SSL channel | both `.tls` and `.sasl` are present | `SSL` | SSL channel | only `.tls` is present | #### Security Configuration Examples === "PLAINTEXT" ```yaml outputs: output1: type: kafka topic: my_kafka_topic # other fields # no tls and no sasl fields ``` === "SASL_PLAINTEXT" ```yaml outputs: output1: type: kafka topic: my_kafka_topic sasl: user: admin password: secret # other fields # no tls field ``` === "SASL_SSL" Example1: Without server certificate verification ```yaml outputs: output1: type: kafka topic: my_kafka_topic sasl: user: admin password: secret tls: skip-verify: true # other fields # ... ``` Example2: With server certificate verification ```yaml outputs: output1: type: kafka topic: my_kafka_topic sasl: user: admin password: secret tls: ca-file: /path/to/ca-file # other fields # ... ``` Example3: With client certificates ```yaml outputs: output1: type: kafka topic: my_kafka_topic sasl: user: admin password: secret tls: cert-file: /path/to/cert-file key-file: /path/to/cert-file # other fields # ... ``` Example4: With both server certificate verification and client certificates ```yaml outputs: output1: type: kafka topic: my_kafka_topic sasl: user: admin password: secret tls: cert-file: /path/to/cert-file key-file: /path/to/cert-file ca-file: /path/to/ca-file # other fields # ... ``` === "SSL" Example1: Without server certificate verification ```yaml outputs: output1: type: kafka topic: my_kafka_topic tls: skip-verify: true # other fields # no sasl field ``` Example2: With server certificate verification ```yaml outputs: output1: type: kafka topic: my_kafka_topic tls: ca-file: /path/to/ca-file # other fields # no sasl field ``` Example3: With client certificates ```yaml outputs: output1: type: kafka topic: my_kafka_topic tls: cert-file: /path/to/cert-file key-file: /path/to/cert-file # other fields # no sasl field ``` Example4: With both server certificate verification and client certificates ```yaml outputs: output1: type: kafka topic: my_kafka_topic tls: cert-file: /path/to/cert-file key-file: /path/to/cert-file ca-file: /path/to/ca-file # other fields # no sasl field ``` ### Kafka Output Metrics When a Prometheus server is enabled, `gnmic` kafka output exposes 4 prometheus metrics, 3 Counters and 1 Gauge: * `number_of_kafka_msgs_sent_success_total`: Number of msgs successfully sent by gnmic kafka output. This Counter is labeled with the kafka producerID * `number_of_written_kafka_bytes_total`: Number of bytes written by gnmic kafka output. This Counter is labeled with the kafka producerID * `number_of_kafka_msgs_sent_fail_total`: Number of failed msgs sent by gnmic kafka output. This Counter is labeled with the kafka producerID as well as the failure reason * `msg_send_duration_ns`: gnmic kafka output send duration in nanoseconds. This Gauge is labeled with the kafka producerID ================================================ FILE: docs/user_guide/outputs/nats_output.md ================================================ `gnmic` supports exporting subscription updates to multiple NATS servers/clusters simultaneously A [NATS](https://docs.nats.io/) output can be defined using the below format in `gnmic` config file under `outputs` section: ```yaml outputs: output1: # required type: nats # NATS publisher name # if left empty, this field is populated with the output name used as output ID (output1 in this example). # the full name will be '$(name)-nats-pub'. # If the flag --instance-name is not empty, the full name will be '$(instance-name)-$(name)-nats-pub. # note that each nats worker (publisher) will get client name=$name-$index name: "" # Comma separated NATS servers address: localhost:4222 # This prefix is used to to build the subject name for each target/subscription subject-prefix: telemetry # If a subject-prefix is not specified, gnmic will publish all subscriptions updates to a single subject configured under this field. Defaults to 'telemetry' subject: telemetry # NATS username username: # NATS password password: # wait time before reconnection attempts connect-time-wait: 2s # tls config tls: # string, path to the CA certificate file, # this will be used to verify the clients certificates when `skip-verify` is false ca-file: # string, client certificate file. cert-file: # string, client key file. key-file: # boolean, if true, the client will not verify the server # certificate against the available certificate chain. skip-verify: false # Exported message format, one of: proto, prototext, protojson, json, event format: json # string, one of `overwrite`, `if-not-present`, `` # This field allows populating/changing the value of Prefix.Target in the received message. # if set to ``, nothing changes # if set to `overwrite`, the target value is overwritten using the template configured under `target-template` # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template` add-target: # string, a GoTemplate that allow for the customization of the target field in Prefix.Target. # it applies only if the previous field `add-target` is not empty. # if left empty, it defaults to: # {{- if index . "subscription-target" -}} # {{ index . "subscription-target" }} # {{- else -}} # {{ index . "source" | host }} # {{- end -}}` # which will set the target to the value configured under `subscription.$subscription-name.target` if any, # otherwise it will set it to the target name stripped of the port number (if present) target-template: # boolean, valid only if format is `event`. # if true, arrays of events are split and marshaled as JSON objects instead of an array of dicts. split-events: false # string, a GoTemplate that is executed using the received gNMI message as input. # the template execution is the last step before the data is written to the file, # First the received message is formatted according to the `format` field above, then the `event-processors` are applied if any # then finally the msg-template is executed. msg-template: # boolean, if true the message timestamp is changed to current time override-timestamps: false # integer, number of nats publishers to be created num-workers: 1 # duration after which a message waiting to be handled by a worker gets discarded write-timeout: 5s # boolean, enables extra logging for the nats output debug: false # integer, sets the size of the local buffer where received # NATS messages are stored before being sent to outputs. # This value is set per worker. Defaults to 0 messages buffer-size: 0 # boolean, enables the collection and export (via prometheus) of output specific metrics enable-metrics: false # list of processors to apply on the message before writing event-processors: ``` Using `subject` config value, a user can specify the NATS subject to which to send all subscriptions updates for all targets If a user wants to separate updates by targets and by subscriptions, `subject-prefix` can be used. if `subject-prefix` is specified `subject` is ignored. `gnmic` takes advantage of NATS [subject hierarchy](https://docs.nats.io/nats-concepts/subjects#subject-hierarchies) by publishing gNMI subscription updates to a separate subject per target per subscription. The NATS subject name is built out of the `subject-prefix`, `name` under the target definition and `subscription-name` resulting in the following format: `subject-prefix.name.subscription-name` e.g: for a target `router1`, a subscription name `port-stats` and subject-prefix `telemetry` the subject name will be `telemetry.router1.port-stats` If the target name is an IP address, or a hostname (meaning potentially contains `.`), the `.` characters are replaced with a `-` e.g: for a target `172.17.0.100:57400`, the previous subject name becomes `telemetry.172-17-0-100:57400.port-stats` This way a user can subscribe to different subsets of updates by tweaking the subject name: * `"telemetry.>"` gets all updates sent to NATS by all targets, all subscriptions * `"telemetry.router1.>"` gets all NATS updates for target router1 * `"telemetry.*.port-stats"` gets all updates from subscription port-stats, for all targets ================================================ FILE: docs/user_guide/outputs/otlp_output.md ================================================ `gnmic` supports exporting subscription updates as [OpenTelemetry](https://opentelemetry.io/) metrics using the [OTLP](https://opentelemetry.io/docs/specs/otlp/) protocol. This output can be used to push metrics to any OTLP-compatible backend such as [Grafana Alloy](https://grafana.com/docs/alloy/latest/), [Grafana Mimir](https://grafana.com/docs/mimir/latest/), [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/), [Datadog](https://www.datadoghq.com/), [Dynatrace](https://www.dynatrace.com/), or any system that accepts OTLP metrics over gRPC. ## Configuration An OTLP output can be defined using the below format in `gnmic` config file under `outputs` section: ```yaml outputs: output1: # required type: otlp # required, address of the OTLP collector endpoint: localhost:4317 # string, transport protocol. Only "grpc" is supported. # defaults to "grpc" protocol: grpc # duration, defaults to 10s. # RPC timeout for each export request. timeout: 10s # tls config tls: # string, path to the CA certificate file, # this will be used to verify the server certificate when `skip-verify` is false ca-file: # string, client certificate file. cert-file: # string, client key file. key-file: # boolean, if true, the client will not verify the server # certificate against the available certificate chain. skip-verify: false # integer, defaults to 1000. # number of events to buffer before sending a batch to the collector. # events are sent every `interval` or when the batch is full, whichever comes first. batch-size: 1000 # duration, defaults to 5s. # time interval between export requests. interval: 5s # integer, defaults to 2x batch-size. # size of the internal event buffer. buffer-size: 2000 # integer, defaults to 3. # number of retries per export request on failure. max-retries: 3 # string, to be used as the metric namespace metric-prefix: "" # boolean, if true the subscription name will be prepended to the metric name after the prefix. append-subscription-name: false # boolean, if true, string type values are exported as gauge metrics with value=1 # and the string stored as an attribute named "value". # if false, string values are dropped. strings-as-attributes: false # list of tag keys to place as OTLP Resource attributes. # these tags are excluded from data point attributes. # defaults to empty (all tags become data point attributes). resource-tag-keys: # - device # - vendor # - model # - site # - source # list of regex patterns matched against the value key (metric path). # if any pattern matches, the metric is exported as a monotonic cumulative Sum (counter). # unmatched metrics are exported as Gauges. # defaults to empty (all metrics are Gauges). counter-patterns: # - "counter" # - "octets|packets|bytes" # - "errors|discards|drops" # map of string:string, additional static attributes to add to the OTLP Resource. resource-attributes: # key: value # map of string:string, HTTP headers (or gRPC metadata) to include with every export request. # Use this to set tenant/org identifiers required by multi-tenant backends such as # Grafana Mimir, Loki, or Tempo. headers: # X-Scope-OrgID: my-tenant # integer, defaults to 1. # number of workers processing events. num-workers: 1 # boolean, defaults to false. # enables debug logging. debug: false # boolean, defaults to false. # enables the collection and export (via prometheus) of output specific metrics. enable-metrics: false # list of processors to apply on the message before writing event-processors: ``` ## Metric Naming The metric name is built from up to three parts joined by underscores: 1. The value of `metric-prefix`, if configured. 2. The subscription name, if `append-subscription-name` is `true`. 3. The gNMI path (value key), with `/` and `-` replaced by `_`. For example, a gNMI update from subscription `port-stats` with path: ``` /interfaces/interface[name=1/1/1]/state/counters/in-octets ``` with `metric-prefix: gnmic` and `append-subscription-name: true`, produces a metric named: ``` gnmic_port_stats_interfaces_interface_state_counters_in_octets ``` ## Metric Type Detection Metrics are classified based on the `counter-patterns` configuration: - **Sum (monotonic counter)**: if the value key matches any regex in `counter-patterns`. - **Gauge**: all other numeric values. By default `counter-patterns` is empty, so all metrics are exported as Gauges. To classify counter-like metrics, configure the patterns explicitly: ```yaml counter-patterns: - "counter|octets|packets|bytes" - "errors|discards|drops" ``` Each pattern is a Go [regexp](https://pkg.go.dev/regexp/syntax) matched against the value key (the gNMI path portion of the metric, **before name transformation**). ## Resource and Data Point Attributes Event tags are split between the OTLP Resource and data point attributes based on the `resource-tag-keys` configuration: - Tags whose keys appear in `resource-tag-keys` are placed as **Resource attributes** and excluded from data point attributes. - All remaining tags become **data point attributes** (equivalent to Prometheus labels). By default `resource-tag-keys` is empty, so all tags become data point attributes. To move device-level metadata to the OTLP Resource (keeping it out of Prometheus labels), configure it explicitly: ```yaml resource-tag-keys: - device - vendor - model - site - source ``` Additional static attributes can be added to every Resource using `resource-attributes`: ```yaml resource-attributes: service.name: gnmic-collector deployment.environment: production ``` ## OTLP Resource Grouping Events are grouped by their `source` tag (the target device address). Each unique source becomes a separate OTLP `ResourceMetrics` entry with its own set of resource attributes. ## Custom Headers The `headers` field attaches key/value pairs to every export request — as gRPC metadata when using the `grpc` protocol, or as HTTP headers when using `http`. This is required by multi-tenant Grafana backends (Mimir, Loki, Tempo) which use the `X-Scope-OrgID` header to route data to the correct tenant: ```yaml outputs: mimir-output: type: otlp endpoint: mimir.example.com:4317 headers: X-Scope-OrgID: my-tenant-id ``` Multiple headers can be set simultaneously: ```yaml headers: X-Scope-OrgID: my-tenant-id X-Custom-Header: some-value ``` ## OTLP Output Metrics When `enable-metrics` is set to `true`, the OTLP output exposes the following Prometheus metrics: | Metric Name | Type | Description | |---|---|---| | `gnmic_otlp_output_number_of_sent_events_total` | Counter | Number of events successfully sent to the OTLP collector | | `gnmic_otlp_output_number_of_failed_events_total` | Counter | Number of events that failed to send | | `gnmic_otlp_output_send_duration_seconds` | Histogram | Duration of sending batches to the OTLP collector | | `gnmic_otlp_output_rejected_data_points_total` | Counter | Number of data points rejected by the collector (PartialSuccess) | ================================================ FILE: docs/user_guide/outputs/output_intro.md ================================================ In the context of gnmi subscriptions (on top of terminal output) `gnmic` supports multiple output options: * [Local file](file_output.md) * [NATS messaging system](nats_output.md) * [NATS Streaming messaging bus (STAN)](stan_output.md) * [NATS JetStream](jetstream_output.md) * [Kafka messaging bus](kafka_output.md) * [InfluxDB Time Series Database](influxdb_output.md) * [Prometheus Server](prometheus_output.md) * [Prometheus Remote Write](prometheus_write_output.md) * [UDP Server](udp_output.md) * [TCP Server](tcp_output.md)
These outputs can be mixed and matched at will with the different gnmi subscribe targets. With multiple outputs defined in the [configuration file](../configuration_file.md) you can collect once and export the subscriptions updates to multiple locations formatted differently. ### Defining outputs To define an output a user needs to create the `outputs` section in the configuration file: ```yaml # part of ~/gnmic.yml config file outputs: output1: type: file # output type file-type: stdout # or stderr format: json output2: type: file filename: /path/to/localFile.log format: protojson output3: type: nats # output type address: 127.0.0.1:4222 # comma separated nats servers addresses subject-prefix: telemetry # format: event output4: type: file filename: /path/to/localFile.log format: json output5: type: stan # output type address: 127.0.0.1:4223 # comma separated nats streaming servers addresses subject: telemetry # cluster-name: test-cluster # format: proto output6: type: kafka # output type address: localhost:9092 # comma separated kafka brokers addresses topic: telemetry # kafka topic format: proto output7: type: stan # output type address: 127.0.0.1:4223 # comma separated nats streaming servers addresses subject: telemetry cluster-name: test-cluster ``` !!! note Outputs names are case insensitive #### Output formats Different formats are supported for all outputs **Format/output** | **proto** | **protojson** | **prototext** | **json** | **event** ----------------- | ---------------------------------- | --------------------------------| ------------------------------------|--------------------------------|--------------------------------: **File** | :x: | :heavy_check_mark: | :heavy_check_mark: |:heavy_check_mark: |:heavy_check_mark: **NATS / STAN** | :heavy_check_mark: | :heavy_check_mark: | :x: |:heavy_check_mark: |:heavy_check_mark: **Kafka** | :heavy_check_mark: | :heavy_check_mark: | :x: |:heavy_check_mark: |:heavy_check_mark: **UDP / TCP** | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |:heavy_check_mark: |:heavy_check_mark: **InfluxDB** | NA | NA | NA |NA |NA **Prometheus** | NA | NA | NA |NA |NA #### Formats examples === "protojson" ```json { "update": { "timestamp": "1595491618677407414", "prefix": { "elem": [ { "name": "configure" }, { "name": "system" } ] }, "update": [ { "path": { "elem": [ { "name": "name" } ] }, "val": { "stringVal": "sr123" } } ] } } ``` === "prototext" ```yaml update: { timestamp: 1595491704850352047 prefix: { elem: { name: "configure" } elem: { name: "system" } } update: { path: { elem: { name: "name" } } val: { string_val: "sr123" } } } ``` === "json" ```json { "source": "172.17.0.100:57400", "subscription-name": "sub1", "timestamp": 1595491557144228652, "time": "2020-07-23T16:05:57.144228652+08:00", "prefix": "configure/system", "updates": [ { "Path": "name", "values": { "name": "sr123" } } ] } ``` === "event" ```json [ { "name": "sub1", "timestamp": 1595491586073072000, "tags": { "source": "172.17.0.100:57400", "subscription-name": "sub1" }, "values": { "/configure/system/name": "sr123" } } ] ``` ### Binding outputs Once the outputs are defined, they can be flexibly associated with the targets. ```yaml # part of ~/gnmic.yml config file targets: router1.lab.com: username: admin password: secret outputs: - output1 - output3 router2.lab.com: username: gnmi password: telemetry outputs: - output2 - output3 - output4 ``` ### Caching By default, `gNMIc` outputs write the received gNMI updates as they arrive (i.e without caching). Caching messages before writing them to a remote location can yield a few benefits like **rate limiting**, **batch processing**, **data replication**, etc. Both `influxdb` and `prometheus` outputs support caching messages before exporting. Caching support for other outputs is planned. See more details about caching [here](../caching.md) ================================================ FILE: docs/user_guide/outputs/prometheus_output.md ================================================ ## Introduction gNMIc offers the capability to present gNMI updates on a Prometheus server, allowing a Prometheus system to perform scrapes. The Prometheus metric name and its labels are generated according to the subscription name, gNMI path, and the value name. To define a gNMIc Prometheus output, use the following format in the gnmic configuration file under the outputs section: ```yaml outputs: sample-prom-output: type: prometheus # required # address to listen on for incoming scrape requests listen: :9804 # path to query to get the metrics path: /metrics # maximum lifetime of metrics in the local cache, # # a zero value defaults to 60s, a negative duration (e.g: -1s) disables the expiration expiration: 60s # a string to be used as the metric namespace metric-prefix: "" # a boolean, if true the subscription name will be appended to the metric name after the prefix append-subscription-name: false # boolean, if true the message timestamp is changed to current time override-timestamps: false # a boolean, enables exporting timestamps received from the gNMI target as part of the metrics export-timestamps: false # a boolean, enables setting string type values as prometheus metric labels. strings-as-labels: false # tls config tls: # string, path to the CA certificate file, # this certificate is used to verify the clients certificates. ca-file: # string, server certificate file. cert-file: # string, server key file. key-file: # string, one of `"", "request", "require", "verify-if-given", or "require-verify" # - request: The server requests a certificate from the client but does not # require the client to send a certificate. # If the client sends a certificate, it is not required to be valid. # - require: The server requires the client to send a certificate and does not # fail if the client certificate is not valid. # - verify-if-given: The server requests a certificate, # does not fail if no certificate is sent. # If a certificate is sent it is required to be valid. # - require-verify: The server requires the client to send a valid certificate. # # if no ca-file is present, `client-auth` defaults to ""` # if a ca-file is set, `client-auth` defaults to "require-verify"` client-auth: "" # see https://gnmic.openconfig.net/user_guide/caching/, # if enabled, the received gNMI notifications are stored in a cache. # the prometheus metrics are generated at the time a prometheus server sends scrape request. # this behavior allows the processors (if defined) to be run on all the generated events at once. # this mode uses more resource compared to the default one, but offers more flexibility when it comes # to manipulating the data to customize the returned metrics using event-processors. cache: # duration, scrape request timeout. # this timer is started when a scrape request is received, # if it is reached, the metrics generation/collection is stopped. timeout: 10s # enable debug for prometheus output debug: false # string, one of `overwrite`, `if-not-present`, `` # This field allows populating/changing the value of Prefix.Target in the received message. # if set to ``, nothing changes # if set to `overwrite`, the target value is overwritten using the template configured under `target-template` # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template` add-target: # string, a GoTemplate that allow for the customization of the target field in Prefix.Target. # it applies only if the previous field `add-target` is not empty. # if left empty, it defaults to: # {{- if index . "subscription-target" -}} # {{ index . "subscription-target" }} # {{- else -}} # {{ index . "source" | host }} # {{- end -}}` # which will set the target to the value configured under `subscription.$subscription-name.target` if any, # otherwise it will set it to the target name stripped of the port number (if present) target-template: # list of processors to apply on the message before writing event-processors: # an integer, sets the number of worker handling messages to be converted into Prometheus metrics num-workers: 1 # Enables Consul service registration service-registration: # Consul server address, default to localhost:8500 address: # Consul Data center, defaults to dc1 datacenter: # Consul username, to be used as part of HTTP basicAuth username: # Consul password, to be used as part of HTTP basicAuth password: # Consul Token, is used to provide a per-request ACL token which overrides the agent's default token token: # address and port number to be registered as a service address in Consul. # if the field is empty the address is derived from the listen field. # if the address does not contain a port number, the port number fmro the listen field is used. service-address: # Prometheus service check interval, for both http and TTL Consul checks, # defaults to 5s check-interval: # Maximum number of failed checks before the service is deleted by Consul # defaults to 3 max-fail: # Consul service name name: # List of tags to be added to the service registration, # if available, the instance-name and cluster-name will be added as tags, # in the format: gnmic-instance=$instance-name and gnmic-cluster=$cluster-name tags: # bool, enables http service check on top of the TTL check enable-http-check: # string, if enable-http-check is true, this field can be used to specify the http endpoint to be used to the check # if provided, this filed with be prepended with 'http://' (if not already present), # and appended with the value in 'path' field. # if not specified, it will be derived from the fields 'listen' and 'path' http-check-address: # if set to true, the gnmic instance will try to ac quire a lock before registering the prometheus output in consul. # this allows to register a single instance of the cluster in consul. # if the instance which acquired the lock fails, one of the remaining ones will take over. use-lock: false ``` ## Fields definition ### **type** The output type, `prometheus` in this case. ### **listen** Address to listen on for incoming scrape requests, defaults to `:9804` ### **path** URL Path to query in order to retrieve the metrics, defaults to `/metrics` ### **expiration** Maximum lifetime of metrics in the local cache, A zero value defaults to 60s, a negative duration (e.g: -1s) disables the expiration ### **metric-prefix** A string to be used as the metric namespace ### **append-subscription-name** A boolean, if true the subscription name will be appended to the metric name after the prefix ### **override-timestamps** A boolean, if true the message timestamp is changed to current time ### **export-timestamps** A boolean, enables exporting timestamps received from the gNMI target as part of the metrics ### **strings-as-labels** A boolean, enables setting string type values as prometheus metric labels. ### **tls** #### **ca-file** A string, path to the CA certificate file. This certificate is used to verify the clients certificates. #### **cert-file** A string, path to server certificate file. #### **key-file** A string, server key file. #### **client-auth** A string, use to control whether the server requests a client certificate or not and how it validates it. One of: - "": The server does not request a certificate from the client. - "request": The server requests a certificate from the client but does not require the client to send a certificate. If the client sends a certificate, it is not required to be valid. - "require": The server requires the client to send a certificate and does not fail if the client certificate is not valid. - "verify-if-given": The server requests a certificate, does not fail if no certificate is sent. If a certificate is sent it is required to be valid. - "require-verify": The server requires the client to send a valid certificate. If the ca-file is not provided, the default value for client-auth is an empty string (""). However, if a ca-file is specified, the default value for client-auth becomes "require-verify". ### **cache** Refer to the [cache docs](https://gnmic.openconfig.net/user_guide/caching) for more information. When enabled, gNMI notifications are stored in a cache upon receipt. Prometheus metrics are subsequently generated when a Prometheus system sends a scrape request. This approach allows processors (if defined) to operate on all generated events simultaneously. While this mode consumes more resources compared to the default, it provides increased flexibility for data manipulation and metric customization through the use of event-processors. ### **timeout** A Duration such as 10s, 1m or 1m30s, defines the scrape request timeout. This timer is started when a scrape request is received from a Prometheus system. If the timer is is reached, the metrics generation/collection is stopped. ### **debug** A boolean. Enables debug for prometheus output ### **add-target** A string, one of `overwrite`, `if-not-present` or ``. This field allows populating/changing the value of Prefix.Target in the received message. If left empty (""), no changes will be made. If set to "overwrite", the target value will be replaced with the configuration specified under target-template. If set to "if-not-present", the target value will be populated only if it is empty, utilizing the target-template. ### **target-template** A string, a GoTemplate that allow for the customization of the target field in `Prefix.Target`. It applies only if the previous field `add-target` is not empty. If left `target-template` is left empty, it defaults to: ``` {{- if index . "subscription-target" -}} {{ index . "subscription-target" }} {{- else -}} {{ index . "source" | host }} {{- end -}} ``` The above template sets the target to the value configured under `subscription.$subscription-name.target` if any, otherwise it will set it to the target name stripped of the port number (if present) ### **event-processors** A string list. List of processors to apply on the message before writing ### **service-registration** Enables Consul service registration #### address Consul server address, default to localhost:8500 #### datacenter Consul Data center, defaults to dc1 #### username Consul username, to be used as part of HTTP basicAuth #### password Consul password, to be used as part of HTTP basicAuth #### token Consul Token, is used to provide a per-request ACL token which overrides the agent's default token #### service-address Address and port number to be registered as a service address in Consul. if the field is empty the address is derived from the listen field. if the address does not contain a port number, the port number from the listen field is used. #### check-interval Prometheus service check interval, for both http and TTL Consul checks, defaults to 5s #### max-fail Maximum number of failed checks before the service is deleted by Consul defaults to 3 #### name Consul service name #### tags List of tags to be added to the service registration, if available, the instance-name and cluster-name will be added as tags, in the format: gnmic-instance=$instance-name and gnmic-cluster=$cluster-name #### enable-http-check A boolean, enables http service check on top of the TTL check #### http-check-address A string, if enable-http-check is true, this field can be used to specify the http endpoint to be used to the check if provided, this filed with be prepended with 'http://' (if not already present), and appended with the value in 'path' field. if not specified, it will be derived from the fields 'listen' and 'path' #### use-lock A boolean, if set to true, the gnmic instance will try to acquire a lock before registering the prometheus output. This knob allows to register a single instance of the cluster in Consul. if the instance which acquired the lock fails, one of the remaining ones takes over by acquiring the lost lock. ## Metric Generation The below diagram shows an example of a prometheus metric generation from a gnmi update
### **Metric Naming** The metric name starts with the string configured under __metric-prefix__. Then if __append-subscription-name__ is `true`, the __subscription-name__ as specified in `gnmic` configuration file is appended. The resulting string is followed by the gNMI __path__ stripped of its keys if there are any. All non-alphanumeric characters are replaced with an underscore "`_`" The 3 strings are then joined with an underscore "`_`" If further customization of the metric name is required, the [processors](../event_processors/intro.md) can be used to transform the metric name. For example, a gNMI update from subscription `port-stats` with path: ```bash /interfaces/interface[name=1/1/1]/subinterfaces/subinterface[index=0]/state/counters/in-octets ``` is exposed as a metric named: ```bash gnmic_port_stats_interfaces_interface_subinterfaces_subinterface_state_counters_in_octets ``` ### **Metric Labels** The metrics labels are generated from the subscription metadata (e.g: `subscription-name` and `source`) and the keys present in the gNMI path elements. For the previous example the labels would be: ```bash {interface_name="1/1/1",subinterface_index=0,source="$routerIP:Port",subscription_name="port-stats"} ``` ## Service Registration `gnmic` supports `prometheus_output` service registration via `Consul`. It allows `prometheus` to dynamically discover new instances of `gnmic` exposing a prometheus server ready for scraping via its [service discovery feature](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config). If the configuration section `service-registration` is present under the output definition, `gnmic` will register the `prometheus_output` service in `Consul`. ### **Configuration Example** The below configuration, registers a service name `gnmic-prom-srv` with `IP=10.1.1.1` and `port=9804` ```yaml # gnmic.yaml outputs: output1: type: prometheus listen: 10.1.1.1:9804 path: /metrics service-registration: address: consul-agent.local:8500 name: gnmic-prom-srv ``` This allows running multiple instances of `gnmic` with minimal configuration changes by using `prometheus` [service discovery feature](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config). Simplified scrape configuration in the prometheus client: ```yaml # prometheus.yaml scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: $CONSUL_ADDRESS services: - $service_name ``` ### **Service Name and ID** The `$service_name` to be discovered by `prometheus` is configured under `outputs.$output_name.service-registration.name`. If the service registration name field is not present, the name `prometheus-${output_name}` will be used. In both cases the service ID will be `prometheus-${output_name}-${instance_name}`. ### **Service Checks** `gnmic` registers the service in `Consul` with a `ttl` check enabled by default: * `ttl`: `gnmic` periodically updates the service definition in `Consul`. The goal is to allow `Consul` to detect a same instance restarting with a different service name. If `service-registration.enable-http-check` is `true`, an `http` check is added: * `http`: `Consul` periodically scrapes the prometheus server endpoint to check its availability. ```yaml # gnmic.yaml outputs: output1: type: prometheus listen: 10.1.1.1:9804 path: /metrics service-registration: address: consul-agent.local:8500 name: gnmic-prom-srv enable-http-check: true ``` Note that for the `http` check to work properly, a reachable address ( IP or name ) should be specified under `listen`. Otherwise, a reachable address should be added under `service-registration.http-check-address`. ## Caching When caching is enabled, the received messages are not immediately converted into metrics, they are cached as gNMI updates. The conversion from gNMI update to Prometheus metrics happens only when a scrape request is received. The below diagram shows how a `prometheus` output works with and without cache enabled:
When caching is enabled, the received gNMI updates are not processed and converted into metrics immediately, they are rather stored as is in the configured gNMI cache. Once a scrape request is received from `Prometheus`, all the cached gNMI updates are retrieved from the cache, converted to [events](../event_processors/intro.md#the-event-format), the configured processors, if any, are then applied to the whole list of events. Finally, The resulting event are converted into metrics and written back to `Prometheus` within the scrape response. ## Prometheus Output Metrics When a Prometheus server (gNMI API) is enabled, `gnmic` prometheus output exposes 2 prometheus Gauges: * `number_of_prometheus_metrics_total`: Number of metrics stored by the prometheus output. * `number_of_prometheus_cached_metrics_total`: Number of metrics cached by the prometheus output. ## Examples ### **A simple Prometheus output** A basic Prometheus output utilizing all default values converts each received gNMI update into a Prometheus metric, retaining it in the cache until a scrape request is received from a Prometheus system. ```yaml outputs: simple-prom: type: prometheus ``` ### **Promote string values to labels** A straightforward Prometheus output, utilizing default values for the most part, transforms each incoming gNMI update into a Prometheus metric. In this process, if a value is a string, it is incorporated as a label in the final metric. These metrics are retained in the cache, awaiting a scrape request from a Prometheus system. ```yaml outputs: simple-prom: type: prometheus strings-as-labels: true ``` ### **Use a gNMI cache** A Prometheus output leveraging a gNMI cache stores incoming gNMI updates in their original form, only converting them into Prometheus metrics upon receiving a scrape request from a Prometheus system. This mode enables batch processing of all updates simultaneously during their conversion into Prometheus metrics. ```yaml outputs: simple-prom: type: prometheus cache: {} ``` ### ****Register as a Consul service**** A Prometheus output that dynamically registers its endpoint within Consul, enabling the Prometheus system to seamlessly discover the associated address and port number. ```yaml outputs: simple-prom: type: prometheus service-registration: address: consul-server-address:8500 ``` ================================================ FILE: docs/user_guide/outputs/prometheus_write_output.md ================================================ `gnmic` supports writing metrics to Prometheus using its [remote write API](https://grafana.com/blog/2019/03/25/whats-new-in-prometheus-2.8-wal-based-remote-write/). `gNMIc`'s prometheus remote write can be used to push metrics to a variety of monitoring systems like [Prometheus](https://prometheus.io), [Mimir](https://grafana.com/oss/mimir/), [CortexMetrics](https://cortexmetrics.io/), [VictoriaMetrics](https://victoriametrics.com/), [Thanos](https://thanos.io/)... A Prometheus write output can be defined using the below format in `gnmic` config file under `outputs` section: ```yaml outputs: output1: # required type: prometheus_write # url to push metrics towards, scheme is required url: http://:9009/api/v1/push # a map of string:string, # custom HTTP headers to be sent along with each remote write request. headers: # header: value # sets the `Authorization` header on every remote write request with the # configured username and password. authentication: username: password: # sets the `Authorization` header with type `.authorization.type` and the token value. authorization: type: Bearer credentials: # tls config tls: # string, path to the CA certificate file, # this will be used to verify the clients certificates when `skip-verify` is false ca-file: # string, client certificate file. cert-file: # string, client key file. key-file: # boolean, if true, the client will not verify the server # certificate against the available certificate chain. skip-verify: false # duration, defaults to 10s, time interval between write requests interval: 10s # integer, defaults to 1000. # Buffer size for time series to be sent to the remote system. # metrics are sent to the remote system every `.interval` or when the buffer is full. Whichever one is reached first. buffer-size: 1000 # integer, defaults to 500, sets the maximum number of timeSeries per write request to remote. max-time-series-per-write: 500 # integer, defaults to 0 # number of retries per write, retries will have a back off of 100ms. max-retries: 0 # metadata configuration metadata: # boolean, # if true, metrics metadata is sent. include: false # duration, defaults to 60s. # Applies if `metadata.include` is set to true # Interval after which all metadata entries are sent to the remote write address interval: 60s # integer, defaults to 500 # applies if `metadata.include` is set to true # Max number of metadata entries per write request. max-entries-per-write: 500 # string, to be used as the metric namespace metric-prefix: "" # boolean, if true the subscription name will be appended to the metric name after the prefix append-subscription-name: false # boolean, enables setting string type values as prometheus metric labels. strings-as-labels: false # duration, defaults to 10s # Push request timeout. timeout: 10s # boolean, defaults to false # Enables debug for prometheus write output. debug: false # string, one of `overwrite`, `if-not-present`, `` # This field allows populating/changing the value of Prefix.Target in the received message. # if set to ``, nothing changes # if set to `overwrite`, the target value is overwritten using the template configured under `target-template` # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template` add-target: # string, a GoTemplate that allow for the customization of the target field in Prefix.Target. # it applies only if the previous field `add-target` is not empty. # if left empty, it defaults to: # {{- if index . "subscription-target" -}} # {{ index . "subscription-target" }} # {{- else -}} # {{ index . "source" | host }} # {{- end -}}` # which will set the target to the value configured under `subscription.$subscription-name.target` if any, # otherwise it will set it to the target name stripped of the port number (if present) target-template: # list of processors to apply on the message before writing event-processors: # an integer, sets the number of worker handling messages to be converted into Prometheus metrics num-workers: 1 # an integer, sets the number of writers draining the buffer and writing to Prometheus num-writers: 1 ``` `gnmic` creates the prometheus metric name and its labels from the subscription name, the gnmic path and the value name. ## Metric Generation The below diagram shows an example of a prometheus metric generation from a gnmi update
### Metric Naming The metric name starts with the string configured under __metric-prefix__. Then if __append-subscription-name__ is `true`, the __subscription-name__ as specified in `gnmic` configuration file is appended. The resulting string is followed by the gNMI __path__ stripped of its keys if there are any. All non-alphanumeric characters are replaced with an underscore "`_`" The 3 strings are then joined with an underscore "`_`" If further customization of the metric name is required, the [processors](../event_processors/intro.md) can be used to transform the metric name. For example, a gNMI update from subscription `port-stats` with path: ```bash /interfaces/interface[name=1/1/1]/subinterfaces/subinterface[index=0]/state/counters/in-octets ``` is exposed as a metric named: ```bash gnmic_port_stats_interfaces_interface_subinterfaces_subinterface_state_counters_in_octets ``` ### Metric Labels The metrics labels are generated from the subscription metadata (e.g: `subscription-name` and `source`) and the keys present in the gNMI path elements. For the previous example the labels would be: ```bash {interface_name="1/1/1",subinterface_index=0,source="$routerIP:Port",subscription_name="port-stats"} ``` ## Prometheus Write Metrics When a Prometheus server (gNMI API) is enabled, `gnmic` prometheus write output exposes 4 prometheus counters and 2 prometheus Gauges: * `number_of_prometheus_write_msgs_sent_success_total`: Number of msgs successfully sent by gnmic prometheus_write output. * `number_of_prometheus_write_msgs_sent_fail_total`: Number of failed msgs sent by gnmic prometheus_write output. * `msg_send_duration_ns`: gnmic prometheus_write output send duration in ns. * `number_of_prometheus_write_metadata_msgs_sent_success_total`: Number of metadata msgs successfully sent by gnmic prometheus_write output. * `number_of_prometheus_write_metadata_msgs_sent_fail_total`: Number of failed metadata msgs sent by gnmic prometheus_write output. * `metadata_msg_send_duration_ns`: gnmic prometheus_write output metadata send duration in ns. ================================================ FILE: docs/user_guide/outputs/snmp_output.md ================================================ `gnmic` supports generating SNMP traps based on received gNMI updates. This output type is useful when trying to integrate legacy systems that ingest SNMP traps with more modern telemetry/alarms stacks. Only SNMPv2c is supported. ## Configuration The SNMP output can be defined using the below format in `gnmic` config file under `outputs` section: ```yaml outputs: # the output name snmp_trap: # the output type type: snmp # the traps destination address address: # the trap destination port, defaults to 162 port: 162 # the SNMP trap community community: public # duration, wait time before the first trap evaluation. # defaults to 5s and minimum allowed value is 5s. start-delay: 5s # traps definition traps: # if true, the SNMP message generated is an inform request, not a trap. - inform: false # trap trigger definition, # the trigger section of the trap defines which received path trigger the trap # as well as the variable binding to append to it. trigger: # xpath, if present in the received event message, the trap is triggered path: # a jq script that is executed with the trigger event message as input. # must return a valid OID. oid: # a static string, defining the type of the OID value, # one of: bool, int, bitString, octetString, null, objectID, objectDescription, # ipAddress, counter32, gauge32, timeTicks, opaque, nsapAddress, counter64, # uint32, opaqueFloat, opaqueDouble type: # a jq script that is executed with the trigger event message as input. # must return a value matching the above configured type. value: # trap variable bindings definition, # the bindings section defines the extra variable bindings to append to the trap. # multiple bindings can be defined here. bindings: # A jq script that is executed with the trigger message as input. # Must return a valid xpath. # The local cache is queried using the resulting xpath, the resulting event message is used # as input to execute the below oid and value jq scripts - path: # A jq script that is executed with the message obtained from the cache as input. # must return a valid OID. oid: # a static string, defining the type of the OID value, # one of: bool, int, bitString, octetString, null, objectID, objectDescription, # ipAddress, counter32, gauge32, timeTicks, opaque, nsapAddress, counter64, # uint32, opaqueFloat, opaqueDouble type: # A jq script that is executed with the message obtained from the cache as input. # must return a value matching the above configured type. value: ``` ## How does it work? The SNMP output stores each received update message in a local cache (1.a), then checks if the message should trigger any of the configured traps (1.b). If the received message triggers a trap (2), an SNMP variable binding is generated from the trap `trigger` configuration section (`OID`, `type` and `value`) based on the triggering event. The `OID` and `value` can be [jq](https://github.com/itchyny/gojq) scripts. Then (3) for each configured binding, the configured `path` (`jq` script) is rendered based on the triggering event then used to retrieve an event message from the cache, that message is then used to generate the variable binding (`OID`, `type` and `value`). Once all bindings are generated, a `sysUpTimeInstance` (OID=`1.3.6.1.2.1.1.3.0`) binding is prepended to the PDU list of the trap, its value is the number of seconds since `gNMIc` SNMP output startup.
## Metrics The SNMP output exposes 4 Prometheus metrics: - Number of failed trap generation - Number of SNMP trap sending failures - SNMP trap generation duration in ns ```text gnmic_snmp_output_number_of_snmp_trap_failed_generation{name="snmp_trap",reason="",trap_index="0"} 0 gnmic_snmp_output_number_of_snmp_trap_sent_fail_total{name="snmp_trap",reason="",trap_index="0"} 0 gnmic_snmp_output_number_of_snmp_traps_sent_total{name="snmp_trap",trap_index="0"} 114 gnmic_snmp_output_snmp_trap_generation_duration_ns{name="snmp_trap",trap_index="0"} 380215 ``` ## Examples ### interface operational state trap The below example generates an SNMPV2 trap whenever the operational state of an interface changes (`ifOperStatus`). It adds `sysName`, `ifAdminStatus` and `ifIndex` variable bindings to the trap before sending it out. ```yaml username: admin password: NokiaSrl1! skip-verify: true targets: clab-snmp-srl1: clab-snmp-srl2: subscriptions: sub1: paths: - /interface/admin-state - /interface/oper-state - /interface/ifindex - /system/name/host-name stream-mode: on-change encoding: ascii outputs: snmp_trap: type: snmp address: snmptrap.server # port: 162 # community: public traps: - trigger: path: /interface/oper-state # static path oid: '".1.3.6.1.2.1.2.2.1.8"' # ifOperStatus type: int value: if (.values."/interface/oper-state" == "up") then 1 else 2 end bindings: - path: '"/system/name/host-name"' # jq script oid: '".1.3.6.1.2.1.1.5"' # sysName type: octetString value: '.values."/system/name/host-name"' - path: '"/interface[name="+.tags.interface_name+"]/admin-state"' # jq script oid: '".1.3.6.1.2.1.2.2.1.7"' # ifAdminStatus type: int value: if (.values."/interface/admin-state" == "enable") then 1 else 2 end - path: '"/interface[name="+.tags.interface_name+"]/ifindex"' # jq script oid: '".1.3.6.1.2.1.2.2.1.1"' # ifIndex type: int value: '.values."/interface/ifindex" | tonumber' # jq script ``` ================================================ FILE: docs/user_guide/outputs/stan_output.md ================================================ `gnmic` supports exporting subscription updates to multiple NATS Streaming (STAN) servers/clusters simultaneously A STAN output can be defined using the below format in `gnmic` config file under `outputs` section: ```yaml outputs: output1: type: stan # required # comma separated STAN servers address: localhost:4222 # stan subject subject: telemetry # stan subject prefix, the subject prefix is built the same way as for NATS output subject-prefix: telemetry # STAN username username: # STAN password password: # STAN publisher name # if left empty, this field is populated with the output name used as output ID (output1 in this example). # the full name will be '$(name)-stan-pub'. # If the flag --instance-name is not empty, the full name will be '$(instance-name)-$(name)-stan-pub. # note that each stan worker (publisher) will get client name=$name-$index name: "" # cluster name, mandatory cluster-name: test-cluster # STAN ping interval ping-interval: 5 # STAN ping retry ping-retry: 2 # string, message marshaling format, one of: proto, prototext, protojson, json, event format: event # string, one of `overwrite`, `if-not-present`, `` # This field allows populating/changing the value of Prefix.Target in the received message. # if set to ``, nothing changes # if set to `overwrite`, the target value is overwritten using the template configured under `target-template` # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template` add-target: # string, a GoTemplate that allow for the customization of the target field in Prefix.Target. # it applies only if the previous field `add-target` is not empty. # if left empty, it defaults to: # {{- if index . "subscription-target" -}} # {{ index . "subscription-target" }} # {{- else -}} # {{ index . "source" | host }} # {{- end -}}` # which will set the target to the value configured under `subscription.$subscription-name.target` if any, # otherwise it will set it to the target name stripped of the port number (if present) target-template: # boolean, if true the message timestamp is changed to current time override-timestamps: false # duration to wait before re establishing a lost connection to a stan server recovery-wait-time: 2s # integer, number of stan publishers to be created num-workers: 1 # boolean, enables extra logging for the STAN output debug: false # duration after which a message waiting to be handled by a worker gets discarded write-timeout: 10s # boolean, enables the collection and export (via prometheus) of output specific metrics enable-metrics: false # list of processors to apply on the message before writing event-processors: ``` Using `subject` config value a user can specify the STAN subject to which to send all subscriptions updates for all targets If a user wants to separate updates by targets and by subscriptions, `subject-prefix` can be used. if `subject-prefix` is specified `subject` is ignored. ================================================ FILE: docs/user_guide/outputs/tcp_output.md ================================================ `gnmic` supports exporting subscription updates to a TCP server A TCP output can be defined using the below format in `gnmic` config file under `outputs` section: ```yaml outputs: output1: # required type: tcp # a UDP server address address: IPAddress:Port # maximum sending rate, e.g: 1ns, 10ms rate: 10ms # number of messages to buffer in case of sending failure buffer-size: # export format. json, protobuf, prototext, protojson, event format: json # string, one of `overwrite`, `if-not-present`, `` # This field allows populating/changing the value of Prefix.Target in the received message. # if set to ``, nothing changes # if set to `overwrite`, the target value is overwritten using the template configured under `target-template` # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template` add-target: # string, a GoTemplate that allow for the customization of the target field in Prefix.Target. # it applies only if the previous field `add-target` is not empty. # if left empty, it defaults to: # {{- if index . "subscription-target" -}} # {{ index . "subscription-target" }} # {{- else -}} # {{ index . "source" | host }} # {{- end -}}` # which will set the target to the value configured under `subscription.$subscription-name.target` if any, # otherwise it will set it to the target name stripped of the port number (if present) target-template: # boolean, valid only if format is `event`. # if true, arrays of events are split and marshaled as JSON objects instead of an array of dicts. split-events: false # boolean, if true the message timestamp is changed to current time override-timestamps: false # string, a delimiter to be sent after each message. # useful when writing to logstash TCP input. delimiter: # enable TCP keepalive and specify the timer, e.g: 1s, 30s keep-alive: # time duration to wait before re-dial in case there is a failure retry-interval: # NOT IMPLEMENTED boolean, enables the collection and export (via prometheus) of output specific metricss enable-metrics: false # list of processors to apply on the message before writing event-processors: ``` A TCP output can be used to export data to an ELK stack, using [Logstash TCP input](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-tcp.html) ================================================ FILE: docs/user_guide/outputs/udp_output.md ================================================ `gnmic` supports exporting subscription updates to a UDP server A UDP output can be defined using the below format in `gnmic` config file under `outputs` section: ```yaml outputs: output1: # required type: udp # a UDP server address address: IPAddress:Port # maximum sending rate, e.g: 1ns, 10ms rate: 10ms # number of messages to buffer in case of sending failure buffer-size: # export format. json, protobuf, prototext, protojson, event format: json # string, one of `overwrite`, `if-not-present`, `` # This field allows populating/changing the value of Prefix.Target in the received message. # if set to ``, nothing changes # if set to `overwrite`, the target value is overwritten using the template configured under `target-template` # if set to `if-not-present`, the target value is populated only if it is empty, still using the `target-template` add-target: # string, a GoTemplate that allow for the customization of the target field in Prefix.Target. # it applies only if the previous field `add-target` is not empty. # if left empty, it defaults to: # {{- if index . "subscription-target" -}} # {{ index . "subscription-target" }} # {{- else -}} # {{ index . "source" | host }} # {{- end -}}` # which will set the target to the value configured under `subscription.$subscription-name.target` if any, # otherwise it will set it to the target name stripped of the port number (if present) target-template: # boolean, valid only if format is `event`. # if true, arrays of events are split and marshaled as JSON objects instead of an array of dicts. split-events: false # boolean, if true the message timestamp is changed to current time override-timestamps: false # time duration to wait before re-dial in case there is a failure retry-interval: # NOT IMPLEMENTED boolean, enables the collection and export (via prometheus) of output specific metrics enable-metrics: false # list of processors to apply on the message before writing event-processors: ``` A UDP output can be used to export data to an ELK stack, using [Logstash UDP input](https://www.elastic.co/guide/en/logstash/current/plugins-inputs-udp.html) ================================================ FILE: docs/user_guide/prompt_suggestions.md ================================================ Starting with `gnmic v0.4.0` release the users can enjoy the interactive prompt mode which can be enabled with the [`prompt`](../cmd/prompt.md) command. The prompt mode delivers two major features: - simplifies `gnmic` commands and flags navigation, as every option is suggested and auto-completed - provides interactive YANG path auto-suggestions for `get`, `set`, `subscribe` commands effectively making the terminal your YANG browser ## Using the prompt interface Depending on the cursor position in the prompt line, a so-called _suggestion box_ pops up with contextual auto-completions. The user can enter the suggestion box by pressing the TAB key. The and keys can be used to navigate the suggestion list. Select the suggested menu item with SPACE key or directly commit your command with ENTER, its that easy! The following most-common key bindings will work in the prompt mode: | Key combination | Description | | ------------------------------------------ | -------------------------------------------------------- | | Option/Control + →/← | move cursor a word right/left | | Control + W | delete a word to the left | | Control + Z | delete a path element in the xpath string ([example][1]) | | Control + A | move cursor to the beginning of a line | | Control + E | move cursor to the end of a line | | Control + C | discard the current line | | Control + D | exit prompt | | Control + K | delete the line after the cursor to the clipboard | | Control + U | delete the line before the cursor to the clipboard | | Control + L | clear screen | ## Commands and flags suggestions To make `gnmic` configurable and flexible we introduced a considerable amount of flags and sub-commands. To help the users navigate the sheer selection of `gnmic` configuration options, the prompt mode will auto-suggest the global flags, sub-commands and local flags of those sub-commands. When the prompt mode is launched, the suggestions will be shown for the top-level commands and all the global flags. Once the sub-command is typed into the terminal, the auto-suggestions will be provided for the commands nested under this command and its local flags. In the following demo we show how the command and flag suggestions work. As the prompt starts, the suggestion box immediately hints what commands and global flags are available for input as well as their description. The user starts with adding the global flags `--address, --insecure, --username` and then selects the `capabilities` command and commits it. This results in gNMI Capability RPC execution against a specified target. ### Mixed mode Its perfectly fine to specify some global flags outside of the prompt command and add more within the prompt mode. For example, the following is a valid invocation: ``` gnmic --insecure --username admin --password admin --address 10.1.0.11 prompt ``` Here the prompt will start with with the `insecure, username, password, address` flags set. ## YANG-completions One of the most challenging problems in the network automation field is to process the YANG models and traverse YANG trees to construct the requests used against the network elements. Be it gNMI, NETCONF or RESTCONF a users still needs to have a path pointing to specific YANG-defined node which is targeted by a request. In gNMI paths can be represented in a [human readable XPATH-like form](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-path-conventions.md#constructing-paths) - `/a/b/c[key=val]/d` - and these paths are based on the underlying YANG models. The problem at hand was how to get these paths interactively, or even better - walk the YANG tree from within the CLI and dynamically build the path used in a gNMI RPC? With **YANG-completions** feature embedded in `gnmic` what used to be a dream is now a reality 🎉

Let us explain what just happened there. In the demonstration above, we called the `gnmic` with the well-known flags defining the gNMI target (`address`, `username`, `password`). But this time we also added a few YANG specific flags ([`--file`](../cmd/prompt.md#file) and [`--dir`](../cmd/prompt.md#dir)) that load the full set of Nokia SR OS YANG models and the 3rd party models SR OS rely on. ``` gnmic --address 10.1.0.11 --insecure --username admin --password admin \ --file ~/7x50_YangModels/YANG/nokia-combined \ --dir ~/7x50_YangModels/YANG \ prompt ``` In the background `gnmic` processed these YANG models to build the entire schema tree of the Nokia SR OS state and configuration datastores. With that in-mem stored information, `gnmic` was able to auto-suggest all the possible YANG paths when the user entered the `--path` flag which accepts gNMI paths. By using the auto-suggestion hints, a user navigated the `/state` tree of a router and drilled down to the version-number leaf that, in the end, was retrieved with the gNMI Get RPC. !!! success "YANG-driven path suggestions" `gnmic` is now capable of reading and processing YANG modules to enable live path auto-suggestions ### YANG processing For the YANG-completion feature to work its absolutely imperative for `gnmic` to successfully parse and compile the YANG models. The [`prompt`](../cmd/prompt.md) command leverages the [`--file`](../cmd/prompt.md#file) and [`--dir`](../cmd/prompt.md#dir) flags to select the YANG models for processing. With the `--file` flag a user specifies a file path to a YANG file or a directory of them that `gnmic` will read and process. If it points to a directory it will be visited recursively reading in all `*.yang` files it finds. The `--dir` flag also points to a YANG file or a directory and indicates which additional YANG files might be required. For example, if the YANG modules that a user specified with the `--file` flag import or include modules that were not part of the path specified with `--file`, they need to be added with the `--dir` flag. The [Examples](#examples) section provide some good practical examples on how these two flags can be used together to process the YANG models from different vendors. ### Understanding path suggestions When `gnmic` provides a user with the path suggestions it does it in a smart and intuitive way. ![path suggestions](https://gitlab.com/rdodin/pics/-/wikis/uploads/d3815b474605765989d136753c0f9c87/image.png) First, it understands in what part of the tree a user currently is and suggests only the next possible elements. Additionally, the suggested next path elements will be augmented with the information extracted from the YANG model, such as: * element description, as given in the YANG `description` statement for the element * element configuration state (`rw` / `ro`), as defined in section [4.2.3 of RFC 7950](https://tools.ietf.org/html/rfc7950#section-4.2.3). * node type: * The containers and lists will be denoted with the `[+]` marker, which means that a user can type `/` char after them to receive suggestions for the nested elements. * the `[⋯]` character belongs to a leaf-list element. * an empty space will indicate the leaf element. ### Examples The examples in this section will show how to use the `--file` and `--dir` flags of the [`prompt`](../cmd/prompt.md) command with the YANG collections from different vendors and standard bodies. #### Nokia SR OS YANG repo: [nokia/7x50_YangModels](https://github.com/nokia/7x50_YangModels) Clone the repository with Nokia YANG models and checkout the release of interest: ``` git clone https://github.com/nokia/7x50_YangModels cd 7x50_YangModels git checkout sros_20.7.r2 ``` Start `gnmic` in prompt mode and read in the nokia-combined YANG modules: ``` gnmic --file YANG/nokia-combined \ --dir YANG \ prompt ``` This will enable path auto-suggestions for the entire tree of the Nokia SR OS YANG models. The full command with the gNMI target specified could look like this: ``` gnmic --address 10.1.0.11 --insecure --username admin --password admin \ prompt \ --file ~/7x50_YangModels/YANG/nokia-combined \ --dir ~/7x50_YangModels/YANG ``` #### Openconfig YANG repo: [openconfig/public](https://github.com/openconfig/public) Clone the OpenConfig repository: ``` git clone https://github.com/openconfig/public cd public ``` Start `gnmic` in prompt mode and read in all the modules: ``` gnmic --file release/models \ --dir third_party \ --exclude ietf-interfaces \ prompt ``` !!! note With OpenConfig models we have to use `--exclude` flag to exclude ietf-interfaces module from being clashed with OpenConfig interfaces module. #### Cisco YANG repo: [YangModels/yang](https://github.com/YangModels/yang) Clone the `YangModels/yang` repo and change into the main directory of the repo: ``` git clone https://github.com/YangModels/yang cd yang/vendor ``` ##### IOS-XR The IOS-XR native YANG models are disaggregated and spread all over the place. Although its technically possible to load them all in one go, this approach will produce a lot of top-level modules making the navigation quite hard. An easier and cleaner approach would be to find the relevant module(s) and load them separately or in small batches. For example here we load BGP config and operational models together: ``` gnmic --file vendor/cisco/xr/721/Cisco-IOS-XR-um-router-bgp-cfg.yang \ --file vendor/cisco/xr/721/Cisco-IOS-XR-ipv4-bgp-oper.yang \ --dir standard/ietf \ prompt ``` !!! note We needed to include the `ietf/` directory by means of the `--dir` flag, since the Cisco's native modules rely on the IETF modules and these modules are not in the same directory as the BGP modules. The full command that you can against the real Cisco IOS-XR node must have a target defined, the encoding set and origin suggestions enabled. Here is what it can look like: ``` gnmic -a 10.10.30.5:57500 --insecure -e json_ietf -u admin -p Cisco123 \ prompt \ --file yang/vendor/cisco/xr/662/Cisco-IOS-XR-ipv4-bgp-cfg.yang \ --file yang/vendor/cisco/xr/662/Cisco-IOS-XR-ipv4-bgp-oper.yang \ --dir yang/standard/ietf \ --suggest-with-origin ``` ##### NX-OS Cisco NX-OS native modules, on the other hand, are aggregated in a single file, here is how you can generate the suggestions from it: ``` gnmic --file vendor/cisco/xr/721/Cisco-IOS-XR-um-router-bgp-cfg.yang \ --dir standard/ietf \ prompt ``` #### Juniper YANG repo: [Juniper/yang](https://github.com/Juniper/yang) Clone the Juniper YANG repository and change into the release directory: ``` git clone https://github.com/Juniper/yang cd yang/20.3/20.3R1 ``` Start `gnmic` and generate path suggestions for the whole configuration tree of Juniper MX: ``` gnmic --file junos/conf --dir common prompt ``` !!! note 1. Juniper models are constructed in a way that a top-level container appears to be `/configuration`, that will not work with your gNMI Subscribe RPC. Instead, you should omit this top level container. So, for example, the suggested path `/configuration/interfaces/interface/state` should become `/interfaces/interface/state`. 2. Juniper vMX doesn't support gNMI Get RPC, if you plan to test it, use gNMI Subscribe RPC 3. With gNMI Subscribe, specify `-e proto` flag to enable protobuf encoding. #### Arista YANG repo: [aristanetworks/yang](https://github.com/aristanetworks/yang) Arista uses a subset of OpenConfig modules and does not provide IETF modules inside their repo. So make sure you have IETF models available so you can reference it, a `openconfig/public` is a good candidate. Clone the Arista YANG repo: ``` git clone https://github.com/aristanetworks/yang cd yang ``` Generate path suggestions for all Arista OpenConfig modules: ``` gnmic --file EOS-4.23.2F/openconfig/public/release/models \ --dir ~/public/third_party/ietf \ --exclude ietf-interfaces \ prompt ``` ## Enumeration suggestions `gnmic` flags that can take pre-defined values (enumerations) will get suggestions as well. For example, no need to keep in mind which subscription modes are available, the prompt will hint you: ![enum suggestion](https://gitlab.com/rdodin/pics/-/wikis/uploads/a2772c709d869d5efc299db451e3d4a9/image.png) ## File-path completions Whenever a user needs to provide a file path in a prompt mode, the filepath suggestions will make the process interactive: [1]: https://gitlab.com/rdodin/pics/-/wikis/uploads/cc97ef563e2b973da512951fedd1ddb8/CleanShot_2020-10-21_at_11.37.57.mp4 ================================================ FILE: docs/user_guide/subscriptions.md ================================================ Defining subscriptions with [`subscribe`](../cmd/subscribe.md) command's CLI flags is a quick&easy way to work with gNMI subscriptions. A downside of that approach is that commands can get lengthy when defining multiple subscriptions and not all possible flavors and combinations of subscription can be defined. With the multiple subscriptions defined in the [configuration file](configuration_file.md) we make a complex task of managing multiple subscriptions for multiple targets easy. The idea behind the multiple subscriptions is to define the subscriptions separately and then bind them to the targets. ## Defining subscriptions ### CLI-based subscription A subscription is configured through a series of command-line interface (CLI) flags. These include, *but are not limited to*: 1. `--path`: This flag is used to set the paths for the subscription. 2. `--mode [once | poll | stream]`: Defines the subscription mode. It can be set to once, poll, or stream. 3. `--stream-mode [target-defined | sample | on-change]`: Sets the stream subscription mode. The options are target-defined, sample, or on-change. 4. `--sample-interval`: Determines the sample interval for a stream/sample subscription. A command executed with these flags will generate a single [SubscribeRequest](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#3511-the-subscriberequest-message) that is sent to the target. Every path configured with the `--path` flag leads to a [`Subscription`](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#3513-the-subscription-message) added to the [`subscriptionList`](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#3512-the-subscriptionlist-message) message. There are no constraints when defining a `ONCE` or `POLL` subscribe request. However, when a `STREAM` subscribe request is defined using flags, all subscriptions (paths) will adopt the same mode (`target-defined`, `on-change`, or `sample`) and stream subscription attributes such as `sample-interval` and `heartbeat-interval`. ### File-based subscription config To define a subscription a user needs to create the `subscriptions` container in the configuration file: ```yaml subscriptions: # a configurable subscription name subscription-name: # string, path to be set as the Subscribe Request Prefix prefix: # string, value to set as the SubscribeRequest Prefix Target target: # boolean, if true, the SubscribeRequest Prefix Target will be set to # the configured target name under section `targets`. # does not apply if the previous field `target` is set. set-target: # true | false # list of strings, list of subscription paths for the named subscription paths: [] # list of strings, schema definition modules models: [] # string, case insensitive, one of ONCE, STREAM, POLL mode: STREAM # string, case insensitive, if `mode` is set to STREAM, this defines the type # of streamed subscription, # one of SAMPLE, TARGET_DEFINED, ON_CHANGE stream-mode: TARGET_DEFINED # string, case insensitive, defines the gNMI encoding to be used for the subscription encoding: JSON # integer, specifies the packet marking that is to be used for the subscribe responses qos: # duration, Golang duration format, e.g: 1s, 1m30s, 1h. # specifies the sample interval for a STREAM/SAMPLE subscription sample-interval: # duration, Golang duration format, e.g: 1s, 1m30s, 1h. # The heartbeat interval value can be specified along with `ON_CHANGE` or `SAMPLE` # stream subscriptions modes and has the following meanings in each case: # - `ON_CHANGE`: The value of the data item(s) MUST be re-sent once per heartbeat # interval regardless of whether the value has changed or not. # - `SAMPLE`: The target MUST generate one telemetry update per heartbeat interval, # regardless of whether the `--suppress-redundant` flag is set to true. heartbeat-interval: # boolean, if set to true, the target SHOULD NOT generate a telemetry update message unless # the value of the path being reported on has changed since the last suppress-redundant: # boolean, if set to true, the target MUST not transmit the current state of the paths # that the client has subscribed to, but rather should send only updates to them. updates-only: # list of strings, the list of outputs to send updates to. If blank, defaults to all outputs outputs: - output1 - output2 # list of subscription definition, this field is used to define multiple stream subscriptions (target-defined, sample or on-change) # that will be created using a single SubscribeRequest (i.e: share the same gRPC stream). # This field cannot be defined if `paths`, `stream-mode`, `sample-interval`, `heartbeat-interval` or`suppress-redundant` are set. # Only fields applicable to STREAM subscriptions can be set in this list of subscriptions: # `paths`, `stream-mode`, `sample-interval`, `heartbeat-interval` or`suppress-redundant` stream-subscriptions: - paths: [] stream-mode: sample-interval: heartbeat-interval: suppress-redundant: - paths: [] stream-mode: sample-interval: heartbeat-interval: suppress-redundant: # historical subscription config: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-history.md#1-purpose history: # string, nanoseconds since Unix epoch or RFC3339 format. # if set, the history extension type will be a Snapshot request snapshot: # string, nanoseconds since Unix epoch or RFC3339 format. # if set, the history extension type will be a Range request start: # string, nanoseconds since Unix epoch or RFC3339 format. # if set, the history extension type will be a Range request end: # uint32, depth value as per: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-depth.md depth: 0 ``` #### Subscription config to gNMI SubscribeRequest Each subscription (under `subscriptions:`) results in a single [`SubscribeRequest`](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#3511-the-subscriberequest-message) being sent to the target. If `paths` is set, each path results in a separate [`Subscription`](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#3513-the-subscription-message) message being added to the [`subscriptionList`](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#3512-the-subscriptionlist-message) message. If instead of paths, a list of stream-subscriptions is defined: ```yaml subscriptions: sub1: stream-subscriptions: - paths: ``` Each path under each stream-subscriptions will result in a separate [`Subscription`](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#3513-the-subscription-message) message being added to the [`subscriptionList`](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#3512-the-subscriptionlist-message) message. #### Examples ##### A single stream/sample subscription === "YAML" ```yaml subscriptions: port_stats: paths: - "/state/port[port-id=*]/statistics" stream-mode: sample sample-interval: 5s encoding: bytes ``` === "CLI" ```shell gnmic sub --path /state/port/statistics \ --stream-mode sample \ --sample-interval 5s \ --encoding bytes ``` === "PROTOTEXT" ```text subscribe: { subscription: { path: { elem: { name: "state" } elem: { name: "port" } elem: { name: "statistics" } } mode: SAMPLE sample_interval: 5000000000 } encoding: BYTES } ``` ##### A single stream/on-change subscription === "YAML" ```yaml subscriptions: port_stats: paths: - "/state/port/oper-state" stream-mode: on-change encoding: bytes ``` === "CLI" ```shell gnmic sub --path /state/port/oper-state \ --stream-mode on-change \ --encoding bytes ``` === "PROTOTEXT" ```text subscribe: { subscription: { path: { elem: { name: "state" } elem: { name: "port" } elem: { name: "oper-state" } } mode: ON_CHANGE } encoding: BYTES } ``` ##### A ONCE subscription === "YAML" ```yaml subscriptions: system_facts: paths: - /configure/system/name - /state/system/version mode: once encoding: bytes ``` === "CLI" ```shell gnmic sub --path /configure/system/name \ --path /state/system/version \ --mode once \ --encoding bytes ``` === "PROTOTEXT" ```text subscribe: { subscription: { path: { elem: { name: "configure" } elem: { name: "port" } elem: { name: "name" } } } subscription: { path: { elem: { name: "state" } elem: { name: "system" } elem: { name: "version" } } } mode: ONCE encoding: BYTES } ``` ##### Combining multiple stream subscriptions in the same gRPC stream === "YAML" ```yaml subscriptions: sub1: stream-subscriptions: - paths: - /configure/system/name stream-mode: on-change - paths: - /state/port/statistics stream-mode: sample sample-interval: 10s encoding: bytes ``` === "CLI" NA === "PROTOTEXT" ```text subscribe: { subscription: { path: { elem: { name: "configure" } elem: { name: "system" } elem: { name: "name" } } mode: ON_CHANGE } subscription: { path: { elem: { name: "state" } elem: { name: "port" } elem: { name: "statistics" } } mode: SAMPLE sample_interval: 10000000000 } encoding: BYTES } ``` ##### Configure multiple subscriptions ```yaml # part of ~/gnmic.yml config file subscriptions: # container for subscriptions port_stats: # a named subscription, a key is a name paths: # list of subscription paths for that named subscription - "/state/port[port-id=1/1/c1/1]/statistics/out-octets" - "/state/port[port-id=1/1/c1/1]/statistics/in-octets" stream-mode: sample # one of [on-change target-defined sample] sample-interval: 5s encoding: bytes service_state: paths: - "/state/service/vpls[service-name=*]/oper-state" - "/state/service/vprn[service-name=*]/oper-state" stream-mode: on-change system_facts: paths: - "/configure/system/name" - "/state/system/version" mode: once ``` Inside that subscriptions container a user defines individual named subscriptions; in the example above two named subscriptions `port_stats` and `service_state` were defined. These subscriptions can be used on the cli via the `[ --name ]` flag of subscribe command: ```shell gnmic subscribe --name service_state --name port_stats ``` Or by binding them to different targets, (see next section) ## Binding subscriptions Once the subscriptions are defined, they can be flexibly associated with the targets. ```yaml # part of ~/gnmic.yml config file targets: router1.lab.com: username: admin password: secret subscriptions: - port_stats - service_state router2.lab.com: username: gnmi password: telemetry subscriptions: - service_state ``` The named subscriptions are put under the `subscriptions` section of a target container. As shown in the example above, it is allowed to add multiple named subscriptions under a single target; in that case each named subscription will result in a separate Subscription Request towards a target. !!! note If a target is not explicitly associated with any subscription, the client will subscribe to all defined subscriptions in the file. The full configuration with the subscriptions defined and associated with targets will look like this: ```yaml username: admin password: nokiasr0s insecure: true targets: router1.lab.com: subscriptions: - port_stats - service_state - system_facts router2.lab.com: subscriptions: - service_state - system_facts subscriptions: port_stats: paths: - "/state/port[port-id=1/1/c1/1]/statistics/out-octets" - "/state/port[port-id=1/1/c1/1]/statistics/in-octets" stream-mode: sample sample-interval: 5s encoding: bytes service_state: paths: - "/state/service/vpls[service-name=*]/oper-state" - "/state/service/vprn[service-name=*]/oper-state" stream-mode: on-change system_facts: paths: - "/configure/system/name" - "/state/system/version" mode: once ``` As a result of such configuration the `gnmic` will set up three gNMI subscriptions to router1 and two other gNMI subscriptions to router2: ```shell $ gnmic subscribe gnmic 2020/07/06 22:03:35.579942 target 'router2.lab.com' initialized gnmic 2020/07/06 22:03:35.593082 target 'router1.lab.com' initialized ``` ```json { "source": "router2.lab.com", "subscription-name": "service_state", "timestamp": 1594065869313065895, "time": "2020-07-06T22:04:29.313065895+02:00", "prefix": "state/service/vpls[service-name=testvpls]", "updates": [ { "Path": "oper-state", "values": { "oper-state": "down" } } ] } { "source": "router1.lab.com", "subscription-name": "service_state", "timestamp": 1594065868850351364, "time": "2020-07-06T22:04:28.850351364+02:00", "prefix": "state/service/vpls[service-name=test]", "updates": [ { "Path": "oper-state", "values": { "oper-state": "down" } } ] } { "source": "router1.lab.com", "subscription-name": "port_stats", "timestamp": 1594065873938155916, "time": "2020-07-06T22:04:33.938155916+02:00", "prefix": "state/port[port-id=1/1/c1/1]/statistics", "updates": [ { "Path": "in-octets", "values": { "in-octets": "671552" } } ] } { "source": "router1.lab.com", "subscription-name": "port_stats", "timestamp": 1594065873938043848, "time": "2020-07-06T22:04:33.938043848+02:00", "prefix": "state/port[port-id=1/1/c1/1]/statistics", "updates": [ { "Path": "out-octets", "values": { "out-octets": "370930" } } ] } ^C received signal 'interrupt'. terminating... ``` ================================================ FILE: docs/user_guide/targets/target_discovery/consul_discovery.md ================================================ The Consul target loader discovers gNMI targets registered as service instances in a Consul Server. The loader watches services registered in Consul defined by a service name and optionally a set of tags.
### Services watch When at least one service name is set, gNMIc consul loader will watch the instances registered under that service name and build a target configuration using the service ID as the target name and the registered address and port as the target address. The remaining configuration can be set under the service name definition. ```yaml loader: type: consul services: - name: cluster1-gnmi-server config: insecure: true username: admin password: admin ``` ### Templating with Consul It is possible to set the target name to something other than the Consul Service ID using the `name` field under the config. The target name can be customized using [Go Templates](https://golang.org/pkg/text/template/). In addition to setting the target name, it is also possible to use Go Templates on `event-tags` as well. The templates use the Service under Consul, so access to things like `ID`, `Tags`, `Meta`, etc. are all available. ```yaml loader: type: consul services: - name: cluster1-gnmi-server config: name: "{{.Meta.device}}" event-tags: location: "{{.Meta.site_name}}" model: "{{.Meta.device_type}}" tag-1: "{{.Meta.tag_1}}" boring-static-tag: "hello" ``` ### Configuration ```yaml loader: type: consul # address of the loader server address: localhost:8500 # Consul Data center, defaults to dc1 datacenter: dc1 # Consul username, to be used as part of HTTP basicAuth username: # Consul password, to be used as part of HTTP basicAuth password: # Consul Token, is used to provide a per-request ACL token which overrides the agent's default token token: # the key prefix to watch for targets configuration, defaults to "gnmic/config/targets" key-prefix: gnmic/config/targets # if true, registers consulLoader prometheus metrics with the provided # prometheus registry enable-metrics: false # list of services to watch and derive target configurations from. services: # name of the Consul service - name: # a list of strings to further filter the service instances tags: # configuration map to apply to target discovered from this service config: # list of actions to run on target discovery on-add: # list of actions to run on target removal on-delete: # variable dict to pass to actions to be run vars: # path to variable file, the variables defined will be passed to the actions to be run # values in this file will be overwritten by the ones defined in `vars` vars-file: ``` ================================================ FILE: docs/user_guide/targets/target_discovery/discovery_intro.md ================================================ ## Introduction `gnmic` supports dynamic loading of gNMI targets from external systems. This feature allows adding and deleting gNMI targets without the need to restart `gnmic`.
Depending on the discovery method, `gnmic` will either: - Subscribe to changes on the remote system, - Or poll the defined targets from the remote systems. When a change is detected, the new targets are added and the corresponding subscriptions are immediately established. The removed targets are deleted together with their subscriptions. Actions can be run on target discovery (on-add or on-delete), this can be useful to add initial configurations to target ahead of gNMI subscriptions or run checks before subscribing. In the case of on-add actions, !!! notes 1. Only one discovery type is supported at a time. 2. Target updates are not supported, delete and re-add is the way to update a target configuration. ## Discovery types Four types of target discovery methods are supported: ### [File Loader](./file_discovery.md) Watches changes to a local file containing gNMI targets definitions.
### [Consul Server Loader](./consul_discovery.md) Subscribes to Consul KV key prefix changes, the keys and their value represent a target configuration fields.
### [Docker Engine Loader](./docker_discovery.md) Polls containers from a Docker Engine host matching some predefined criteria (docker filters).
### [HTTP Loader](./http_discovery.md) Queries an HTTP endpoint periodically, expected a well formatted JSON dict of targets configurations.
## Expanding Environment Variables in Loaded Configuration You can use environment variables within your loaded target configurations. To enable this feature, set `expand-env` to `true` under the loader configuration: ```yaml loader: type: consul expand-env: true # loader specific configuration ``` ## Running Actions On Discovery All actions support fields `on-add` and `on-delete` which take a list of predefined action names that will be run sequentially on target discovery or deletion. The below configuration example defines 3 actions `configure_interfaces`, `configure_subinterfaces` and `configure_network_instance` which will run when the `docker` loader discovers a target with label `clab-node-kind=srl` ``` yaml loader: type: docker filters: - containers: - label: clab-node-kind=srl config: skip-verify: true username: admin password: NokiaSrl1! on-add: - configure_interfaces - configure_subinterfaces - configure_network_instances actions: configure_interfaces: name: configure_interfaces type: gnmi target: '{{ .Input }}' rpc: set encoding: json_ietf debug: true paths: - /interface[name=ethernet-1/1]/admin-state - /interface[name=ethernet-1/2]/admin-state values: - enable - enable configure_subinterfaces: name: configure_subinterfaces type: gnmi target: '{{ .Input }}' rpc: set encoding: json_ietf debug: true paths: - /interface[name=ethernet-1/1]/subinterface[index=0]/admin-state - /interface[name=ethernet-1/2]/subinterface[index=0]/admin-state values: - enable - enable configure_network_instances: name: configure_network_instances type: gnmi target: '{{ .Input }}' rpc: set encoding: json_ietf debug: true paths: - /network-instance[name=default]/admin-state - /network-instance[name=default]/interface - /network-instance[name=default]/interface values: - enable - '{"name": "ethernet-1/1.0"}' - '{"name": "ethernet-1/2.0"}' ``` ================================================ FILE: docs/user_guide/targets/target_discovery/docker_discovery.md ================================================ The Docker target loader allows discovering gNMI targets from [Docker Engine](https://docs.docker.com/engine/) hosts. It discovers containers as well as their gNMI address, based on a list of [Docker filters](https://docs.docker.com/engine/reference/commandline/ps/#filtering) One gNMI target is added per discovered container. Individual Target configurations are derived from the container exposed ports and labels, as well as the global configuration.
#### Configuration ```yaml loader: # the loader type: docker type: docker # string, the docker daemon address, # leave empty to use the local docker daemon # possible values: # - unix:///var/run/docker.sock # - tcp://:port # - http://:port address: "" # duration, check interval for discovering # new docker containers, default: 30s interval: 30s # duration, the docker queries timeout, # defaults to half of `interval` if left unset or is invalid. timeout: 15s # time to wait before the fist docker query start-delay: 0s # bool, print loader debug statements. debug: false # if true, registers dockerLoader prometheus metrics with the provided # prometheus registry enable-metrics: false # containers, network filters: # see https://docs.docker.com/engine/reference/commandline/ps/#filtering # for the possible values. filters: # containers filters - containers: # containers returned by `docker ps -f "label=clab-node-kind=srl"` - label: clab-node-kind=srl # network filters network: # networks returned by `docker network ls -f "label=containerlab"` label: containerlab # gNMI port value for the containers discovered by this filter. # It can be a port value or a label name set on the container. # valid values: # `port: "57400"` # `port: "label=gnmi-port"` port: # target config for containers discovered by this filter. # These fields will override the matching global config fields. config: username: admin password: secret1 skip-verify: true # list of actions to run on target discovery on-add: # list of actions to run on target removal on-delete: # variable dict to pass to actions to be run vars: # path to variable file, the variables defined will be passed to the actions to be run # values in this file will be overwritten by the ones defined in `vars` vars-file: ``` ##### Filter fields explanation - **containers**: (Optional) A list of lists of docker filters used to select containers from the Docker Engine host. The docker filter `status=running` is implicitly added. If not set, all containers with `status=running` are selected. - **network**: (Optional) A set of docker filters used to select the network to connect to the container. If not filter is set, all docker networks are considered. - **port**: (Optional) This field is used to specify the gNMI port for the discovered containers. An integer can be specified in which case it will be used as the gNMI port for all discovered containers. Alternatively, a string in the format `label=` can be set, where `` is a docker label containing the gNMI port value. If no value is set, the global flag/value `port` is used. - **config**: (Optional) A set of configuration parameters to be applied to all discovered targets by the container filter. The target config fields as defined [here](../targets.md#target-configuration-options) can be set, except `name` and `address` which are discovered by the loader. #### Examples ##### Simple1 A simple docker loader with a single docker container filter. It loads all containers deployed with [containerlab](https://containerlab.srlinux.dev/), in lab called `lab1`. ```yaml loader: type: docker filters: - containers: - label: containerlab=lab1 ``` In the above example, `gnmic` docker loader connects to the local Docker Daemon. It will discover containers having label `containerlab=lab1` and add them as gNMI targets. Default configuration applies to those added targets ##### Simple2 A simple docker loader with a single docker container filter. It loads all containers deployed with [containerlab](https://containerlab.srlinux.dev/), having kind `srl`. ```yaml loader: type: docker filters: - containers: - label: clab-node-kind=srl ``` In the above example, `gnmic` docker loader connects to the local Docker Daemon. It will discover containers having label `clab-node-kind=srl` and add them as gNMI targets. Default configuration applies to those added targets ##### Advanced Example A more advanced docker loader, with 2 filers, custom networks, ports and target configuration. ```yaml loader: type: docker address: unix:///var/run/docker.sock filters: # filter 1 - containers: # containers returned by `docker ps -f "label=clab-node-kind=srl"` - label: clab-node-kind=srl network: # networks returned by `docker network ls -f "label=containerlab"` label: containerlab port: "57400" config: username: admin password: secret1 skip-verify: true # filter 2 - containers: # containers returned by `docker ps -f "label=clab-node-kind=ceos"` - label: clab-node-kind=ceos # containers returned by `docker ps -f "label=clab-node-kind=vr-sros"` - label: clab-node-kind=vr-sros network: # networks returned by `docker network ls -f "name=mgmt"` name: mgmt # the value of label=gnmi-port exported by each container` port: "label=gnmi-port" config: username: admin password: secret2 insecure: true ``` In the above example, `gnmic` docker loader connects to the docker daemon using the local unix socket address. It will discover 2 sets of containers matching 2 filters: - Filter1: - Containers with label `clab-node-kind=srl`. - Use network with label `containerlab` to connect to them. - The port number is the same for all containers and is set to `57400`. - The config fields `username: admin`, `password: secret1` and `skip-verify: true` will be applied to all the containers discovered by this filter. - Filter2: - Containers with labels `clab-node-kind-ceos` or `clab-node-vr-sros` - Use network with `name=mgmt` to connect to them. Note that Docker returns all networks with names containing `mgmt` - The port number is discovered from the label `gnmi-port` set on each container. - The config fields `username: admin`, `password: secret2` and `insecure: true` will be applied to all the containers discovered by this filter. ================================================ FILE: docs/user_guide/targets/target_discovery/file_discovery.md ================================================ `gnmic` is able to watch changes happening to a file that contains the gNMI targets configuration. The file can be located in the local file system or a remote one. In case of remote file, `ftp`, `sftp`, `http(s)` protocols are supported. The read timeout of remote files is set to half of the read `interval` Newly added targets are discovered and subscribed to. Deleted targets are moved from gNMIc's list and their subscriptions are terminated.
#### Configuration A file target loader can be configured in a couple of ways: - using the `--targets-file` flag: ``` bash gnmic --targets-file ./targets-config.yaml subscribe ``` ``` bash gnmic --targets-file sftp://user:pass@server.com/path/to/targets-file.yaml subscribe ``` - using the main configuration file: ``` yaml loader: type: file # path to the file path: ./targets-config.yaml # watch interval at which the file # is read again to determine if a target was added or deleted. interval: 30s # time to wait before the first file read start-delay: 0s # if true, registers fileLoader prometheus metrics with the provided # prometheus registry enable-metrics: false # list of actions to run on target discovery on-add: # list of actions to run on target removal on-delete: # variable dict to pass to actions to be run vars: # path to variable file, the variables defined will be passed to the actions to be run # values in this file will be overwritten by the ones defined in `vars` vars-file: ``` The `--targets-file` flag takes precedence over the `loader` configuration section. The targets file can be either a `YAML` or a `JSON` file (identified by its extension json, yaml or yml), and follows the same format as the main configuration file `targets` section. See [here](../../../user_guide/targets/targets.md#target-option) ### Examples #### Local File ``` yaml loader: type: file # path to the file path: ./targets-config.yaml # watch interval at which the file # is read again to determine if a target was added or deleted. interval: 30s # if true, registers fileLoader prometheus metrics with the provided # prometheus registry enable-metrics: false ``` #### Remote File SFTP remote file ``` yaml loader: type: file # path to the file path: sftp://user:pass@server.com/path/to/targets-file.yaml # watch interval at which the file # is read again to determine if a target was added or deleted. interval: 30s # if true, registers fileLoader prometheus metrics with the provided # prometheus registry enable-metrics: false ``` FTP remote file ``` yaml loader: type: file # path to the file path: ftp://user:pass@server.com/path/to/targets-file.yaml # watch interval at which the file # is read again to determine if a target was added or deleted. interval: 30s # if true, registers fileLoader prometheus metrics with the provided # prometheus registry enable-metrics: false ``` HTTP remote file ``` yaml loader: type: file # path to the file path: http://user:pass@server.com/path/to/targets-file.yaml # watch interval at which the file # is read again to determine if a target was added or deleted. interval: 30s # if true, registers fileLoader prometheus metrics with the provided # prometheus registry enable-metrics: false ``` #### Targets file format === "YAML" ```yaml 10.10.10.10: username: admin insecure: true 10.10.10.11: username: admin 10.10.10.12: 10.10.10.13: 10.10.10.14: ``` === "JSON" ```json { "10.10.10.10": { "username": "admin", "insecure": true }, "10.10.10.11": { "username": "admin", }, "10.10.10.12": {}, "10.10.10.13": {}, "10.10.10.14": {} } ``` Just like the targets in the main configuration file, the missing configuration fields get filled with the global flags, the ENV variables first, the config file main section next and then the default values. ================================================ FILE: docs/user_guide/targets/target_discovery/http_discovery.md ================================================ The HTTP target loader can be used to query targets configurations from a remote HTTP server. It expects a well formatted `application/json` body and a code 200 response. It supports secure connections, basic authentication using a username and password and/or Oauth2 token based authentication.
#### Configuration ``` yaml loader: type: http # resource URL, must include the http(s) schema url: # watch interval at which the HTTP endpoint is queried again # to determine if a target was added or deleted. interval: 60s # HTTP request timeout timeout: 50s # time to wait before the fist HTTP query start-delay: 0s # tls config tls: # string, path to the CA certificate file, # this will be used to verify the clients certificates when `skip-verify` is false ca-file: # string, client certificate file. cert-file: # string, client key file. key-file: # boolean, if true, the client will not verify the server # certificate against the available certificate chain. skip-verify: false # username to be used with basic authentication username: # password to be used with basic authentication password: # token to be used with Oauth2 token based authentication token: # auth scheme (default is `Bearer`) auth-scheme: # text template template: # path to a text template file template-file: # if true, registers httpLoader prometheus metrics with the provided # prometheus registry enable-metrics: false # list of actions to run on target discovery on-add: # list of actions to run on target removal on-delete: # variable dict to pass to actions to be run vars: # path to variable file, the variables defined will be passed to the actions to be run # values in this file will be overwritten by the ones defined in `vars` vars-file: ``` #### Targets file format === "JSON" ```json { "10.10.10.10": { "username": "admin", "insecure": true }, "10.10.10.11": { "username": "admin", }, "10.10.10.12": {}, "10.10.10.13": {}, "10.10.10.14": {} } ``` Just like the targets in the main configuration file, the missing configuration fields get filled with the global flags, the ENV variables first, the config file main section next and then the default values. ================================================ FILE: docs/user_guide/targets/targets.md ================================================ # Targets Sometimes it is needed to perform an operation on multiple devices; be it getting the same leaf value from a given set of the network elements or setting a certain configuration element to some value. For cases like that `gnmic` offers support for multiple targets operations which a user can configure both via CLI flags as well as with the [file-based configuration](../configuration_file.md). ### CLI configuration Specifying multiple targets in the CLI is as easy as repeating the [`--address`](../../global_flags.md#address) flag. ```shell ❯ gnmic -a router1.lab.net:57400 \ -a router2.lab.net:57400 \ get --path /configure/system/name ``` ### File-based configuration With the file-based configuration a user has two options to specify multiple targets: * using `address` option * using `targets` option #### address option With `address` option the user must provide a list of addresses. In the YAML format that would look like that: ```yaml address: - "router1.lab.net:57400" - "router2.lab.net:57400" ``` The limitation this approach has is that it is impossible to set different credentials for the targets, they will essentially share the credentials specified in a file or via flags. #### target option With the `targets` option it is possible to set target specific options (such as credentials, subscriptions, TLS config, outputs), and thus this option is recommended to use: ```yaml targets: router1.lab.net: timeout: 2s username: r1 password: gnmi_pass router2.lab.net:57000: username: r2 password: gnmi_pass tls-key: /path/file1 tls-cert: /path/file2 ``` The target address is defined as the key under the `targets` section of the configuration file. The default port (57400) can be omitted as demonstrated with `router1.lab.net` target address. Have a look at the [file-based targets configuration](https://github.com/openconfig/gnmic/blob/main/config.yaml) example to get a glimpse of what it is capable of. The target inherits the globally defined options if the matching options are not set on a target level. For example, if a target doesn't have a username defined, it will use the username value set on a global level. #### secure/insecure connections `gnmic` supports both secure and insecure gRPC connections to the target. ##### insecure connection Using the `--insecure` flag it is possible to establish an insecure gRPC connection to the target. ```bash gnmic -a router1:57400 \ --insecure \ get --path /configure/system/name ``` ##### secure connection - A one way secure connection without target certificate verification can be established using the `--skip-verify` flag. ```bash gnmic -a router1:57400 \ --skip-verify \ get --path /configure/system/name ``` - Adding target certificate verification can be done using the `--tls-ca` flag. ```bash gnmic -a router1:57400 \ --tls-ca /path/to/ca/file \ get --path /configure/system/name ``` - A two way secure connection can be established using the `--tls-cert` `--tls-key` flags. ```bash gnmic -a router1:57400 \ --tls-cert /path/to/certificate/file \ --tls-key /path/to/certificate/file \ get --path /configure/system/name ``` - It is also possible to control the negotiated TLS version using the `--tls-min-version`, `--tls-max-version` and `--tls-version` (preferred TLS version) flags. ##### Controlling the advertised cipher suites It's possible to configure the advertised cipher suites gNMIc's gNMI client advertises to the target. This can be done by setting the `tls-min-version` and `tls-max-version` or by explicitly listing cipher suites to be advertised. By default the below list is advertised: | Name | Key Exchange | Auth | Enc | MAC | |------------------------------------------------|--------------|-----------|----------------------|-----------| | TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 | ECDHE | ECDSA | AES_128_GCM | SHA256 | | TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 | ECDHE | RSA | AES_128_GCM | SHA256 | | TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 | ECDHE | ECDSA | AES_256_GCM | SHA384 | | TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 | ECDHE | RSA | AES_256_GCM | SHA384 | | TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 | ECDHE | ECDSA | CHACHA20_POLY1305 | SHA256 | | TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 | ECDHE | RSA | CHACHA20_POLY1305 | SHA256 | | TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA | ECDHE | ECDSA | AES_128_CBC | SHA | | TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA | ECDHE | RSA | AES_128_CBC | SHA | | TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA | ECDHE | ECDSA | AES_256_CBC | SHA | | TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA | ECDHE | RSA | AES_256_CBC | SHA | | TLS_RSA_WITH_AES_128_GCM_SHA256 | RSA | RSA | AES_128_GCM | SHA256 | | TLS_RSA_WITH_AES_256_GCM_SHA384 | RSA | RSA | AES_256_GCM | SHA384 | | TLS_RSA_WITH_AES_128_CBC_SHA | RSA | RSA | AES_128_CBC | SHA | | TLS_RSA_WITH_AES_256_CBC_SHA | RSA | RSA | AES_256_CBC | SHA | | TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA | ECDHE | RSA | 3DES_EDE_CBC | SHA | | TLS_RSA_WITH_3DES_EDE_CBC_SHA | RSA | RSA | 3DES_EDE_CBC | SHA | | TLS_AES_128_GCM_SHA256 | (TLS 1.3) | (TLS 1.3) | AES_128_GCM | SHA256 | | TLS_AES_256_GCM_SHA384 | (TLS 1.3) | (TLS 1.3) | AES_256_GCM | SHA384 | | TLS_CHACHA20_POLY1305_SHA256 | (TLS 1.3) | (TLS 1.3) | CHACHA20_POLY1305 | SHA256 | If the `tls-max-version` is set to "1.2", the TLS1.3 cipher suites will not be included: | Name | Key Exchange | Auth | Enc | MAC | |------------------------------------------------|--------------|-------|-------------------|-----------| | TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 | ECDHE | ECDSA | AES_128_GCM | SHA256 | | TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 | ECDHE | RSA | AES_128_GCM | SHA256 | | TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 | ECDHE | ECDSA | AES_256_GCM | SHA384 | | TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 | ECDHE | RSA | AES_256_GCM | SHA384 | | TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 | ECDHE | ECDSA | CHACHA20_POLY1305 | SHA256 | | TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 | ECDHE | RSA | CHACHA20_POLY1305 | SHA256 | | TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA | ECDHE | ECDSA | AES_128_CBC | SHA | | TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA | ECDHE | RSA | AES_128_CBC | SHA | | TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA | ECDHE | ECDSA | AES_256_CBC | SHA | | TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA | ECDHE | RSA | AES_256_CBC | SHA | | TLS_RSA_WITH_AES_128_GCM_SHA256 | RSA | RSA | AES_128_GCM | SHA256 | | TLS_RSA_WITH_AES_256_GCM_SHA384 | RSA | RSA | AES_256_GCM | SHA384 | | TLS_RSA_WITH_AES_128_CBC_SHA | RSA | RSA | AES_128_CBC | SHA | | TLS_RSA_WITH_AES_256_CBC_SHA | RSA | RSA | AES_256_CBC | SHA | | TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA | ECDHE | RSA | 3DES_EDE_CBC | SHA | | TLS_RSA_WITH_3DES_EDE_CBC_SHA | RSA | RSA | 3DES_EDE_CBC | SHA | If the `tls-max-version` and `tls-min-version` are set to "1.1", the below list of cipher suites is advertised: | Name | Key Exchange | Auth | Enc | MAC | |----------------------------------------|--------------|-------|--------------|-----| | TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA | ECDHE | ECDSA | AES_128_CBC | SHA | | TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA | ECDHE | RSA | AES_128_CBC | SHA | | TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA | ECDHE | ECDSA | AES_256_CBC | SHA | | TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA | ECDHE | RSA | AES_256_CBC | SHA | | TLS_RSA_WITH_AES_128_CBC_SHA | RSA | RSA | AES_128_CBC | SHA | | TLS_RSA_WITH_AES_256_CBC_SHA | RSA | RSA | AES_256_CBC | SHA | | TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA | ECDHE | RSA | 3DES_EDE_CBC | SHA | | TLS_RSA_WITH_3DES_EDE_CBC_SHA | RSA | RSA | 3DES_EDE_CBC | SHA | If you want to control which cipher suites are sent and in what order of preference, you can set the `cipher-suites` field under the target: ```yaml targets: target1: # other fields cipher-suites: - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - TLS_AES_128_GCM_SHA256 ``` The full list of supported cipher suites is: | Name | Key Exchange | Auth | Enc | MAC | |------------------------------------------------|--------------|-----------|--------------------|-----------| | TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 | ECDHE | ECDSA | AES_128_GCM | SHA256 | | TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 | ECDHE | RSA | AES_128_GCM | SHA256 | | TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 | ECDHE | ECDSA | AES_256_GCM | SHA384 | | TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 | ECDHE | RSA | AES_256_GCM | SHA384 | | TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 | ECDHE | ECDSA | CHACHA20_POLY1305 | SHA256 | | TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 | ECDHE | RSA | CHACHA20_POLY1305 | SHA256 | | TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA | ECDHE | ECDSA | AES_128_CBC | SHA | | TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA | ECDHE | RSA | AES_128_CBC | SHA | | TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA | ECDHE | ECDSA | AES_256_CBC | SHA | | TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA | ECDHE | RSA | AES_256_CBC | SHA | | TLS_RSA_WITH_AES_128_GCM_SHA256 | RSA | RSA | AES_128_GCM | SHA256 | | TLS_RSA_WITH_AES_256_GCM_SHA384 | RSA | RSA | AES_256_GCM | SHA384 | | TLS_RSA_WITH_AES_128_CBC_SHA | RSA | RSA | AES_128_CBC | SHA | | TLS_RSA_WITH_AES_256_CBC_SHA | RSA | RSA | AES_256_CBC | SHA | | TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA | ECDHE | RSA | 3DES_EDE_CBC | SHA | | TLS_RSA_WITH_3DES_EDE_CBC_SHA | RSA | RSA | 3DES_EDE_CBC | SHA | | TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 | ECDHE | ECDSA | AES_128_CBC | SHA256 | | TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 | ECDHE | RSA | AES_128_CBC | SHA256 | | TLS_RSA_WITH_AES_128_CBC_SHA256 | RSA | RSA | AES_128_CBC | SHA256 | | TLS_ECDHE_ECDSA_WITH_RC4_128_SHA | ECDHE | ECDSA | RC4_128 | SHA | | TLS_ECDHE_RSA_WITH_RC4_128_SHA | ECDHE | RSA | RC4_128 | SHA | | TLS_RSA_WITH_RC4_128_SHA | RSA | RSA | RC4_128 | SHA | | TLS_AES_128_GCM_SHA256 | (TLS 1.3) | (TLS 1.3) | AES_128_GCM | SHA256 | | TLS_AES_256_GCM_SHA384 | (TLS 1.3) | (TLS 1.3) | AES_256_GCM | SHA384 | | TLS_CHACHA20_POLY1305_SHA256 | (TLS 1.3) | (TLS 1.3) | CHACHA20_POLY1305 | SHA256 | #### target configuration options Target supported options: ```yaml targets: # target name or an address (IP or DNS name). # if an address is set it can include a port number or not, # if a port is not included, the default gRPC port will be added. target_key: # target name, will default to the target_key if not specified name: target_key # target address, if missing the target_key is used as an address. # supports comma separated addresses. # if any of the addresses is missing a port, the default gRPC port will be added. # if multiple addresses are set, all of them will be tried simultaneously, # the first established gRPC connection will be used, the other attempts will be canceled. address: # target username username: # target password password: # authentication token, # applied only in the case of a secure gRPC connection. token: # target RPC timeout timeout: # establish an insecure connection insecure: # path to tls ca file tls-ca: # path to tls certificate tls-cert: # path to tls key tls-key: # max tls version to use during negotiation tls-max-version: # min tls version to use during negotiation tls-min-version: # preferred tls version to use during negotiation # this value overwrites both tls-min-version and # tls-max-version tls-version: # enable logging of a pre-master TLS secret log-tls-secret: # do not verify the target certificate when using tls skip-verify: # server name used to verify the hostname on the returned # certificates unless skip-verify is true. tls-server-name: # list of subscription names to establish for this target. # if empty it defaults to all subscriptions defined under # the main level `subscriptions` field subscriptions: # string, case insensitive, defines the gNMI encoding to be used for # the subscriptions to be established for this target. # This encoding value applies only if the subscription configuration does # NOT explicitly define an encoding. encoding: # list of output names to which the gnmi data will be written. # if empty if defaults to all outputs defined under # the main level `outputs` field outputs: # number of subscribe responses to keep in buffer before writing # the target outputs buffer-size: # target retry period retry-timer: # list of tags, relevant when clustering is enabled. tags: # a mapping of static tags to add to all events from this target. # each key/value pair in this mapping will be added to metadata # on all events event-tags: # list of proto file names to decode protoBytes values proto-files: # list of directories to look for the proto files proto-dirs: # enable grpc gzip compression gzip: # proxy type and address, only SOCKS5 is supported currently # example: socks5://
: proxy: # list of custom TLS cipher suites to advertise to the target # during the TLS handshake. cipher-suites: # a duration, sets the TCP keepalive time and keepalive interval. # The number of keepalive probes to send before sending a TCP RST # is not configurable, it inherits its value from the linux kernel # net.ipv4.tcp_keepalive_probes which usually has a default value of 9. # When empty or set to 0s, the Golang default (15s) applies. # Disabled if set to a negative value. tcp-keepalive: 0s # sets gRPC keepalive parameters according to: # https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md grpc-keepalive: # After a duration of this time if the client doesn't see any activity # it pings the server to see if the transport is still alive. # If set below 10s, a minimum value of 10s will be used instead. time: # After having pinged for keepalive check, the client waits # for a duration of Timeout and if no activity is seen even # after that the connection is closed. timeout: # If true, client sends keepalive pings even with no active RPCs. # If false, when there are no active RPCs, # Time and Timeout will be ignored and no keepalive pings will be sent. permit-without-stream: false # set how much data (in bytes) can be read at most for each read syscall. # The default value for this buffer is 32KB. Zero or negative values will # disable read buffer for a connection so data framer can access the underlying conn directly. grpc-read-buffer-size: # determines how much data (in bytes) can be batched before doing a write on the wire. # The default value for this buffer is 32KB. # Zero or negative values will disable the write buffer such that each write will be on underlying connection. # Note: A Send call may not directly translate to a write. grpc-write-buffer-size: # sets the value for initial window size on a connection. The lower bound for window size is 64K and any value smaller than that will be ignored. grpc-conn-window-size: # sets the value for initial window size on a stream. The lower bound for window size is 64K and any value smaller than that will be ignored. grpc-window-size: # sets the initial connection window size to the value provided and disables dynamic flow control. grpc-static-conn-window-size: # sets the initial stream window size to the value provided and disables dynamic flow control. grpc-static-stream-window-size: ``` ### Example Whatever configuration option you choose, the multi-targeted operations will uniformly work across the commands that support them. Consider the `get` command acting on two routers getting their names: ```shell ❯ gnmic -a router1.lab.net:57400 \ -a router2.lab.net:57400 \ get --path /configure/system/name [router1.lab.net:57400] { [router1.lab.net:57400] "source": "router1.lab.net:57400", [router1.lab.net:57400] "timestamp": 1593009759618786781, [router1.lab.net:57400] "time": "2020-06-24T16:42:39.618786781+02:00", [router1.lab.net:57400] "updates": [ [router1.lab.net:57400] { [router1.lab.net:57400] "Path": "configure/system/name", [router1.lab.net:57400] "values": { [router1.lab.net:57400] "configure/system/name": "gnmic_r1" [router1.lab.net:57400] } [router1.lab.net:57400] } [router1.lab.net:57400] ] [router1.lab.net:57400] } [router2.lab.net:57400] { [router2.lab.net:57400] "source": "router2.lab.net:57400", [router2.lab.net:57400] "timestamp": 1593009759748265232, [router2.lab.net:57400] "time": "2020-06-24T16:42:39.748265232+02:00", [router2.lab.net:57400] "updates": [ [router2.lab.net:57400] { [router2.lab.net:57400] "Path": "configure/system/name", [router2.lab.net:57400] "values": { [router2.lab.net:57400] "configure/system/name": "gnmic_r2" [router2.lab.net:57400] } [router2.lab.net:57400] } [router2.lab.net:57400] ] [router2.lab.net:57400] } ``` Notice how in the output the different gNMI targets are prefixed with the target address to make the output easy to read. If those prefixes are not needed, you can make them disappear with [`--no-prefix`](../../global_flags.md#no-prefix) global flag. ================================================ FILE: docs/user_guide/targets/targets_session_sec.md ================================================ # Targets session security In line with the guidelines detailed in the [gNMI Specification](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#31-session-security-authentication-and-rpc-authorization), it is mandatory to establish an encrypted TLS session between the client and the server. This measure is essential to ensure secure communication within the gNMI protocol. ```text The session between the client and server MUST be encrypted using TLS - and a target or client MUST NOT fall back to unencrypted sessions. The target and client SHOULD implement TLS >= 1.2. ``` `gNMIc` provides the ability to tailor and modify the TLS session parameters of the gNMI client according to your specific requirements. ## TLS session types When it comes to establishing a TLS session using `gNMIc`, various options are available to suit different use cases and environmental requirements. Whether it's a one-way TLS session, a session without certificate validation, or a mutual TLS (mTLS) session, each type caters to specific needs. The selection largely depends on the user's scenario and the degree of security and validation necessary. The upcoming sections will detail each of these session types, offering guidelines to aid in choosing the most appropriate for your specific requirements. ### Simple TLS session w/o server certificate validation For scenarios requiring a simple TLS session without server certificate validation, such as in certain testing or development environments, you can use gNMIc's `--skip-verify` flag or the `skip-verify` attribute. This mode bypasses the typical certificate verification process and establishes a secure connection without validating the server's identity. Please exercise caution when using this feature, as it may expose the connection to potential security vulnerabilities. It is recommended primarily for non-production environments or controlled testing situations. === "cli" ```shell gnmic -a router1 --skip-verify \ get --path /interface/oper-state ``` === "file" ```yaml targets: router1: address: router1 skip-verify: true ``` ### Simple TLS session with server certificate validation When establishing a simple TLS session with server certificate validation for enhanced security, gNMIc offers the --tls-ca flag or the tls-ca attribute. These options allow you to point to a Certificate Authority (CA) certificate file. By doing so, the session not only ensures encrypted communication but also verifies the server's identity through its certificate. This validation process greatly enhances the security of the connection, ensuring the client is communicating with the intended server. It's an advisable setting for production environments where data security and integrity are crucial. === "cli" ```shell gnmic -a router1 --tls-ca ./ca.pem \ get --path /interface/oper-state ``` === "file" ```yaml targets: router1: address: router1 tls-ca: ./ca.pem ``` ### Simple TLS session with server certificate validation and server name override There are circumstances where the server's identity, as indicated by its certificate, doesn't match its expected hostname. For such scenarios, gNMIc enables the initiation of a simple TLS session with both server certificate validation and server name override. This functionality can be utilized by employing the `--tls-server-name` flag or the `tls-server-name` attribute. By overriding the server name in the TLS session, users can specify a different hostname that matches the server's certificate, even if it's not the actual hostname of the server. This allows for successful validation and secure communication even in cases of server name discrepancies due to reasons like load balancing, proxying, etc... This feature is particularly beneficial in complex network scenarios or during migrations, where server names might not yet align with their certificates. By ensuring both secure encrypted communication and flexible server name accommodation, it adds an extra layer of adaptability for secure communication, particularly in dynamic or complex network environments. === "cli" ```shell gnmic -a router1 --tls-ca ./ca.pem \ --tls-server-name server1 \ get --path /interface/oper-state ``` === "file" ```yaml targets: router1: address: router1 tls-ca: ./ca.pem tls-server-name: server1 ``` ### Mutual TLS (mTLS) session For heightened security scenarios, gNMIc supports mutual TLS (mTLS) sessions. mTLS not only verifies the server's identity to the client, but also the client's identity to the server. This reciprocal verification is achieved using the --tls-cert and --tls-key flags, or the tls-cert and tls-key attributes. These options allow the user to specify a client certificate and client key, respectively. By providing a client certificate (`--tls-cert` or `tls-cert` attribute) and a client key (`--tls-key` or `tls-key` attribute), gNMIc allows the server to confirm the identity of the client, ensuring that the client is legitimate and authorized to access the server resources. Mutual TLS is particularly beneficial in use cases where both ends of a connection need to confirm the other's identity, providing a significantly higher level of trust and security. It reduces the risk of man-in-the-middle attacks and is especially valuable in environments where sensitive data is transmitted or strict access control is required. === "cli" ```shell gnmic -a router1 --tls-ca ./ca.pem \ --tls-cert ./router1.cert \ --tls-key ./router.key \ get --path /interface/oper-state ``` === "file" ```yaml targets: router1: address: router1 tls-ca: ./ca.pem tls-cert: ./router1.cert tls-key: ./router1.key ``` ### mTLS session with server name override === "cli" ```shell gnmic -a router1 --tls-ca ./ca.pem \ --tls-server-name server1 \ --tls-cert ./router1.cert \ --tls-key ./router.key \ get --path /interface/oper-state ``` === "file" ```yaml targets: router1: address: router1 tls-ca: ./ca.pem tls-server-name: server1 tls-cert: ./router1.cert tls-key: ./router1.key ``` ## Configuring the client's TLS version By default, `gNMIc` establishes a TLS session using the Golang's default TLS version (1.2), minimum version (1.2), and maximum version (1.3). However, there might be scenarios where users need to control the TLS session negotiation to either test the server behavior or force the session into a specific version. To accommodate these needs, gNMIc provides flexibility by allowing users to explicitly set the TLS version. Users can manipulate the negotiated TLS version using the flags (or target attributes) `--tls-version`, `--tls-min-version`, and `--tls-max-version`. These flags give control over the TLS session parameters, facilitating testing and customization of the communication session according to specific requirements. Example: Forcing the client and server to use TLS1.3 === "cli" ```shell gnmic -a router1 --tls-ca ./ca.pem \ --tls-cert ./router1.cert \ --tls-key ./router.key \ --tls-version 1.3 \ --tls-min-version 1.3 \ get --path /interface/oper-state ``` === "file" ```yaml targets: router1: address: router1 tls-ca: ./ca.pem tls-cert: ./router1.cert tls-key: ./router1.key tls-version: 1.3 tls-min-version: 1.3 ``` ## Decrypting gNMI traffic using Wireshark To facilitate advanced debugging or network analysis, gNMIc allows for the decryption of gNMI TLS traffic using the popular network protocol analyzer, Wireshark. The `--log-tls-secret` flag is instrumental in achieving this, as it stores the session pre-master secret, which can subsequently be used to decrypt TLS traffic. When `--log-tls-secret` is used, the session's pre-master secret will be stored in a file named `.tlssecret.log`. This secret enables Wireshark to decrypt the otherwise secure and encrypted TLS traffic between the client and the server. Decryption of TLS traffic is particularly useful for network troubleshooting, performance optimization, or security audits. It allows network administrators or developers to deeply inspect packet data, diagnose network issues, and better understand data flows. However, this practice should be used carefully and ethically, given the sensitive nature of decrypted traffic, especially in production environments. === "cli" ```shell gnmic -a router1 --tls-ca ./ca.pem \ --log-tls-secret \ --tls-cert ./router1.cert \ --tls-key ./router.key \ get --path /interface/oper-state ``` === "file" ```yaml targets: router1: address: router1 tls-ca: ./ca.pem log-tls-secret: true tls-cert: ./router1.cert tls-key: ./router1.key ``` ================================================ FILE: docs/user_guide/tunnel_server.md ================================================ # Tunnel Server ## Introduction `gNMIc` supports gNMI Dial-out as defined by [`openconfig/grpctunnel`](https://github.com/openconfig/grpctunnel). `gNMIc` embeds a tunnel server to which the gNMI targets register. Once registered, `gNMIc` triggers the request gNMI RPC towards the target via the established tunnel. This use case is described [here](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmignoissh-dialout-grpctunnel.md#gnmi-collector-with-embedded-tunnel-server) ## Server operation When running a Subscribe RPC using `gNMIc` with the flag `--use-tunnel-server`,`gNMIc` starts by running the Tunnel server as defined under `tunnel-server`. The next steps depend on the type of RPC (Unary/Stream) and/or Subscribe Mode (poll/once/stream) ### Unary RPCs `gNMIc` waits for `tunnel-server.target-wait-time` for targets to register with the tunnel server, after which it requests a new session from the server for the specified target(s) and runs the RPC through the newly established tunnel. Note that if no target is specified, the RPC runs for all registered targets. ```bash $ cat tunnel_server_config.yaml insecure: true log: true username: admin password: NokiaSrl1! tunnel-server: address: ":57401" ``` ```bash $ gnmic --config tunnel_server_config.yaml \ --use-tunnel-server \ get \ --path /configure/system/name 2022/03/09 10:12:34.729037 [gnmic] version=dev, commit=none, date=unknown, gitURL=, docs=https://gnmic.openconfig.net 2022/03/09 10:12:34.729063 [gnmic] using config file "tunnel_server_config.yaml" 2022/03/09 10:12:34.730472 [gnmic] waiting for targets to register with the tunnel server... 2022/03/09 10:12:36.435521 [gnmic] tunnel server discovered target {ID:sr1 Type:GNMI_GNOI} 2022/03/09 10:12:36.436332 [gnmic] tunnel server discovered target {ID:sr2 Type:GNMI_GNOI} 2022/03/09 10:12:36.731125 [gnmic] adding target {"name":"sr1","address":"sr1","username":"admin","password":"NokiaSrl1!","timeout":10000000000,"insecure":true,"skip-verify":false,"subscriptions":["sub1"],"retry-timer":10000000000,"log-tls-secret":false,"gzip":false,"token":""} 2022/03/09 10:12:36.731158 [gnmic] adding target {"name":"sr2","address":"sr2","username":"admin","password":"NokiaSrl1!","timeout":10000000000,"insecure":true,"skip-verify":false,"subscriptions":["sub1"],"retry-timer":10000000000,"log-tls-secret":false,"gzip":false,"token":""} 2022/03/09 10:12:36.731651 [gnmic] sending gNMI GetRequest: prefix='', path='[elem:{name:"configure"} elem:{name:"system"} elem:{name:"name"}]', type='ALL', encoding='JSON', models='[]', extension='[]' to sr1 2022/03/09 10:12:36.731742 [gnmic] sending gNMI GetRequest: prefix='', path='[elem:{name:"configure"} elem:{name:"system"} elem:{name:"name"}]', type='ALL', encoding='JSON', models='[]', extension='[]' to sr2 2022/03/09 10:12:36.732337 [gnmic] dialing tunnel connection for tunnel target "sr2" 2022/03/09 10:12:36.732572 [gnmic] dialing tunnel connection for tunnel target "sr1" [sr1] [ [sr1] { [sr1] "source": "sr1", [sr1] "timestamp": 1646849561604621769, [sr1] "time": "2022-03-09T10:12:41.604621769-08:00", [sr1] "updates": [ [sr1] { [sr1] "Path": "configure/system/name", [sr1] "values": { [sr1] "configure/system/name": "sr1" [sr1] } [sr1] } [sr1] ] [sr1] } [sr1] ] [sr2] [ [sr2] { [sr2] "source": "sr2", [sr2] "timestamp": 1646849562004804732, [sr2] "time": "2022-03-09T10:12:42.004804732-08:00", [sr2] "updates": [ [sr2] { [sr2] "Path": "configure/system/name", [sr2] "values": { [sr2] "configure/system/name": "sr2" [sr2] } [sr2] } [sr2] ] [sr2] } [sr2] ] ``` ### Subscribe RPC #### Poll and Once subscription When a Poll or Once subscription are requested, `gNMIc` behaves the same way as for a unary RPC, i.e waits for targets to register then runs the RPC. #### Stream subscription In the case of a stream subscription, `gNMIc` triggers the Subscribe RPC as soon as a target registers. Similarly, a stream subscription will be stopped when a target deregisters from the tunnel server. ## Configuration ```yaml tunnel-server: # the address the tunnel server will listen to address: # tls config tls: # string, path to the CA certificate file, # this certificate is used to verify the clients certificates. ca-file: # string, server certificate file. cert-file: # string, server key file. key-file: # string, one of `"", "request", "require", "verify-if-given", or "require-verify" # - request: The server requests a certificate from the client but does not # require the client to send a certificate. # If the client sends a certificate, it is not required to be valid. # - require: The server requires the client to send a certificate and does not # fail if the client certificate is not valid. # - verify-if-given: The server requests a certificate, # does not fail if no certificate is sent. # If a certificate is sent it is required to be valid. # - require-verify: The server requires the client to send a valid certificate. # # if no ca-file is present, `client-auth` defaults to ""` # if a ca-file is set, `client-auth` defaults to "require-verify"` client-auth: "" # the wait time before triggering unary RPCs or subscribe poll/once target-wait-time: 2s # enables the collection of Prometheus gRPC server metrics enable-metrics: false # enable additional debug logs debug: false ``` ## Combining Tunnel server with a gNMI server It is possible to start `gNMIc` with both a `gnmi-server` and `tunnel-server` enabled. This mode allows to run gNMI RPCs against `gNMIc`'s gNMI server, they will routed to the relevant targets (`--target` flag) or to all known target (i.e registered targets) The configuration file would look like: ```yaml insecure: true username: admin password: NokiaSrl1! subscriptions: sub1: paths: - /state/port sample-interval: 10s gnmi-server: address: :57400 tunnel-server: address: :57401 targets: - id: .* type: GNMI_GNOI config: subscriptions: - sub1 ``` Running a Get RPC towards all registered targets ```bash $ gnmic -a localhost:57400 --insecure get \ --path /configure/system/name [ { "source": "localhost", "timestamp": 1646850987401608313, "time": "2022-03-09T10:36:27.401608313-08:00", "target": "sr2", "updates": [ { "Path": "configure/system/name", "values": { "configure/system/name": "sr2" } } ] }, { "source": "localhost", "timestamp": 1646850987205206394, "time": "2022-03-09T10:36:27.205206394-08:00", "target": "sr1", "updates": [ { "Path": "configure/system/name", "values": { "configure/system/name": "sr1" } } ] } ] ``` Running a Get RPC towards a single target ```bash $ gnmic -a localhost:57400 --insecure \ --target sr1 \ get --path /configure/system/name [ { "source": "localhost", "timestamp": 1646851044004381267, "time": "2022-03-09T10:37:24.004381267-08:00", "target": "sr1", "updates": [ { "Path": "configure/system/name", "values": { "configure/system/name": "sr1" } } ] } ] ``` For detailed configuration of the `gnmi-server` check this [page](./gnmi_server.md) ================================================ FILE: examples/deployments/1.single-instance/1.nats-output/containerlab/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf log: true loader: type: docker filters: - containers: - label=clab-node-kind: nokia_srlinux label=containerlab: lab11 subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s outputs: nats-output: type: nats address: clab-lab11-nats:4222 subject: telemetry ================================================ FILE: examples/deployments/1.single-instance/1.nats-output/containerlab/nats.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab11 topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux nodes: spine1: spine2: leaf1: leaf2: leaf3: leaf4: gnmic: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml --log subscribe' nats: kind: linux image: nats:latest ports: - 4222:4222 links: # spine1 links - endpoints: ["spine1:e1-1", "leaf1:e1-1"] - endpoints: ["spine1:e1-2", "leaf2:e1-1"] - endpoints: ["spine1:e1-3", "leaf3:e1-1"] - endpoints: ["spine1:e1-4", "leaf4:e1-1"] # spine2 links - endpoints: ["spine2:e1-1", "leaf1:e1-2"] - endpoints: ["spine2:e1-2", "leaf2:e1-2"] - endpoints: ["spine2:e1-3", "leaf3:e1-2"] - endpoints: ["spine2:e1-4", "leaf4:e1-2"] ================================================ FILE: examples/deployments/1.single-instance/1.nats-output/docker-compose/docker-compose.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 version: '3' networks: gnmic-net: driver: bridge services: gnmic1: image: ghcr.io/openconfig/gnmic:latest container_name: gnmic1 volumes: - ./gnmic1.yaml:/app/gnmic.yaml command: "subscribe --config /app/gnmic.yaml" networks: - gnmic-net depends_on: - nats nats: image: 'nats:latest' container_name: nats networks: - gnmic-net ports: - "4222:4222" - "6222:6222" - "8222:8222" ================================================ FILE: examples/deployments/1.single-instance/1.nats-output/docker-compose/gnmic1.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! insecure: true encoding: json_ietf log: true targets: # Add targets configuration here # e.g: # 192.168.1.131:57400: # username: gnmic # password: secret_password # outputs: # - nats-output subscriptions: # Add subscriptions configuration here # e.g: # sub1: # paths: # - /interface/statistics # stream-mode: sample # sample-interval: 1s outputs: nats-output: type: nats address: nats:4222 subject: telemetry ================================================ FILE: examples/deployments/1.single-instance/10.prometheus-with-cache/containerlab/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true log: true loader: type: docker filters: - containers: - label=clab-node-kind: srl label=containerlab: lab110 subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface[name=*]/statistics - /interface[name=*]/description stream-mode: sample sample-interval: 10s encoding: ascii sub2: paths: - /acl/cpm-filter/ipv4-filter/entry/statistics - /acl/cpm-filter/ipv6-filter/entry/statistics stream-mode: sample sample-interval: 10s encoding: ascii outputs: prom-output-redis: type: prometheus listen: "clab-lab110-gnmic:9804" service-registration: address: clab-lab110-consul-agent:8500 event-processors: - group_by_interface cache: type: redis address: "clab-lab110-redis:6379" prom-output-nats: type: prometheus listen: "clab-lab110-gnmic:9805" service-registration: address: clab-lab110-consul-agent:8500 event-processors: - group_by_interface cache: type: nats prom-output-js: type: prometheus listen: "clab-lab110-gnmic:9806" service-registration: address: clab-lab110-consul-agent:8500 event-processors: - group_by_interface cache: type: jetstream prom-output-oc: type: prometheus listen: "clab-lab110-gnmic:9807" service-registration: address: clab-lab110-consul-agent:8500 event-processors: - group_by_interface cache: {} processors: group_by_interface: event-group-by: tags: - target - interface_name api-server: enable-metrics: true ================================================ FILE: examples/deployments/1.single-instance/10.prometheus-with-cache/containerlab/grafana/datasources/datasource.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: 1 deleteDatasources: - name: Prometheus orgId: 1 datasources: - name: Prometheus type: prometheus orgId: 1 url: http://clab-lab110-prometheus:9090 password: user: database: basicAuth: false basicAuthUser: basicAuthPassword: withCredentials: isDefault: true version: 1 editable: true ================================================ FILE: examples/deployments/1.single-instance/10.prometheus-with-cache/containerlab/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: clab-lab110-consul-agent:8500 services: - prometheus-prom-output-js - prometheus-prom-output-nats - prometheus-prom-output-oc - prometheus-prom-output-redis - job_name: 'gnmic-internal' scrape_interval: 10s static_configs: - targets: - clab-lab110-gnmic:7890 ================================================ FILE: examples/deployments/1.single-instance/10.prometheus-with-cache/containerlab/prometheus.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab110 mgmt: ipv4-subnet: 192.168.1.0/24 topology: defaults: kind: srl kinds: srl: image: ghcr.io/nokia/srlinux nodes: spine1: spine2: leaf1: leaf2: leaf3: leaf4: gnmic: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7890:7890 cmd: "--config /app/gnmic.yaml --log subscribe" consul-agent: kind: linux image: hashicorp/consul:latest ports: - 8500:8500 - 8600:8600/udp cmd: "agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0" prometheus: kind: linux image: prom/prometheus:latest user: 65534:65534 ports: - 9090:9090 binds: - ./prometheus/:/etc/prometheus/ cmd: | --config.file=/etc/prometheus/prometheus.yaml --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles --log.level=debug grafana: kind: linux image: grafana/grafana:latest binds: - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro #- grafana/dashboards/:/var/lib/grafana/dashboards ports: - 3000:3000 redis: kind: linux image: redis:7 ports: - 6379:6379 cmd: redis-server --loglevel warning links: # spine1 links - endpoints: ["spine1:e1-1", "leaf1:e1-1"] - endpoints: ["spine1:e1-2", "leaf2:e1-1"] - endpoints: ["spine1:e1-3", "leaf3:e1-1"] - endpoints: ["spine1:e1-4", "leaf4:e1-1"] # spine2 links - endpoints: ["spine2:e1-1", "leaf1:e1-2"] - endpoints: ["spine2:e1-2", "leaf2:e1-2"] - endpoints: ["spine2:e1-3", "leaf3:e1-2"] - endpoints: ["spine2:e1-4", "leaf4:e1-2"] ================================================ FILE: examples/deployments/1.single-instance/11.kafka-kraft-output/containerlab/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf log: true # docker target loader loader: type: docker filters: - containers: - label=clab-node-kind: nokia_srlinux label=containerlab: lab12 subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s outputs: kafka-output: type: kafka address: clab-lab12-kafka-server:9092 topic: telemetry ================================================ FILE: examples/deployments/1.single-instance/11.kafka-kraft-output/containerlab/kafka.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab12 topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux nodes: spine1: spine2: leaf1: leaf2: leaf3: leaf4: gnmic: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml --log subscribe' kafka-server: kind: linux image: bitnami/kafka:latest ports: - 9092:9092 - 9093:9093 env: KAFKA_ENABLE_KRAFT: yes ALLOW_PLAINTEXT_LISTENER: yes KAFKA_CFG_NODE_ID: 0 KAFKA_CFG_PROCESS_ROLES: broker,controller KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER KAFKA_CFG_LISTENERS: PLAINTEXT://:9092,CONTROLLER://:9093 KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://clab-lab12-kafka-server:9092 KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: 0@clab-lab12-kafka-server:9093 links: # spine1 links - endpoints: ["spine1:e1-1", "leaf1:e1-1"] - endpoints: ["spine1:e1-2", "leaf2:e1-1"] - endpoints: ["spine1:e1-3", "leaf3:e1-1"] - endpoints: ["spine1:e1-4", "leaf4:e1-1"] # spine2 links - endpoints: ["spine2:e1-1", "leaf1:e1-2"] - endpoints: ["spine2:e1-2", "leaf2:e1-2"] - endpoints: ["spine2:e1-3", "leaf3:e1-2"] - endpoints: ["spine2:e1-4", "leaf4:e1-2"] ================================================ FILE: examples/deployments/1.single-instance/2.kafka-output/containerlab/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf log: true # docker target loader loader: type: docker filters: - containers: - label=clab-node-kind: nokia_srlinux label=containerlab: lab12 subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s outputs: kafka-output: type: kafka address: clab-lab12-kafka-server:9092 topic: telemetry ================================================ FILE: examples/deployments/1.single-instance/2.kafka-output/containerlab/kafka.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab12 topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux nodes: spine1: spine2: leaf1: leaf2: leaf3: leaf4: gnmic: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml --log subscribe' kafka-server: kind: linux image: bitnami/kafka:3.5.2 ports: - 9092:9092 - 9000:9000 env: KAFKA_CFG_ZOOKEEPER_CONNECT: clab-lab12-zookeeper-server:2181 KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://clab-lab12-kafka-server:9092 ALLOW_PLAINTEXT_LISTENER: "yes" JMX_PORT: 9000 zookeeper-server: kind: linux image: bitnami/zookeeper:latest ports: - 2181:2181 env: ALLOW_ANONYMOUS_LOGIN: "yes" links: # spine1 links - endpoints: ["spine1:e1-1", "leaf1:e1-1"] - endpoints: ["spine1:e1-2", "leaf2:e1-1"] - endpoints: ["spine1:e1-3", "leaf3:e1-1"] - endpoints: ["spine1:e1-4", "leaf4:e1-1"] # spine2 links - endpoints: ["spine2:e1-1", "leaf1:e1-2"] - endpoints: ["spine2:e1-2", "leaf2:e1-2"] - endpoints: ["spine2:e1-3", "leaf3:e1-2"] - endpoints: ["spine2:e1-4", "leaf4:e1-2"] ================================================ FILE: examples/deployments/1.single-instance/2.kafka-output/docker-compose/docker-compose.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 version: '3' networks: gnmic-net: driver: bridge services: gnmic1: image: ghcr.io/openconfig/gnmic:latest container_name: gnmic1 volumes: - ./gnmic1.yaml:/app/gnmic.yaml command: "subscribe --config /app/gnmic.yaml" networks: - gnmic-net depends_on: - kafka-server kafka-server: image: 'bitnami/kafka:latest' container_name: kafka-server networks: - gnmic-net ports: - "9092:9092" - "9000:9000" environment: - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper-server:2181 - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka-server:9092 - ALLOW_PLAINTEXT_LISTENER=yes - JMX_PORT=9000 depends_on: - zookeeper-server zookeeper-server: image: 'bitnami/zookeeper:latest' container_name: zk-server networks: - gnmic-net ports: - "2181:2181" environment: - ALLOW_ANONYMOUS_LOGIN=yes ================================================ FILE: examples/deployments/1.single-instance/2.kafka-output/docker-compose/gnmic1.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! insecure: true encoding: json_ietf log: true targets: # Add targets configuration here # e.g: # 192.168.1.131:57400: # username: gnmic # password: secret_password # outputs: # - nats-output subscriptions: # Add subscriptions configuration here # e.g: # sub1: # paths: # - /interface/statistics # stream-mode: sample # sample-interval: 1s outputs: kafka-output: type: kafka address: kafka-server:9092 topic: telemetry ================================================ FILE: examples/deployments/1.single-instance/3.influxdb-output/containerlab/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true encoding: ascii log: true loader: type: docker filters: - containers: - label=clab-node-kind: nokia_srlinux label=containerlab: lab13 subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s outputs: influxdb-output: type: influxdb url: http://clab-lab13-influxdb:8086 bucket: telemetry # db name token: gnmic:gnmic123 # username:password batch-size: 1000 flush-timer: 10s ================================================ FILE: examples/deployments/1.single-instance/3.influxdb-output/containerlab/grafana/datasources/datasource.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: 1 deleteDatasources: - name: InfluxDB orgId: 1 datasources: - name: InfluxDB type: influxdb orgId: 1 url: http://clab-lab13-influxdb:8086 user: gnmic password: gnmic database: telemetry editable: true ================================================ FILE: examples/deployments/1.single-instance/3.influxdb-output/containerlab/influxdb.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab13 topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux nodes: spine1: spine2: leaf1: leaf2: leaf3: leaf4: gnmic: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml --log subscribe' influxdb: kind: linux image: influxdb:1.8.10 ports: - 8086:8086 env: INFLUXDB_DATA_ENGINE: tsm1 INFLUXDB_REPORTING_DISABLED: "false" INFLUXDB_USER: gnmic INFLUXDB_USER_PASSWORD: gnmic INFLUXDB_DB: telemetry grafana: kind: linux image: grafana/grafana:latest binds: - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro #- grafana/dashboards/:/var/lib/grafana/dashboards ports: - 3000:3000 links: # spine1 links - endpoints: ["spine1:e1-1", "leaf1:e1-1"] - endpoints: ["spine1:e1-2", "leaf2:e1-1"] - endpoints: ["spine1:e1-3", "leaf3:e1-1"] - endpoints: ["spine1:e1-4", "leaf4:e1-1"] # spine2 links - endpoints: ["spine2:e1-1", "leaf1:e1-2"] - endpoints: ["spine2:e1-2", "leaf2:e1-2"] - endpoints: ["spine2:e1-3", "leaf3:e1-2"] - endpoints: ["spine2:e1-4", "leaf4:e1-2"] ================================================ FILE: examples/deployments/1.single-instance/3.influxdb-output/docker-compose/docker-compose.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 version: '3' networks: gnmic-net: driver: bridge services: gnmic1: image: ghcr.io/openconfig/gnmic:latest container_name: gnmic1 volumes: - ./gnmic1.yaml:/app/gnmic.yaml command: "subscribe --config /app/gnmic.yaml" networks: - gnmic-net depends_on: - influxdb influxdb: image: influxdb:1.8.10 container_name: influxdb networks: - gnmic-net ports: - "8083:8083" - "8086:8086" - "8090:8090" environment: - INFLUXDB_DATA_ENGINE=tsm1 - INFLUXDB_REPORTING_DISABLED=false - INFLUXDB_USER=gnmic - INFLUXDB_USER_PASSWORD=gnmic - INFLUXDB_DB=telemetry volumes: - influx-storage:/var/lib/influxdb volumes: influx-storage: ================================================ FILE: examples/deployments/1.single-instance/3.influxdb-output/docker-compose/gnmic1.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! insecure: true encoding: json_ietf log: true targets: # Add targets configuration here # eg: # 192.168.1.131:57400: # username: gnmic # password: secret_password subscriptions: # Add subscriptions configuration here # e.g: # sub1: # paths: # - /interface/statistics # stream-mode: sample # sample-interval: 1s outputs: influxdb-output: type: influxdb url: http://influxdb:8086 bucket: telemetry # db name token: gnmic:gnmic # username:password batch-size: 1000 flush-timer: 10s ================================================ FILE: examples/deployments/1.single-instance/4.prometheus-output/containerlab/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true log: true loader: type: docker filters: - containers: - label=clab-node-kind: nokia_srlinux label=containerlab: lab14 subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics - /interface/description stream-mode: sample sample-interval: 10s encoding: ascii outputs: prom-output: type: prometheus listen: "clab-lab14-gnmic:9804" cache: {} debug: true strings-as-labels: true service-registration: address: clab-lab14-consul-agent:8500 event-processors: - group_by_interface processors: group_by_interface: event-group-by: tags: - interface_name ================================================ FILE: examples/deployments/1.single-instance/4.prometheus-output/containerlab/grafana/datasources/datasource.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: 1 deleteDatasources: - name: Prometheus orgId: 1 datasources: - name: Prometheus type: prometheus orgId: 1 url: http://clab-lab14-prometheus:9090 password: user: database: basicAuth: false basicAuthUser: basicAuthPassword: withCredentials: isDefault: true version: 1 editable: true ================================================ FILE: examples/deployments/1.single-instance/4.prometheus-output/containerlab/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: clab-lab14-consul-agent:8500 services: - prometheus-prom-output ================================================ FILE: examples/deployments/1.single-instance/4.prometheus-output/containerlab/prometheus.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab14 mgmt: ipv4-subnet: 192.168.1.0/24 topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux nodes: spine1: spine2: leaf1: leaf2: leaf3: leaf4: gnmic: kind: linux image: ghcr.io/openconfig/gnmic binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: "--config /app/gnmic.yaml --log subscribe" consul-agent: kind: linux image: hashicorp/consul:latest ports: - 8500:8500 - 8600:8600/udp cmd: "agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0" prometheus: kind: linux image: prom/prometheus:latest user: 65534:65534 ports: - 9090:9090 binds: - ./prometheus/:/etc/prometheus/ cmd: | --config.file=/etc/prometheus/prometheus.yaml --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles --log.level=debug grafana: kind: linux image: grafana/grafana:latest binds: - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro #- grafana/dashboards/:/var/lib/grafana/dashboards ports: - 3000:3000 links: # spine1 links - endpoints: ["spine1:e1-1", "leaf1:e1-1"] - endpoints: ["spine1:e1-2", "leaf2:e1-1"] - endpoints: ["spine1:e1-3", "leaf3:e1-1"] - endpoints: ["spine1:e1-4", "leaf4:e1-1"] # spine2 links - endpoints: ["spine2:e1-1", "leaf1:e1-2"] - endpoints: ["spine2:e1-2", "leaf2:e1-2"] - endpoints: ["spine2:e1-3", "leaf3:e1-2"] - endpoints: ["spine2:e1-4", "leaf4:e1-2"] ================================================ FILE: examples/deployments/1.single-instance/4.prometheus-output/docker-compose/docker-compose.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 version: '3' networks: gnmic-net: driver: bridge services: gnmic1: image: ghcr.io/openconfig/gnmic:latest container_name: gnmic1 volumes: - ./gnmic1.yaml:/app/gnmic.yaml command: "subscribe --config /app/gnmic.yaml" networks: - gnmic-net ports: - 9804:9804 depends_on: - consul-agent consul-agent: image: hashicorp/consul:latest container_name: consul networks: - gnmic-net ports: - 8500:8500 - 8600:8600/udp command: agent -server -ui -node=server-1 -bootstrap-expect=1 -client=0.0.0.0 prometheus: image: prom/prometheus:latest container_name: prometheus volumes: - ./prometheus/:/etc/prometheus/ - prometheus-data:/prometheus command: - '--config.file=/etc/prometheus/prometheus.yaml' - '--storage.tsdb.path=/prometheus' - '--web.console.libraries=/usr/share/prometheus/console_libraries' - '--web.console.templates=/usr/share/prometheus/consoles' - '--log.level=debug' ports: - 9090:9090 networks: - gnmic-net volumes: prometheus-data: ================================================ FILE: examples/deployments/1.single-instance/4.prometheus-output/docker-compose/gnmic1.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! insecure: true encoding: json_ietf log: true targets: # Add targets configuration here # e.g: # 192.168.1.131:57400: # username: gnmic # password: secret_password subscriptions: # Add subscriptions configuration here # e.g: # sub1: # paths: # - /interface/statistics # stream-mode: sample # sample-interval: 1s outputs: prom-output: type: prometheus listen: "gnmic1:9804" service-registration: address: consul-agent:8500 ================================================ FILE: examples/deployments/1.single-instance/4.prometheus-output/docker-compose/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: consul-agent:8500 services: - prometheus-prom-output ================================================ FILE: examples/deployments/1.single-instance/5.multiple-outputs/containerlab/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf log: true loader: type: docker filters: - containers: - label=clab-node-kind: srl label=containerlab: lab15 subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s outputs: file-output: type: file filename: /app/file-out.txt prom-output: type: prometheus listen: "clab-lab15-gnmic:9804" service-registration: address: clab-lab15-consul-agent:8500 influxdb-output: type: influxdb url: http://clab-lab15-influxdb:8086 bucket: telemetry # db name token: gnmic:gnmic # username:password batch-size: 1000 flush-timer: 10s kafka-output: type: kafka address: clab-lab15-kafka-server:9092 topic: telemetry nats-output: type: nats address: clab-lab15-nats:4222 subject: telemetry ================================================ FILE: examples/deployments/1.single-instance/5.multiple-outputs/containerlab/grafana/datasources/datasource.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: 1 deleteDatasources: - name: Prometheus orgId: 1 - name: InfluxDB orgId: 1 datasources: - name: Prometheus type: prometheus orgId: 1 url: http://clab-lab15-prometheus:9090 password: user: database: basicAuth: false basicAuthUser: basicAuthPassword: withCredentials: isDefault: true version: 1 editable: true - name: InfluxDB type: influxdb orgId: 1 url: http://clab-lab15-influxdb:8086 user: gnmic password: gnmic database: telemetry editable: true ================================================ FILE: examples/deployments/1.single-instance/5.multiple-outputs/containerlab/multiple-outputs.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab15 topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux nodes: spine1: spine2: leaf1: leaf2: leaf3: leaf4: gnmic: kind: linux # image: ghcr.io/openconfig/gnmic:latest image: priv-gnmic binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml --log subscribe' nats: kind: linux image: nats:latest ports: - 4222:4222 kafka-server: kind: linux image: bitnami/kafka:latest ports: - 9092:9092 env: KAFKA_CFG_ZOOKEEPER_CONNECT: clab-lab15-zookeeper-server:2181 KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://clab-lab15-kafka-server:9092 ALLOW_PLAINTEXT_LISTENER: "yes" JMX_PORT: 9000 zookeeper-server: kind: linux image: bitnami/zookeeper:latest ports: - 2181:2181 env: ALLOW_ANONYMOUS_LOGIN: "yes" influxdb: kind: linux image: influxdb:1.8.10 ports: - 8086:8086 env: INFLUXDB_DATA_ENGINE: tsm1 INFLUXDB_REPORTING_DISABLED: "false" INFLUXDB_USER: gnmic INFLUXDB_USER_PASSWORD: gnmic INFLUXDB_DB: telemetry consul-agent: kind: linux image: hashicorp/consul:latest ports: - 8500:8500 - 8600:8600/udp cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0' prometheus: kind: linux image: prom/prometheus:latest user: 65534:65534 ports: - 9090:9090 binds: - ./prometheus/:/etc/prometheus/ cmd: | --config.file=/etc/prometheus/prometheus.yaml --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles --log.level=debug grafana: kind: linux image: grafana/grafana:latest binds: - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro #- grafana/dashboards/:/var/lib/grafana/dashboards ports: - 3000:3000 links: # spine1 links - endpoints: ["spine1:e1-1", "leaf1:e1-1"] - endpoints: ["spine1:e1-2", "leaf2:e1-1"] - endpoints: ["spine1:e1-3", "leaf3:e1-1"] - endpoints: ["spine1:e1-4", "leaf4:e1-1"] # spine2 links - endpoints: ["spine2:e1-1", "leaf1:e1-2"] - endpoints: ["spine2:e1-2", "leaf2:e1-2"] - endpoints: ["spine2:e1-3", "leaf3:e1-2"] - endpoints: ["spine2:e1-4", "leaf4:e1-2"] ================================================ FILE: examples/deployments/1.single-instance/5.multiple-outputs/containerlab/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: clab-lab15-consul-agent:8500 services: - prometheus-prom-output ================================================ FILE: examples/deployments/1.single-instance/5.multiple-outputs/docker-compose/docker-compose.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 version: '3' networks: gnmic-net: driver: bridge services: gnmic1: image: ghcr.io/openconfig/gnmic:latest container_name: gnmic1 volumes: - ./gnmic1.yaml:/app/gnmic.yaml command: "subscribe --config /app/gnmic.yaml" networks: - gnmic-net ports: - 9804:9804 depends_on: - consul-agent consul-agent: image: hashicorp/consul:latest container_name: consul networks: - gnmic-net ports: - 8500:8500 - 8600:8600/udp command: agent -server -ui -node=server-1 -bootstrap-expect=1 -client=0.0.0.0 prometheus: image: prom/prometheus:latest container_name: prometheus volumes: - ./prometheus/:/etc/prometheus/ - prometheus-data:/prometheus command: - '--config.file=/etc/prometheus/prometheus.yaml' - '--storage.tsdb.path=/prometheus' - '--web.console.libraries=/usr/share/prometheus/console_libraries' - '--web.console.templates=/usr/share/prometheus/consoles' - '--log.level=debug' ports: - 9090:9090 networks: - gnmic-net nats: image: 'nats:latest' container_name: nats networks: - gnmic-net ports: - "4222:4222" - "6222:6222" - "8222:8222" kafka-server: image: 'bitnami/kafka:latest' container_name: kafka-server networks: - gnmic-net ports: - "9092:9092" - "9000:9000" environment: - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper-server:2181 - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka-server:9092 - ALLOW_PLAINTEXT_LISTENER=yes - JMX_PORT=9000 depends_on: - zookeeper-server zookeeper-server: image: 'bitnami/zookeeper:latest' container_name: zk-server networks: - gnmic-net ports: - "2181:2181" environment: - ALLOW_ANONYMOUS_LOGIN=yes influxdb: image: influxdb:1.8.10 container_name: influxdb networks: - gnmic-net ports: - "8083:8083" - "8086:8086" - "8090:8090" environment: - INFLUXDB_DATA_ENGINE=tsm1 - INFLUXDB_REPORTING_DISABLED=false - INFLUXDB_USER=gnmic - INFLUXDB_USER_PASSWORD=gnmic - INFLUXDB_DB=telemetry volumes: - influx-storage:/var/lib/influxdb volumes: prometheus-data: influx-storage: ================================================ FILE: examples/deployments/1.single-instance/5.multiple-outputs/docker-compose/gnmic1.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! insecure: true encoding: json_ietf log: true targets: # Add targets configuration here # e.g: # 192.168.1.131:57400: # username: gnmic # password: secret_password subscriptions: # Add subscriptions configuration here # e.g: # sub1: # paths: # - /interface/statistics # stream-mode: sample # sample-interval: 1s outputs: file-output: type: file filename: /app/file-out.txt prom-output: type: prometheus listen: "gnmic1:9804" service-registration: address: consul-agent:8500 influxdb-output: type: influxdb url: http://influxdb:8086 bucket: telemetry # db name token: gnmic:gnmic # username:password batch-size: 1000 flush-timer: 10s kafka-output: type: kafka address: kafka-server:9092 topic: telemetry nats-output: type: nats address: nats:4222 subject: telemetry ================================================ FILE: examples/deployments/1.single-instance/5.multiple-outputs/docker-compose/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: consul-agent:8500 services: - prometheus-prom-output ================================================ FILE: examples/deployments/1.single-instance/6.prometheus-write-output/containerlab/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf log: true loader: type: docker filters: - containers: - label=clab-node-kind: nokia_srlinux label=containerlab: lab16 subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s sub2: paths: - /interface/subinterface/statistics stream-mode: sample sample-interval: 10s outputs: prom-remote: type: prometheus_write url: http://clab-lab16-prometheus:9090/api/v1/write metadata: include: true ================================================ FILE: examples/deployments/1.single-instance/6.prometheus-write-output/containerlab/grafana/datasources/datasource.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: 1 deleteDatasources: - name: prometheus orgId: 1 datasources: - name: prometheus type: prometheus orgId: 1 url: http://clab-lab16-prometheus:9090 password: user: database: basicAuth: false basicAuthUser: basicAuthPassword: withCredentials: isDefault: true version: 1 editable: true ================================================ FILE: examples/deployments/1.single-instance/6.prometheus-write-output/containerlab/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s ================================================ FILE: examples/deployments/1.single-instance/6.prometheus-write-output/containerlab/prometheus.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab16 mgmt: bridge: prom ipv4-subnet: 172.19.19.0/24 topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux nodes: spine1: spine2: leaf1: leaf2: leaf3: leaf4: gnmic: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: "--config /app/gnmic.yaml --log subscribe" consul-agent: kind: linux image: hashicorp/consul:latest ports: - 8500:8500 - 8600:8600/udp cmd: "agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0" prometheus: kind: linux image: prom/prometheus:latest user: 65534:65534 ports: - 9090:9090 binds: - ./prometheus/:/etc/prometheus/ cmd: | --config.file=/etc/prometheus/prometheus.yaml --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles --web.enable-remote-write-receiver --log.level=debug grafana: kind: linux image: grafana/grafana:latest binds: - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro # - grafana/dashboards/:/var/lib/grafana/dashboards ports: - 3000:3000 links: # spine1 links - endpoints: ["spine1:e1-1", "leaf1:e1-1"] - endpoints: ["spine1:e1-2", "leaf2:e1-1"] - endpoints: ["spine1:e1-3", "leaf3:e1-1"] - endpoints: ["spine1:e1-4", "leaf4:e1-1"] # spine2 links - endpoints: ["spine2:e1-1", "leaf1:e1-2"] - endpoints: ["spine2:e1-2", "leaf2:e1-2"] - endpoints: ["spine2:e1-3", "leaf3:e1-2"] - endpoints: ["spine2:e1-4", "leaf4:e1-2"] ================================================ FILE: examples/deployments/1.single-instance/7.cortex-output/containerlab/cortex/single-process-config-blocks.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 # Configuration for running Cortex in single-process mode. # This should not be used in production. It is only for getting started # and development. # Disable the requirement that every request to Cortex has a # X-Scope-OrgID header. `fake` will be substituted in instead. auth_enabled: false server: http_listen_port: 9009 # Configure the server to allow messages up to 100MB. grpc_server_max_recv_msg_size: 104857600 grpc_server_max_send_msg_size: 104857600 grpc_server_max_concurrent_streams: 1000 distributor: shard_by_all_labels: true pool: health_check_ingesters: true ingester_client: grpc_client_config: # Configure the client to allow messages up to 100MB. max_recv_msg_size: 104857600 max_send_msg_size: 104857600 grpc_compression: gzip ingester: lifecycler: # The address to advertise for this ingester. Will be autodiscovered by # looking up address on eth0 or en0; can be specified if this fails. # address: 127.0.0.1 # We want to start immediately and flush on shutdown. join_after: 0 min_ready_duration: 0s final_sleep: 0s num_tokens: 512 # Use an in memory ring store, so we don't need to launch a Consul. ring: kvstore: store: inmemory replication_factor: 1 storage: engine: blocks blocks_storage: tsdb: dir: /tmp/cortex/tsdb bucket_store: sync_dir: /tmp/cortex/tsdb-sync # You can choose between local storage and Amazon S3, Google GCS and Azure storage. Each option requires additional configuration # as shown below. All options can be configured via flags as well which might be handy for secret inputs. backend: filesystem # s3, gcs, azure or filesystem are valid options # s3: # bucket_name: cortex # endpoint: s3.amazonaws.com # Configure your S3 credentials below. # secret_access_key: "TODO" # access_key_id: "TODO" # gcs: # bucket_name: cortex # service_account: # if empty or omitted Cortex will use your default service account as per Google's fallback logic # azure: # account_name: # account_key: # container_name: # endpoint_suffix: # max_retries: # Number of retries for recoverable errors (defaults to 20) filesystem: dir: ./data/tsdb compactor: data_dir: /tmp/cortex/compactor sharding_ring: kvstore: store: inmemory frontend_worker: match_max_concurrent: true ruler: enable_api: true enable_sharding: false ruler_storage: backend: local local: directory: /tmp/cortex/rules ================================================ FILE: examples/deployments/1.single-instance/7.cortex-output/containerlab/cortexmetrics.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab17 mgmt: bridge: cortex ipv4-subnet: 172.19.19.0/24 topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux nodes: spine1: spine2: leaf1: leaf2: leaf3: leaf4: gnmic: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: "--config /app/gnmic.yaml --log subscribe" cortex: kind: linux image: quay.io/cortexproject/cortex:v1.18.1 ports: - 9009:9009 binds: - ./cortex/single-process-config-blocks.yaml:/etc/single-process-config-blocks.yaml:ro cmd: | -config.file=/etc/single-process-config-blocks.yaml grafana: kind: linux image: grafana/grafana:latest binds: - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro # - grafana/dashboards/:/var/lib/grafana/dashboards ports: - 3000:3000 links: # spine1 links - endpoints: ["spine1:e1-1", "leaf1:e1-1"] - endpoints: ["spine1:e1-2", "leaf2:e1-1"] - endpoints: ["spine1:e1-3", "leaf3:e1-1"] - endpoints: ["spine1:e1-4", "leaf4:e1-1"] # spine2 links - endpoints: ["spine2:e1-1", "leaf1:e1-2"] - endpoints: ["spine2:e1-2", "leaf2:e1-2"] - endpoints: ["spine2:e1-3", "leaf3:e1-2"] - endpoints: ["spine2:e1-4", "leaf4:e1-2"] ================================================ FILE: examples/deployments/1.single-instance/7.cortex-output/containerlab/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf log: true loader: type: docker filters: - containers: - label=clab-node-kind: srl label=containerlab: lab17 subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s sub2: paths: - /interface/subinterface/statistics stream-mode: sample sample-interval: 10s outputs: cortex: type: prometheus_write url: http://clab-lab17-cortex:9009/api/v1/push ================================================ FILE: examples/deployments/1.single-instance/7.cortex-output/containerlab/grafana/datasources/datasource.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: 1 deleteDatasources: - name: Cortex orgId: 1 datasources: - name: Cortex type: prometheus orgId: 1 url: http://clab-lab17-cortex:9009/prometheus password: user: database: basicAuth: false basicAuthUser: basicAuthPassword: withCredentials: isDefault: true version: 1 editable: true ================================================ FILE: examples/deployments/1.single-instance/8.victoria-metrics-output/containerlab/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf log: true loader: type: docker filters: - containers: - label=clab-node-kind: srl label=containerlab: lab18 subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s sub2: paths: - /interface/subinterface/statistics stream-mode: sample sample-interval: 10s outputs: cortex: type: prometheus_write url: http://clab-lab18-victoria:8428/api/v1/write ================================================ FILE: examples/deployments/1.single-instance/8.victoria-metrics-output/containerlab/grafana/datasources/datasource.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: 1 deleteDatasources: - name: victoria orgId: 1 datasources: - name: victoria type: prometheus orgId: 1 url: http://clab-lab18-victoria:8428 password: user: database: basicAuth: false basicAuthUser: basicAuthPassword: withCredentials: isDefault: true version: 1 editable: true ================================================ FILE: examples/deployments/1.single-instance/8.victoria-metrics-output/containerlab/victoriametrics.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab18 mgmt: bridge: victoria ipv4-subnet: 172.19.19.0/24 topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux nodes: spine1: spine2: leaf1: leaf2: leaf3: leaf4: gnmic: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: "--config /app/gnmic.yaml --log subscribe" victoria: kind: linux image: victoriametrics/victoria-metrics:latest ports: - 8428:8428 grafana: kind: linux image: grafana/grafana:latest binds: - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro # - grafana/dashboards/:/var/lib/grafana/dashboards ports: - 3000:3000 links: # spine1 links - endpoints: ["spine1:e1-1", "leaf1:e1-1"] - endpoints: ["spine1:e1-2", "leaf2:e1-1"] - endpoints: ["spine1:e1-3", "leaf3:e1-1"] - endpoints: ["spine1:e1-4", "leaf4:e1-1"] # spine2 links - endpoints: ["spine2:e1-1", "leaf1:e1-2"] - endpoints: ["spine2:e1-2", "leaf2:e1-2"] - endpoints: ["spine2:e1-3", "leaf3:e1-2"] - endpoints: ["spine2:e1-4", "leaf4:e1-2"] ================================================ FILE: examples/deployments/1.single-instance/9.jetstream-output/containerlab/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true encoding: ascii log: true loader: type: docker filters: - containers: - label=clab-node-kind: srl label=containerlab: lab19 subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s outputs: js-output: type: jetstream address: clab-lab19-nats:4222 subject-format: subscription.target.pathKeys format: proto stream: gnmic write-timeout: 10s num-workers: 2 create-stream: {} debug: true ================================================ FILE: examples/deployments/1.single-instance/9.jetstream-output/containerlab/jetstream.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab19 topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux nodes: spine1: spine2: leaf1: leaf2: leaf3: leaf4: gnmic: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml --log subscribe' nats: kind: linux image: nats:latest ports: - 4222:4222 - 6222:6222 - 8222:8222 cmd: '--http_port 8222 -js -D' links: # spine1 links - endpoints: ["spine1:e1-1", "leaf1:e1-1"] - endpoints: ["spine1:e1-2", "leaf2:e1-1"] - endpoints: ["spine1:e1-3", "leaf3:e1-1"] - endpoints: ["spine1:e1-4", "leaf4:e1-1"] # spine2 links - endpoints: ["spine2:e1-1", "leaf1:e1-2"] - endpoints: ["spine2:e1-2", "leaf2:e1-2"] - endpoints: ["spine2:e1-3", "leaf3:e1-2"] - endpoints: ["spine2:e1-4", "leaf4:e1-2"] ================================================ FILE: examples/deployments/2.clusters/1.influxdb-output/containerlab/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf log: true loader: type: docker filters: - containers: - label=clab-node-kind: srl label=containerlab: lab21 subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s # clustering config clustering: cluster-name: cluster1 targets-watch-timer: 30s locker: type: consul address: clab-lab21-consul-agent:8500 outputs: influxdb-output: type: influxdb url: http://clab-lab21-influxdb:8086 bucket: telemetry # db name token: gnmic:gnmic # username:password batch-size: 1000 flush-timer: 10s ================================================ FILE: examples/deployments/2.clusters/1.influxdb-output/containerlab/grafana/datasources/datasource.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: 1 deleteDatasources: - name: InfluxDB orgId: 1 datasources: - name: InfluxDB type: influxdb orgId: 1 url: http://clab-lab21-influxdb:8086 user: gnmic password: gnmic database: telemetry editable: true ================================================ FILE: examples/deployments/2.clusters/1.influxdb-output/containerlab/lab21.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab21 topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux nodes: super-spine1: super-spine2: spine1: spine2: spine3: spine4: leaf1: leaf2: leaf3: leaf4: leaf5: leaf6: leaf7: leaf8: gnmic1: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7890:7890 env: GNMIC_API: :7890 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab21-gnmic1 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab21-gnmic1 cmd: '--config /app/gnmic.yaml subscribe' gnmic2: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7891:7891 env: GNMIC_API: :7891 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab21-gnmic2 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab21-gnmic2 cmd: '--config /app/gnmic.yaml subscribe' gnmic3: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7892:7892 env: GNMIC_API: :7892 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab21-gnmic3 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab21-gnmic3 cmd: '--config /app/gnmic.yaml subscribe' consul-agent: kind: linux image: hashicorp/consul:latest ports: - 8500:8500 - 8600:8600/udp cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0' influxdb: kind: linux image: influxdb:1.8.10 ports: - 8086:8086 env: INFLUXDB_DATA_ENGINE: tsm1 INFLUXDB_REPORTING_DISABLED: "false" INFLUXDB_USER: gnmic INFLUXDB_USER_PASSWORD: gnmic INFLUXDB_DB: telemetry grafana: kind: linux image: grafana/grafana:latest binds: - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro #- grafana/dashboards/:/var/lib/grafana/dashboards ports: - 3000:3000 links: # super-spine1 links - endpoints: ["super-spine1:e1-1", "spine1:e1-1"] - endpoints: ["super-spine1:e1-2", "spine2:e1-1"] - endpoints: ["super-spine1:e1-3", "spine3:e1-1"] - endpoints: ["super-spine1:e1-4", "spine4:e1-1"] # super-spine2 links - endpoints: ["super-spine2:e1-1", "spine1:e1-2"] - endpoints: ["super-spine2:e1-2", "spine2:e1-2"] - endpoints: ["super-spine2:e1-3", "spine3:e1-2"] - endpoints: ["super-spine2:e1-4", "spine4:e1-2"] # spine1 links - endpoints: ["spine1:e1-3", "leaf1:e1-1"] - endpoints: ["spine1:e1-4", "leaf2:e1-1"] - endpoints: ["spine1:e1-5", "leaf3:e1-1"] - endpoints: ["spine1:e1-6", "leaf4:e1-1"] # spine2 links - endpoints: ["spine2:e1-3", "leaf1:e1-2"] - endpoints: ["spine2:e1-4", "leaf2:e1-2"] - endpoints: ["spine2:e1-5", "leaf3:e1-2"] - endpoints: ["spine2:e1-6", "leaf4:e1-2"] # spine3 links - endpoints: ["spine3:e1-3", "leaf5:e1-1"] - endpoints: ["spine3:e1-4", "leaf6:e1-1"] - endpoints: ["spine3:e1-5", "leaf7:e1-1"] - endpoints: ["spine3:e1-6", "leaf8:e1-1"] # spine4 links - endpoints: ["spine4:e1-3", "leaf5:e1-2"] - endpoints: ["spine4:e1-4", "leaf6:e1-2"] - endpoints: ["spine4:e1-5", "leaf7:e1-2"] - endpoints: ["spine4:e1-6", "leaf8:e1-2"] ================================================ FILE: examples/deployments/2.clusters/1.influxdb-output/docker-compose/docker-compose.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 version: '3' networks: gnmic-net: driver: bridge services: gnmic1: &gnmic image: ghcr.io/openconfig/gnmic:latest container_name: gnmic1 volumes: - ./gnmic.yaml:/app/gnmic.yaml command: "subscribe --config /app/gnmic.yaml" environment: - GNMIC_API=:7890 - GNMIC_CLUSTERING_INSTANCE_NAME=gnmic1 - GNMIC_CLUSTERING_SERVICE_ADDRESS=gnmic1 networks: - gnmic-net ports: - 7890:7890 depends_on: - consul-agent - influxdb gnmic2: <<: *gnmic container_name: gnmic2 environment: - GNMIC_API=:7891 - GNMIC_CLUSTERING_INSTANCE_NAME=gnmic2 - GNMIC_CLUSTERING_SERVICE_ADDRESS=gnmic2 ports: - 7891:7891 gnmic3: <<: *gnmic container_name: gnmic3 environment: - GNMIC_API=:7892 - GNMIC_CLUSTERING_INSTANCE_NAME=gnmic3 - GNMIC_CLUSTERING_SERVICE_ADDRESS=gnmic3 ports: - 7892:7892 consul-agent: image: hashicorp/consul:latest container_name: consul networks: - gnmic-net ports: - 8500:8500 - 8600:8600/udp command: agent -server -ui -node=server-1 -bootstrap-expect=1 -client=0.0.0.0 influxdb: image: influxdb:1.8.10 container_name: influxdb networks: - gnmic-net ports: - "8083:8083" - "8086:8086" - "8090:8090" environment: - INFLUXDB_DATA_ENGINE=tsm1 - INFLUXDB_REPORTING_DISABLED=false - INFLUXDB_USER=gnmic - INFLUXDB_USER_PASSWORD=gnmic - INFLUXDB_DB=telemetry volumes: - influx-storage:/var/lib/influxdb volumes: influx-storage: ================================================ FILE: examples/deployments/2.clusters/1.influxdb-output/docker-compose/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! insecure: true encoding: json_ietf log: true # clustering config clustering: cluster-name: cluster1 targets-watch-timer: 30s locker: type: consul address: consul-agent:8500 targets: # Add targets configuration here # eg: # 192.168.1.131:57400: # username: gnmic # password: secret_password subscriptions: # Add subscriptions configuration here # e.g: # sub1: # paths: # - /interface/statistics # stream-mode: sample # sample-interval: 1s outputs: influxdb-output: type: influxdb url: http://influxdb:8086 bucket: telemetry # db name token: gnmic:gnmic # username:password batch-size: 1000 flush-timer: 10s ================================================ FILE: examples/deployments/2.clusters/2.prometheus-output/containerlab/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf log: true loader: type: docker filters: - containers: - label=clab-node-kind: srl label=containerlab: lab22 subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s api-server: enable-metrics: true # clustering config clustering: cluster-name: cluster2 targets-watch-timer: 30s locker: type: consul address: clab-lab22-consul-agent:8500 outputs: output1: type: prometheus service-registration: address: clab-lab22-consul-agent:8500 ================================================ FILE: examples/deployments/2.clusters/2.prometheus-output/containerlab/grafana/datasources/datasource.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: 1 deleteDatasources: - name: Prometheus orgId: 1 datasources: - name: Prometheus type: prometheus orgId: 1 url: http://clab-lab22-prometheus:9090 password: user: database: basicAuth: false basicAuthUser: basicAuthPassword: withCredentials: isDefault: true version: 1 editable: true ================================================ FILE: examples/deployments/2.clusters/2.prometheus-output/containerlab/lab22.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab22 topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux nodes: super-spine1: super-spine2: spine1: spine2: spine3: spine4: leaf1: leaf2: leaf3: leaf4: leaf5: leaf6: leaf7: leaf8: gnmic1: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7890:7890 - 9804:9804 env: GNMIC_API: :7890 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab22-gnmic1 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab22-gnmic1 GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab22-gnmic1:9804 cmd: '--config /app/gnmic.yaml subscribe' gnmic2: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7891:7891 - 9805:9805 env: GNMIC_API: :7891 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab22-gnmic2 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab22-gnmic2 GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab22-gnmic2:9805 cmd: '--config /app/gnmic.yaml subscribe' gnmic3: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7892:7892 - 9806:9806 env: GNMIC_API: :7892 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab22-gnmic3 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab22-gnmic3 GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab22-gnmic3:9806 cmd: '--config /app/gnmic.yaml subscribe' consul-agent: kind: linux image: hashicorp/consul:latest ports: - 8500:8500 - 8600:8600/udp cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0' prometheus: kind: linux image: prom/prometheus:latest user: 65534:65534 ports: - 9090:9090 binds: - ./prometheus/:/etc/prometheus/ cmd: | --config.file=/etc/prometheus/prometheus.yaml --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles --log.level=debug grafana: kind: linux image: grafana/grafana:latest binds: - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro #- grafana/dashboards/:/var/lib/grafana/dashboards ports: - 3000:3000 links: # super-spine1 links - endpoints: ["super-spine1:e1-1", "spine1:e1-1"] - endpoints: ["super-spine1:e1-2", "spine2:e1-1"] - endpoints: ["super-spine1:e1-3", "spine3:e1-1"] - endpoints: ["super-spine1:e1-4", "spine4:e1-1"] # super-spine2 links - endpoints: ["super-spine2:e1-1", "spine1:e1-2"] - endpoints: ["super-spine2:e1-2", "spine2:e1-2"] - endpoints: ["super-spine2:e1-3", "spine3:e1-2"] - endpoints: ["super-spine2:e1-4", "spine4:e1-2"] # spine1 links - endpoints: ["spine1:e1-3", "leaf1:e1-1"] - endpoints: ["spine1:e1-4", "leaf2:e1-1"] - endpoints: ["spine1:e1-5", "leaf3:e1-1"] - endpoints: ["spine1:e1-6", "leaf4:e1-1"] # spine2 links - endpoints: ["spine2:e1-3", "leaf1:e1-2"] - endpoints: ["spine2:e1-4", "leaf2:e1-2"] - endpoints: ["spine2:e1-5", "leaf3:e1-2"] - endpoints: ["spine2:e1-6", "leaf4:e1-2"] # spine3 links - endpoints: ["spine3:e1-3", "leaf5:e1-1"] - endpoints: ["spine3:e1-4", "leaf6:e1-1"] - endpoints: ["spine3:e1-5", "leaf7:e1-1"] - endpoints: ["spine3:e1-6", "leaf8:e1-1"] # spine4 links - endpoints: ["spine4:e1-3", "leaf5:e1-2"] - endpoints: ["spine4:e1-4", "leaf6:e1-2"] - endpoints: ["spine4:e1-5", "leaf7:e1-2"] - endpoints: ["spine4:e1-6", "leaf8:e1-2"] ================================================ FILE: examples/deployments/2.clusters/2.prometheus-output/containerlab/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: clab-lab22-consul-agent:8500 services: - prometheus-output1 - cluster2-gnmic-api ================================================ FILE: examples/deployments/2.clusters/2.prometheus-output/docker-compose/docker-compose.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 version: '3' networks: gnmic-net: driver: bridge services: gnmic1: &gnmic image: ghcr.io/openconfig/gnmic:latest container_name: gnmic1 volumes: - ./gnmic.yaml:/app/gnmic.yaml command: "subscribe --config /app/gnmic.yaml" environment: - GNMIC_API=:7890 - GNMIC_CLUSTERING_INSTANCE_NAME=gnmic1 - GNMIC_CLUSTERING_SERVICE_ADDRESS=gnmic1 - GNMIC_OUTPUTS_OUTPUT1_LISTEN=gnmic1:9804 networks: - gnmic-net ports: - 7890:7890 - 9804:9804 depends_on: - consul-agent gnmic2: <<: *gnmic container_name: gnmic2 environment: - GNMIC_API=:7891 - GNMIC_CLUSTERING_INSTANCE_NAME=gnmic2 - GNMIC_CLUSTERING_SERVICE_ADDRESS=gnmic2 - GNMIC_OUTPUTS_OUTPUT1_LISTEN=gnmic2:9805 ports: - 7891:7891 - 9805:9805 gnmic3: <<: *gnmic container_name: gnmic3 environment: - GNMIC_API=:7892 - GNMIC_CLUSTERING_INSTANCE_NAME=gnmic3 - GNMIC_CLUSTERING_SERVICE_ADDRESS=gnmic3 - GNMIC_OUTPUTS_OUTPUT1_LISTEN=gnmic3:9806 ports: - 7892:7892 - 9806:9806 consul-agent: image: hashicorp/consul:latest container_name: consul networks: - gnmic-net ports: - 8500:8500 - 8600:8600/udp command: agent -server -ui -node=server-1 -bootstrap-expect=1 -client=0.0.0.0 prometheus: image: prom/prometheus:latest container_name: prometheus volumes: - ./prometheus/:/etc/prometheus/ - prometheus-data:/prometheus command: - '--config.file=/etc/prometheus/prometheus.yaml' - '--storage.tsdb.path=/prometheus' - '--web.console.libraries=/usr/share/prometheus/console_libraries' - '--web.console.templates=/usr/share/prometheus/consoles' - '--log.level=debug' ports: - 9090:9090 networks: - gnmic-net volumes: prometheus-data: ================================================ FILE: examples/deployments/2.clusters/2.prometheus-output/docker-compose/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! insecure: true encoding: json_ietf log: true # clustering config clustering: cluster-name: cluster1 targets-watch-timer: 30s locker: type: consul address: consul-agent:8500 targets: # Add targets configuration here # e.g: # 192.168.1.131:57400: # username: gnmic # password: secret_password subscriptions: # Add subscriptions configuration here # e.g: # sub1: # paths: # - /interface/statistics # stream-mode: sample # sample-interval: 1s outputs: output1: type: prometheus service-registration: address: consul-agent:8500 ================================================ FILE: examples/deployments/2.clusters/2.prometheus-output/docker-compose/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: consul-agent:8500 services: - prometheus-output1 - cluster2-gnmic-api ================================================ FILE: examples/deployments/2.clusters/2.prometheus-output/kubernetes/consul/deployment.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: apps/v1 kind: Deployment metadata: name: consul-deploy labels: app: consul spec: replicas: 1 selector: matchLabels: app: consul template: metadata: labels: app: consul spec: containers: - args: - agent - -server - -ui - -node=server-1 - -bootstrap-expect=1 - -client=0.0.0.0 image: consul imagePullPolicy: IfNotPresent name: consul ports: - containerPort: 8500 name: consul protocol: TCP resources: limits: cpu: 100m memory: 256Mi requests: cpu: 50m memory: 128Mi ================================================ FILE: examples/deployments/2.clusters/2.prometheus-output/kubernetes/consul/service.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: v1 kind: Service metadata: name: consul-svc spec: ports: - name: http port: 8500 protocol: TCP selector: app: consul ================================================ FILE: examples/deployments/2.clusters/2.prometheus-output/kubernetes/gnmic-app/configmap.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: v1 kind: ConfigMap metadata: name: gnmic-config data: config.yaml: | insecure: true encoding: json_ietf log: true clustering: cluster-name: cluster1 targets-watch-timer: 30s locker: type: consul address: consul-svc:8500 targets: # Add targets configuration here # e.g: # 192.168.1.131:57400: # username: gnmic # password: secret_password subscriptions: # Add subscriptions configuration here # e.g: # sub1: # paths: # - /interface/statistics # stream-mode: sample # sample-interval: 1s outputs: output1: type: prometheus ================================================ FILE: examples/deployments/2.clusters/2.prometheus-output/kubernetes/gnmic-app/secret.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: v1 kind: Secret metadata: name: gnmic-login type: Opaque stringData: GNMIC_PASSWORD: NokiaSrl1! GNMIC_USERNAME: admin ================================================ FILE: examples/deployments/2.clusters/2.prometheus-output/kubernetes/gnmic-app/service.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: v1 kind: Service metadata: name: gnmic-svc labels: app: gnmic spec: ports: - name: http port: 9804 protocol: TCP targetPort: 9804 selector: app: gnmic clusterIP: None ================================================ FILE: examples/deployments/2.clusters/2.prometheus-output/kubernetes/gnmic-app/statefulset.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: apps/v1 kind: StatefulSet metadata: name: gnmic-ss labels: app: gnmic spec: replicas: 3 selector: matchLabels: app: gnmic serviceName: gnmic-svc template: metadata: labels: app: gnmic spec: containers: - args: - subscribe - --config - /app/config.yaml image: ghcr.io/openconfig/gnmic imagePullPolicy: IfNotPresent name: gnmic securityContext: allowPrivilegeEscalation: false capabilities: drop: - all readOnlyRootFilesystem: true runAsNonRoot: true runAsUser: 1000 ports: - containerPort: 9804 name: prom-output protocol: TCP - containerPort: 7890 name: gnmic-api protocol: TCP resources: limits: cpu: 100m memory: 400Mi requests: cpu: 50m memory: 200Mi envFrom: - secretRef: name: gnmic-login env: - name: GNMIC_API value: :7890 - name: GNMIC_CLUSTERING_INSTANCE_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: GNMIC_CLUSTERING_SERVICE_ADDRESS value: "$(GNMIC_CLUSTERING_INSTANCE_NAME).gnmic-svc.gnmic.svc.cluster.local" - name: GNMIC_OUTPUTS_OUTPUT1_LISTEN value: "$(GNMIC_CLUSTERING_INSTANCE_NAME).gnmic-svc.gnmic.svc.cluster.local:9804" volumeMounts: - mountPath: /app/config.yaml name: config subPath: config.yaml volumes: - configMap: defaultMode: 420 name: gnmic-config name: config ================================================ FILE: examples/deployments/2.clusters/2.prometheus-output/kubernetes/prometheus/servicemonitor.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: gnmic-sm labels: app: gnmic spec: selector: matchLabels: app: gnmic namespaceSelector: matchNames: - gnmic endpoints: - port: http path: /metrics ================================================ FILE: examples/deployments/2.clusters/3.nats-input-prometheus-output/containerlab/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf log: true api-server: enable-metrics: true loader: type: docker filters: - containers: - label=clab-node-kind: srl label=containerlab: lab23 config: outputs: - nats-output subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s # clustering config clustering: cluster-name: cluster2 targets-watch-timer: 30s locker: type: consul address: clab-lab23-consul-agent:8500 inputs: nats-input: type: nats address: clab-lab23-nats:4222 subject: telemetry outputs: - output1 outputs: nats-output: type: nats address: clab-lab23-nats:4222 subject: telemetry output1: type: prometheus service-registration: address: clab-lab23-consul-agent:8500 use-lock: true ================================================ FILE: examples/deployments/2.clusters/3.nats-input-prometheus-output/containerlab/grafana/datasources/datasource.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: 1 deleteDatasources: - name: Prometheus orgId: 1 datasources: - name: Prometheus type: prometheus orgId: 1 url: http://clab-lab23-prometheus:9090 password: user: database: basicAuth: false basicAuthUser: basicAuthPassword: withCredentials: isDefault: true version: 1 editable: true ================================================ FILE: examples/deployments/2.clusters/3.nats-input-prometheus-output/containerlab/lab23.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab23 topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux nodes: super-spine1: super-spine2: spine1: spine2: spine3: spine4: leaf1: leaf2: leaf3: leaf4: leaf5: leaf6: leaf7: leaf8: gnmic1: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic-config.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7890:7890 - 9804:9804 env: GNMIC_API: :7890 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab23-gnmic1 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab23-gnmic1 GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab23-gnmic1:9804 cmd: '--config /app/gnmic-config.yaml subscribe' gnmic2: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic-config.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7891:7891 - 9805:9805 env: GNMIC_API: :7891 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab23-gnmic2 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab23-gnmic2 GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab23-gnmic2:9805 cmd: '--config /app/gnmic-config.yaml subscribe' gnmic3: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic-config.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7892:7892 - 9806:9806 env: GNMIC_API: :7892 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab23-gnmic3 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab23-gnmic3 GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab23-gnmic3:9806 cmd: '--config /app/gnmic-config.yaml subscribe' consul-agent: kind: linux image: hashicorp/consul:latest ports: - 8500:8500 - 8600:8600/udp cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0' prometheus: kind: linux image: prom/prometheus:latest user: 65534:65534 ports: - 9090:9090 binds: - ./prometheus/:/etc/prometheus/ cmd: | --config.file=/etc/prometheus/prometheus.yaml --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles --log.level=debug nats: kind: linux image: nats:latest ports: - 4222:4222 grafana: kind: linux image: grafana/grafana:latest binds: - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro #- grafana/dashboards/:/var/lib/grafana/dashboards ports: - 3000:3000 links: # super-spine1 links - endpoints: ["super-spine1:e1-1", "spine1:e1-1"] - endpoints: ["super-spine1:e1-2", "spine2:e1-1"] - endpoints: ["super-spine1:e1-3", "spine3:e1-1"] - endpoints: ["super-spine1:e1-4", "spine4:e1-1"] # super-spine2 links - endpoints: ["super-spine2:e1-1", "spine1:e1-2"] - endpoints: ["super-spine2:e1-2", "spine2:e1-2"] - endpoints: ["super-spine2:e1-3", "spine3:e1-2"] - endpoints: ["super-spine2:e1-4", "spine4:e1-2"] # spine1 links - endpoints: ["spine1:e1-3", "leaf1:e1-1"] - endpoints: ["spine1:e1-4", "leaf2:e1-1"] - endpoints: ["spine1:e1-5", "leaf3:e1-1"] - endpoints: ["spine1:e1-6", "leaf4:e1-1"] # spine2 links - endpoints: ["spine2:e1-3", "leaf1:e1-2"] - endpoints: ["spine2:e1-4", "leaf2:e1-2"] - endpoints: ["spine2:e1-5", "leaf3:e1-2"] - endpoints: ["spine2:e1-6", "leaf4:e1-2"] # spine3 links - endpoints: ["spine3:e1-3", "leaf5:e1-1"] - endpoints: ["spine3:e1-4", "leaf6:e1-1"] - endpoints: ["spine3:e1-5", "leaf7:e1-1"] - endpoints: ["spine3:e1-6", "leaf8:e1-1"] # spine4 links - endpoints: ["spine4:e1-3", "leaf5:e1-2"] - endpoints: ["spine4:e1-4", "leaf6:e1-2"] - endpoints: ["spine4:e1-5", "leaf7:e1-2"] - endpoints: ["spine4:e1-6", "leaf8:e1-2"] ================================================ FILE: examples/deployments/2.clusters/3.nats-input-prometheus-output/containerlab/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: clab-lab23-consul-agent:8500 services: - prometheus-output1 - cluster2-gnmic-api ================================================ FILE: examples/deployments/2.clusters/3.nats-input-prometheus-output/docker-compose/docker-compose.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 version: '3' networks: gnmic-net: driver: bridge services: gnmic1: &gnmic image: ghcr.io/openconfig/gnmic:latest container_name: gnmic1 volumes: - ./gnmic.yaml:/app/gnmic.yaml command: "subscribe --config /app/gnmic.yaml" environment: - GNMIC_API=:7890 - GNMIC_CLUSTERING_INSTANCE_NAME=gnmic1 - GNMIC_CLUSTERING_SERVICE_ADDRESS=gnmic1 - GNMIC_OUTPUTS_OUTPUT1_LISTEN=gnmic1:9804 networks: - gnmic-net ports: - 7890:7890 - 9804:9804 depends_on: - consul-agent - nats gnmic2: <<: *gnmic container_name: gnmic2 environment: - GNMIC_API=:7891 - GNMIC_CLUSTERING_INSTANCE_NAME=gnmic2 - GNMIC_CLUSTERING_SERVICE_ADDRESS=gnmic2 - GNMIC_OUTPUTS_OUTPUT1_LISTEN=gnmic2:9805 ports: - 7891:7891 - 9805:9805 gnmic3: <<: *gnmic container_name: gnmic3 environment: - GNMIC_API=:7892 - GNMIC_CLUSTERING_INSTANCE_NAME=gnmic3 - GNMIC_CLUSTERING_SERVICE_ADDRESS=gnmic3 - GNMIC_OUTPUTS_OUTPUT1_LISTEN=gnmic3:9806 ports: - 7892:7892 - 9806:9806 consul-agent: image: hashicorp/consul:latest container_name: consul networks: - gnmic-net ports: - 8500:8500 - 8600:8600/udp command: agent -server -ui -node=server-1 -bootstrap-expect=1 -client=0.0.0.0 prometheus: image: prom/prometheus:latest container_name: prometheus volumes: - ./prometheus/:/etc/prometheus/ - prometheus-data:/prometheus command: - '--config.file=/etc/prometheus/prometheus.yaml' - '--storage.tsdb.path=/prometheus' - '--web.console.libraries=/usr/share/prometheus/console_libraries' - '--web.console.templates=/usr/share/prometheus/consoles' - '--log.level=debug' ports: - 9090:9090 networks: - gnmic-net nats: image: 'nats:latest' container_name: nats networks: - gnmic-net ports: - "4222:4222" - "6222:6222" - "8222:8222" volumes: prometheus-data: ================================================ FILE: examples/deployments/2.clusters/3.nats-input-prometheus-output/docker-compose/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! insecure: true encoding: json_ietf log: true # clustering config clustering: cluster-name: cluster1 targets-watch-timer: 30s locker: type: consul address: consul-agent:8500 targets: # Add targets configuration here # e.g: # 192.168.1.131:57400: # username: gnmic # password: secret_password # outputs: # - nats-output subscriptions: # Add subscriptions configuration here # e.g: # sub1: # paths: # - /interface/statistics # stream-mode: sample # sample-interval: 1s inputs: nats-input: type: nats address: nats:4222 subject: telemetry outputs: - output1 outputs: nats-output: type: nats address: nats:4222 subject: telemetry output1: type: prometheus service-registration: address: consul-agent:8500 use-lock: true ================================================ FILE: examples/deployments/2.clusters/3.nats-input-prometheus-output/docker-compose/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: consul-agent:8500 services: - prometheus-output1 - cluster2-gnmic-api ================================================ FILE: examples/deployments/2.clusters/4.gnmi-server/containerlab/gnmi-server.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab24 topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux nodes: super-spine1: super-spine2: spine1: spine2: spine3: spine4: leaf1: leaf2: leaf3: leaf4: leaf5: leaf6: leaf7: leaf8: gnmic1: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml subscribe' ports: - 7890:7890 env: GNMIC_API: :7890 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab24-gnmic1 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab24-gnmic1 GNMIC_GNMI_SERVER_ADDRESS: clab-lab24-gnmic1:57400 GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-lab24-gnmic1 gnmic2: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml subscribe' ports: - 7891:7891 env: GNMIC_API: :7891 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab24-gnmic2 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab24-gnmic2 GNMIC_GNMI_SERVER_ADDRESS: clab-lab24-gnmic2:57400 GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-lab24-gnmic2 gnmic3: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml subscribe' ports: - 7892:7892 env: GNMIC_API: :7892 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab24-gnmic3 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab24-gnmic3 GNMIC_GNMI_SERVER_ADDRESS: clab-lab24-gnmic3:57400 GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-lab24-gnmic3 agg-gnmic: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic-agg.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml subscribe' ports: - 7893:7893 - 9804:9804 env: GNMIC_API: :7893 consul-agent: kind: linux image: hashicorp/consul:latest ports: - 8500:8500 - 8600:8600/udp cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0' prometheus: kind: linux image: prom/prometheus:latest user: 65534:65534 ports: - 9090:9090 binds: - ./prometheus/:/etc/prometheus/ cmd: | --config.file=/etc/prometheus/prometheus.yaml --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles --log.level=debug grafana: kind: linux image: grafana/grafana:latest binds: - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro - grafana/dashboards.yaml:/etc/grafana/provisioning/dashboards/dashboards.yaml:ro - grafana/dashboards/:/var/lib/grafana/dashboards ports: - 3000:3000 links: # super-spine1 links - endpoints: ["super-spine1:e1-1", "spine1:e1-1"] - endpoints: ["super-spine1:e1-2", "spine2:e1-1"] - endpoints: ["super-spine1:e1-3", "spine3:e1-1"] - endpoints: ["super-spine1:e1-4", "spine4:e1-1"] # super-spine2 links - endpoints: ["super-spine2:e1-1", "spine1:e1-2"] - endpoints: ["super-spine2:e1-2", "spine2:e1-2"] - endpoints: ["super-spine2:e1-3", "spine3:e1-2"] - endpoints: ["super-spine2:e1-4", "spine4:e1-2"] # spine1 links - endpoints: ["spine1:e1-3", "leaf1:e1-1"] - endpoints: ["spine1:e1-4", "leaf2:e1-1"] - endpoints: ["spine1:e1-5", "leaf3:e1-1"] - endpoints: ["spine1:e1-6", "leaf4:e1-1"] # spine2 links - endpoints: ["spine2:e1-3", "leaf1:e1-2"] - endpoints: ["spine2:e1-4", "leaf2:e1-2"] - endpoints: ["spine2:e1-5", "leaf3:e1-2"] - endpoints: ["spine2:e1-6", "leaf4:e1-2"] # spine3 links - endpoints: ["spine3:e1-3", "leaf5:e1-1"] - endpoints: ["spine3:e1-4", "leaf6:e1-1"] - endpoints: ["spine3:e1-5", "leaf7:e1-1"] - endpoints: ["spine3:e1-6", "leaf8:e1-1"] # spine4 links - endpoints: ["spine4:e1-3", "leaf5:e1-2"] - endpoints: ["spine4:e1-4", "leaf6:e1-2"] - endpoints: ["spine4:e1-5", "leaf7:e1-2"] - endpoints: ["spine4:e1-6", "leaf8:e1-2"] ================================================ FILE: examples/deployments/2.clusters/4.gnmi-server/containerlab/gnmic-agg.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 log: true skip-verify: true loader: type: consul address: clab-lab24-consul-agent:8500 debug: true services: - name: cluster2-gnmi-server config: insecure: true subscriptions: cluster2: paths: - / stream-mode: on-change api-server: enable-metrics: true outputs: output1: type: prometheus listen: clab-lab24-agg-gnmic:9804 service-registration: address: clab-lab24-consul-agent:8500 ================================================ FILE: examples/deployments/2.clusters/4.gnmi-server/containerlab/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf log: true loader: type: docker filters: - containers: - label=clab-node-kind: srl label=containerlab: lab24 subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s api-server: enable-metrics: true # clustering config clustering: cluster-name: cluster2 targets-watch-timer: 30s locker: type: consul address: clab-lab24-consul-agent:8500 gnmi-server: enable-metrics: true service-registration: address: clab-lab24-consul-agent:8500 outputs: out1: type: file filename: /dev/null ================================================ FILE: examples/deployments/2.clusters/4.gnmi-server/containerlab/grafana/dashboards/gNMIc/gnmic_compute_metrics.json ================================================ { "annotations": { "list": [ { "builtIn": 1, "datasource": "-- Grafana --", "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", "type": "dashboard" } ] }, "editable": true, "gnetId": null, "graphTooltip": 0, "id": 2, "links": [], "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 10, "w": 12, "x": 0, "y": 0 }, "hiddenSeries": false, "id": 16, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "show": true, "total": false, "values": true }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "7.3.7", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "process_open_fds", "interval": "", "legendFormat": "{{instance}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Open File Descriptors (#)", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 10, "w": 12, "x": 12, "y": 0 }, "hiddenSeries": false, "id": 4, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "show": true, "total": false, "values": true }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "7.3.7", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "go_goroutines", "interval": "", "legendFormat": "{{instance}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Go Routines (#)", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 10, "w": 12, "x": 0, "y": 10 }, "hiddenSeries": false, "id": 14, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "show": true, "total": false, "values": true }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "7.3.7", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "go_memstats_stack_inuse_bytes/1000000", "interval": "", "legendFormat": "{{instance}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Memory Stack In Use (MB)", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 10, "w": 12, "x": 12, "y": 10 }, "hiddenSeries": false, "id": 6, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "show": true, "total": false, "values": true }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "7.3.7", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "go_memstats_alloc_bytes/1000000", "interval": "", "legendFormat": "{{instance}} mem alloc", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Memory Alloc (MB)", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 10, "w": 12, "x": 0, "y": 20 }, "hiddenSeries": false, "id": 12, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "show": true, "total": false, "values": true }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "7.3.7", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "rate(go_memstats_mallocs_total[1m])/1000000", "interval": "", "legendFormat": "", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Memory malloc MB/s", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 10, "w": 12, "x": 12, "y": 20 }, "hiddenSeries": false, "id": 2, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "show": true, "total": false, "values": true }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "7.3.7", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "go_gc_duration_seconds*1000", "interval": "", "legendFormat": "{{instance}} quantile={{quantile}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Go GC duration (ms)", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 10, "w": 12, "x": 0, "y": 30 }, "hiddenSeries": false, "id": 10, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "show": true, "total": false, "values": true }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "7.3.7", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "go_memstats_heap_inuse_bytes/1000000", "interval": "", "legendFormat": "{{instance}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Memory Heap inUse (MB)", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 10, "w": 12, "x": 0, "y": 40 }, "hiddenSeries": false, "id": 8, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "show": true, "total": false, "values": true }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "7.3.7", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "rate(go_memstats_alloc_bytes_total[1m])/1000000", "interval": "", "legendFormat": "{{instance}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Memory alloc MB/s ", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } } ], "refresh": "10s", "schemaVersion": 26, "style": "dark", "tags": [], "templating": { "list": [] }, "time": { "from": "now-6h", "to": "now" }, "timepicker": {}, "timezone": "", "title": "gNMIc Compute metrics", "uid": "EYxvhi77k", "version": 13 } ================================================ FILE: examples/deployments/2.clusters/4.gnmi-server/containerlab/grafana/dashboards/gNMIc/gnmic_grpc_metrics.json ================================================ { "annotations": { "list": [ { "builtIn": 1, "datasource": "-- Grafana --", "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", "type": "dashboard" } ] }, "editable": true, "gnetId": null, "graphTooltip": 0, "id": 1, "links": [], "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 10, "w": 12, "x": 0, "y": 0 }, "hiddenSeries": false, "id": 8, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "show": true, "total": false, "values": true }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "7.3.7", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "grpc_server_started_total", "interval": "", "legendFormat": "{{instance}} {{grpc_method}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "gRPC Server Started", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 10, "w": 12, "x": 12, "y": 0 }, "hiddenSeries": false, "id": 2, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "show": true, "total": false, "values": true }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "7.3.7", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "rate(grpc_client_msg_received_total[1m])", "interval": "", "legendFormat": "{{instance}} {{grpc_method}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "gRPC Client Msg Rcv/second", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 10, "w": 12, "x": 0, "y": 10 }, "hiddenSeries": false, "id": 6, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "show": true, "total": false, "values": true }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "7.3.7", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "rate(grpc_client_msg_sent_total[1m])", "interval": "", "legendFormat": "{{instance}} {{grpc_method}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "gRPC Client Msg Sent/s", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fieldConfig": { "defaults": { "custom": {} }, "overrides": [] }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 10, "w": 12, "x": 12, "y": 10 }, "hiddenSeries": false, "id": 4, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "show": true, "total": false, "values": true }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "7.3.7", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "grpc_client_started_total", "interval": "", "legendFormat": "{{instance}} {{grpc_method}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "gRPC Client started", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } } ], "schemaVersion": 26, "style": "dark", "tags": [], "templating": { "list": [] }, "time": { "from": "now-6h", "to": "now" }, "timepicker": {}, "timezone": "", "title": "gNMIc gRPC metrics", "uid": "9W_Qzi7nz", "version": 6 } ================================================ FILE: examples/deployments/2.clusters/4.gnmi-server/containerlab/grafana/dashboards.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: 1 providers: - name: 'gNMIc Internal Metrics' orgId: 1 folder: '' type: file disableDeletion: false editable: true options: path: /var/lib/grafana/dashboards foldersFromFilesStructure: true ================================================ FILE: examples/deployments/2.clusters/4.gnmi-server/containerlab/grafana/datasources/datasource.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: 1 deleteDatasources: - name: Prometheus orgId: 1 datasources: - name: Prometheus type: prometheus orgId: 1 url: http://clab-lab24-prometheus:9090 password: user: database: basicAuth: false basicAuthUser: basicAuthPassword: withCredentials: isDefault: true version: 1 editable: true ================================================ FILE: examples/deployments/2.clusters/4.gnmi-server/containerlab/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: clab-lab24-consul-agent:8500 services: - prometheus-output1 - cluster2-gnmic-api ================================================ FILE: examples/deployments/2.clusters/5.shared-cache/jetstream/containerlab/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true encoding: ascii log: true loader: type: docker filters: - containers: - label=clab-node-kind: srl label=containerlab: lab25-1 subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface[name=*]/statistics stream-mode: sample sample-interval: 30s sub2: paths: - /interface[name=*]/admin-state stream-mode: on-change sub3: paths: - /interface[name=*]/oper-state stream-mode: on-change api-server: enable-metrics: true # clustering config clustering: cluster-name: cluster2 targets-watch-timer: 30s locker: type: consul address: clab-lab25-1-consul-agent:8500 gnmi-server: enable-metrics: true service-registration: address: clab-lab25-1-consul-agent:8500 cache: type: jetstream address: clab-lab25-1-nats:4222 debug: true outputs: output1: type: prometheus service-registration: address: clab-lab25-1-consul-agent:8500 ================================================ FILE: examples/deployments/2.clusters/5.shared-cache/jetstream/containerlab/grafana/datasources/datasource.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: 1 deleteDatasources: - name: Prometheus orgId: 1 datasources: - name: Prometheus type: prometheus orgId: 1 url: http://clab-lab25-1-prometheus:9090 password: user: database: basicAuth: false basicAuthUser: basicAuthPassword: withCredentials: isDefault: true version: 1 editable: true ================================================ FILE: examples/deployments/2.clusters/5.shared-cache/jetstream/containerlab/lab25-1.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab25-1 topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux nodes: super-spine1: super-spine2: spine1: spine2: spine3: spine4: leaf1: leaf2: leaf3: leaf4: leaf5: leaf6: leaf7: leaf8: gnmic1: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7890:7890 - 9804:9804 env: GNMIC_API: :7890 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab25-1-gnmic1 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab25-1-gnmic1 GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab25-1-gnmic1:9804 cmd: '--config /app/gnmic.yaml subscribe' gnmic2: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7891:7891 - 9805:9805 env: GNMIC_API: :7891 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab25-1-gnmic2 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab25-1-gnmic2 GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab25-1-gnmic2:9805 cmd: '--config /app/gnmic.yaml subscribe' gnmic3: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7892:7892 - 9806:9806 env: GNMIC_API: :7892 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab25-1-gnmic3 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab25-1-gnmic3 GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab25-1-gnmic3:9806 cmd: '--config /app/gnmic.yaml subscribe' consul-agent: kind: linux image: hashicorp/consul:latest ports: - 8500:8500 - 8600:8600/udp cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0' prometheus: kind: linux image: prom/prometheus:latest user: 65534:65534 ports: - 9090:9090 binds: - ./prometheus/:/etc/prometheus/ cmd: | --config.file=/etc/prometheus/prometheus.yaml --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles --log.level=debug grafana: kind: linux image: grafana/grafana:latest binds: - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro #- grafana/dashboards/:/var/lib/grafana/dashboards ports: - 3000:3000 nats: kind: linux image: nats:latest ports: - 4222:4222 - 6222:6222 - 8222:8222 cmd: '--http_port 8222 -js -D' links: # super-spine1 links - endpoints: ["super-spine1:e1-1", "spine1:e1-1"] - endpoints: ["super-spine1:e1-2", "spine2:e1-1"] - endpoints: ["super-spine1:e1-3", "spine3:e1-1"] - endpoints: ["super-spine1:e1-4", "spine4:e1-1"] # super-spine2 links - endpoints: ["super-spine2:e1-1", "spine1:e1-2"] - endpoints: ["super-spine2:e1-2", "spine2:e1-2"] - endpoints: ["super-spine2:e1-3", "spine3:e1-2"] - endpoints: ["super-spine2:e1-4", "spine4:e1-2"] # spine1 links - endpoints: ["spine1:e1-3", "leaf1:e1-1"] - endpoints: ["spine1:e1-4", "leaf2:e1-1"] - endpoints: ["spine1:e1-5", "leaf3:e1-1"] - endpoints: ["spine1:e1-6", "leaf4:e1-1"] # spine2 links - endpoints: ["spine2:e1-3", "leaf1:e1-2"] - endpoints: ["spine2:e1-4", "leaf2:e1-2"] - endpoints: ["spine2:e1-5", "leaf3:e1-2"] - endpoints: ["spine2:e1-6", "leaf4:e1-2"] # spine3 links - endpoints: ["spine3:e1-3", "leaf5:e1-1"] - endpoints: ["spine3:e1-4", "leaf6:e1-1"] - endpoints: ["spine3:e1-5", "leaf7:e1-1"] - endpoints: ["spine3:e1-6", "leaf8:e1-1"] # spine4 links - endpoints: ["spine4:e1-3", "leaf5:e1-2"] - endpoints: ["spine4:e1-4", "leaf6:e1-2"] - endpoints: ["spine4:e1-5", "leaf7:e1-2"] - endpoints: ["spine4:e1-6", "leaf8:e1-2"] ================================================ FILE: examples/deployments/2.clusters/5.shared-cache/jetstream/containerlab/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: clab-lab25-1-consul-agent:8500 services: - cluster2-gnmic-api - prometheus-output1 ================================================ FILE: examples/deployments/2.clusters/5.shared-cache/nats/containerlab/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true encoding: ascii log: true loader: type: docker filters: - containers: - label=clab-node-kind: srl label=containerlab: lab25-2 subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface[name=*]/statistics stream-mode: sample sample-interval: 30s sub2: paths: - /interface[name=*]/admin-state stream-mode: on-change sub3: paths: - /interface[name=*]/oper-state stream-mode: on-change api-server: enable-metrics: true # clustering config clustering: cluster-name: cluster2 targets-watch-timer: 30s locker: type: consul address: clab-lab25-2-consul-agent:8500 gnmi-server: enable-metrics: true service-registration: address: clab-lab25-2-consul-agent:8500 cache: type: nats address: clab-lab25-2-nats:4222 debug: true outputs: output1: type: prometheus service-registration: address: clab-lab25-2-consul-agent:8500 ================================================ FILE: examples/deployments/2.clusters/5.shared-cache/nats/containerlab/grafana/datasources/datasource.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: 1 deleteDatasources: - name: Prometheus orgId: 1 datasources: - name: Prometheus type: prometheus orgId: 1 url: http://clab-lab25-2-prometheus:9090 password: user: database: basicAuth: false basicAuthUser: basicAuthPassword: withCredentials: isDefault: true version: 1 editable: true ================================================ FILE: examples/deployments/2.clusters/5.shared-cache/nats/containerlab/lab25-2.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab25-2 topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux nodes: super-spine1: super-spine2: spine1: spine2: spine3: spine4: leaf1: leaf2: leaf3: leaf4: leaf5: leaf6: leaf7: leaf8: gnmic1: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7890:7890 - 9804:9804 env: GNMIC_API: :7890 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab25-2-gnmic1 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab25-2-gnmic1 GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab25-2-gnmic1:9804 cmd: '--config /app/gnmic.yaml subscribe' gnmic2: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7891:7891 - 9805:9805 env: GNMIC_API: :7891 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab25-2-gnmic2 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab25-2-gnmic2 GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab25-2-gnmic2:9805 cmd: '--config /app/gnmic.yaml subscribe' gnmic3: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7892:7892 - 9806:9806 env: GNMIC_API: :7892 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab25-2-gnmic3 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab25-2-gnmic3 GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab25-2-gnmic3:9806 cmd: '--config /app/gnmic.yaml subscribe' consul-agent: kind: linux image: hashicorp/consul:latest ports: - 8500:8500 - 8600:8600/udp cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0' prometheus: kind: linux image: prom/prometheus:latest user: 65534:65534 ports: - 9090:9090 binds: - ./prometheus/:/etc/prometheus/ cmd: | --config.file=/etc/prometheus/prometheus.yaml --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles --log.level=debug grafana: kind: linux image: grafana/grafana:latest binds: - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro #- grafana/dashboards/:/var/lib/grafana/dashboards ports: - 3000:3000 nats: kind: linux image: nats:latest ports: - 4222:4222 cmd: '-D' links: # super-spine1 links - endpoints: ["super-spine1:e1-1", "spine1:e1-1"] - endpoints: ["super-spine1:e1-2", "spine2:e1-1"] - endpoints: ["super-spine1:e1-3", "spine3:e1-1"] - endpoints: ["super-spine1:e1-4", "spine4:e1-1"] # super-spine2 links - endpoints: ["super-spine2:e1-1", "spine1:e1-2"] - endpoints: ["super-spine2:e1-2", "spine2:e1-2"] - endpoints: ["super-spine2:e1-3", "spine3:e1-2"] - endpoints: ["super-spine2:e1-4", "spine4:e1-2"] # spine1 links - endpoints: ["spine1:e1-3", "leaf1:e1-1"] - endpoints: ["spine1:e1-4", "leaf2:e1-1"] - endpoints: ["spine1:e1-5", "leaf3:e1-1"] - endpoints: ["spine1:e1-6", "leaf4:e1-1"] # spine2 links - endpoints: ["spine2:e1-3", "leaf1:e1-2"] - endpoints: ["spine2:e1-4", "leaf2:e1-2"] - endpoints: ["spine2:e1-5", "leaf3:e1-2"] - endpoints: ["spine2:e1-6", "leaf4:e1-2"] # spine3 links - endpoints: ["spine3:e1-3", "leaf5:e1-1"] - endpoints: ["spine3:e1-4", "leaf6:e1-1"] - endpoints: ["spine3:e1-5", "leaf7:e1-1"] - endpoints: ["spine3:e1-6", "leaf8:e1-1"] # spine4 links - endpoints: ["spine4:e1-3", "leaf5:e1-2"] - endpoints: ["spine4:e1-4", "leaf6:e1-2"] - endpoints: ["spine4:e1-5", "leaf7:e1-2"] - endpoints: ["spine4:e1-6", "leaf8:e1-2"] ================================================ FILE: examples/deployments/2.clusters/5.shared-cache/nats/containerlab/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: clab-lab25-2-consul-agent:8500 services: - cluster2-gnmic-api - prometheus-output1 ================================================ FILE: examples/deployments/2.clusters/5.shared-cache/redis/containerlab/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true encoding: ascii log: true loader: type: docker filters: - containers: - label=clab-node-kind: srl label=containerlab: lab25-3 subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface[name=*]/statistics stream-mode: sample sample-interval: 30s sub2: paths: - /interface[name=*]/admin-state stream-mode: on-change sub3: paths: - /interface[name=*]/oper-state stream-mode: on-change api-server: enable-metrics: true # clustering config clustering: cluster-name: cluster2 targets-watch-timer: 30s locker: type: consul address: clab-lab25-3-consul-agent:8500 gnmi-server: enable-metrics: true service-registration: address: clab-lab25-3-consul-agent:8500 cache: type: redis address: clab-lab25-3-redis:6379 debug: true outputs: output1: type: prometheus service-registration: address: clab-lab25-3-consul-agent:8500 ================================================ FILE: examples/deployments/2.clusters/5.shared-cache/redis/containerlab/grafana/datasources/datasource.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: 1 deleteDatasources: - name: Prometheus orgId: 1 datasources: - name: Prometheus type: prometheus orgId: 1 url: http://clab-lab25-3-prometheus:9090 password: user: database: basicAuth: false basicAuthUser: basicAuthPassword: withCredentials: isDefault: true version: 1 editable: true ================================================ FILE: examples/deployments/2.clusters/5.shared-cache/redis/containerlab/lab25-3.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab25-3 topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux nodes: super-spine1: super-spine2: spine1: spine2: spine3: spine4: leaf1: leaf2: leaf3: leaf4: leaf5: leaf6: leaf7: leaf8: gnmic1: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7890:7890 - 9804:9804 env: GNMIC_API: :7890 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab25-3-gnmic1 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab25-3-gnmic1 GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab25-3-gnmic1:9804 cmd: '--config /app/gnmic.yaml subscribe' gnmic2: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7891:7891 - 9805:9805 env: GNMIC_API: :7891 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab25-3-gnmic2 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab25-3-gnmic2 GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab25-3-gnmic2:9805 cmd: '--config /app/gnmic.yaml subscribe' gnmic3: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7892:7892 - 9806:9806 env: GNMIC_API: :7892 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab25-3-gnmic3 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab25-3-gnmic3 GNMIC_OUTPUTS_OUTPUT1_LISTEN: clab-lab25-3-gnmic3:9806 cmd: '--config /app/gnmic.yaml subscribe' consul-agent: kind: linux image: hashicorp/consul:latest ports: - 8500:8500 - 8600:8600/udp cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0' prometheus: kind: linux image: prom/prometheus:latest user: 65534:65534 ports: - 9090:9090 binds: - ./prometheus/:/etc/prometheus/ cmd: | --config.file=/etc/prometheus/prometheus.yaml --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles --log.level=debug grafana: kind: linux image: grafana/grafana:latest binds: - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro #- grafana/dashboards/:/var/lib/grafana/dashboards ports: - 3000:3000 redis: kind: linux image: redis:7 ports: - 6379:6379 cmd: redis-server --loglevel warning links: # super-spine1 links - endpoints: ["super-spine1:e1-1", "spine1:e1-1"] - endpoints: ["super-spine1:e1-2", "spine2:e1-1"] - endpoints: ["super-spine1:e1-3", "spine3:e1-1"] - endpoints: ["super-spine1:e1-4", "spine4:e1-1"] # super-spine2 links - endpoints: ["super-spine2:e1-1", "spine1:e1-2"] - endpoints: ["super-spine2:e1-2", "spine2:e1-2"] - endpoints: ["super-spine2:e1-3", "spine3:e1-2"] - endpoints: ["super-spine2:e1-4", "spine4:e1-2"] # spine1 links - endpoints: ["spine1:e1-3", "leaf1:e1-1"] - endpoints: ["spine1:e1-4", "leaf2:e1-1"] - endpoints: ["spine1:e1-5", "leaf3:e1-1"] - endpoints: ["spine1:e1-6", "leaf4:e1-1"] # spine2 links - endpoints: ["spine2:e1-3", "leaf1:e1-2"] - endpoints: ["spine2:e1-4", "leaf2:e1-2"] - endpoints: ["spine2:e1-5", "leaf3:e1-2"] - endpoints: ["spine2:e1-6", "leaf4:e1-2"] # spine3 links - endpoints: ["spine3:e1-3", "leaf5:e1-1"] - endpoints: ["spine3:e1-4", "leaf6:e1-1"] - endpoints: ["spine3:e1-5", "leaf7:e1-1"] - endpoints: ["spine3:e1-6", "leaf8:e1-1"] # spine4 links - endpoints: ["spine4:e1-3", "leaf5:e1-2"] - endpoints: ["spine4:e1-4", "leaf6:e1-2"] - endpoints: ["spine4:e1-5", "leaf7:e1-2"] - endpoints: ["spine4:e1-6", "leaf8:e1-2"] ================================================ FILE: examples/deployments/2.clusters/5.shared-cache/redis/containerlab/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: clab-lab25-3-consul-agent:8500 services: - cluster2-gnmic-api - prometheus-output1 ================================================ FILE: examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/containerlab/gnmic-collector.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf log: true loader: type: docker filters: - containers: - label=clab-node-kind: srl label=containerlab: lab31 config: outputs: - nats-output subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s outputs: nats-output: type: nats address: clab-lab31-nats:4222 subject: telemetry ================================================ FILE: examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/containerlab/gnmic-relay.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 log: true inputs: nats-input: type: nats address: clab-lab31-nats:4222 subject: telemetry outputs: - prom-output outputs: prom-output: type: prometheus listen: "clab-lab31-gnmic-relay:9804" service-registration: address: clab-lab31-consul-agent:8500 ================================================ FILE: examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/containerlab/grafana/datasources/datasource.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: 1 deleteDatasources: - name: Prometheus orgId: 1 datasources: - name: Prometheus type: prometheus orgId: 1 url: http://clab-lab31-prometheus:9090 password: user: database: basicAuth: false basicAuthUser: basicAuthPassword: withCredentials: isDefault: true version: 1 editable: true ================================================ FILE: examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/containerlab/lab31.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab31 topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux nodes: srl1: srl2: gnmic-collector: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic-collector.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml subscribe' gnmic-relay: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic-relay.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 9804:9804 cmd: '--config /app/gnmic.yaml subscribe' consul-agent: kind: linux image: hashicorp/consul:latest ports: - 8500:8500 - 8600:8600/udp cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0' prometheus: kind: linux image: prom/prometheus:latest user: 65534:65534 ports: - 9090:9090 binds: - ./prometheus/:/etc/prometheus/ cmd: | --config.file=/etc/prometheus/prometheus.yaml --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles --log.level=debug nats: kind: linux image: nats:latest ports: - 4222:4222 grafana: kind: linux image: grafana/grafana:latest binds: - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro #- grafana/dashboards/:/var/lib/grafana/dashboards ports: - 3000:3000 links: - endpoints: ["srl1:e1-1", "srl2:e1-1"] ================================================ FILE: examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/containerlab/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: clab-lab31-consul-agent:8500 ================================================ FILE: examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/docker-compose/docker-compose.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 version: '3' networks: gnmic-net: driver: bridge services: gnmic-collector: &gnmic image: ghcr.io/openconfig/gnmic:latest container_name: gnmic-collector volumes: - ./gnmic-collector.yaml:/app/gnmic.yaml command: "subscribe --config /app/gnmic.yaml" networks: - gnmic-net depends_on: - nats gnmic-relay: <<: *gnmic container_name: gnmic-relay volumes: - ./gnmic-relay.yaml:/app/gnmic.yaml ports: - 9804:9804 depends_on: - nats - consul-agent consul-agent: image: hashicorp/consul:latest container_name: consul networks: - gnmic-net ports: - 8500:8500 - 8600:8600/udp command: agent -server -ui -node=server-1 -bootstrap-expect=1 -client=0.0.0.0 prometheus: image: prom/prometheus:latest container_name: prometheus volumes: - ./prometheus/:/etc/prometheus/ - prometheus-data:/prometheus command: - '--config.file=/etc/prometheus/prometheus.yaml' - '--storage.tsdb.path=/prometheus' - '--web.console.libraries=/usr/share/prometheus/console_libraries' - '--web.console.templates=/usr/share/prometheus/consoles' - '--log.level=debug' ports: - 9090:9090 networks: - gnmic-net nats: image: 'nats:latest' container_name: nats networks: - gnmic-net ports: - "4222:4222" - "6222:6222" - "8222:8222" volumes: prometheus-data: ================================================ FILE: examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/docker-compose/gnmic-collector.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! insecure: true encoding: json_ietf log: true targets: # Add targets configuration here # eg: # 192.168.1.131:57400: # username: gnmic # password: secret_password subscriptions: # Add subscriptions configuration here # e.g: # sub1: # paths: # - /interface/statistics # stream-mode: sample # sample-interval: 1s outputs: nats-output: type: nats address: nats:4222 subject: telemetry ================================================ FILE: examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/docker-compose/gnmic-relay.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 log: true inputs: nats-input: type: nats address: nats:4222 subject: telemetry outputs: - prom-output outputs: prom-output: type: prometheus listen: "gnmic-relay:9804" service-registration: address: consul-agent:8500 ================================================ FILE: examples/deployments/3.pipelines/1.gnmic-nats-gnmic-prometheus/docker-compose/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: consul-agent:8500 ================================================ FILE: examples/deployments/3.pipelines/2.gnmic-nats-gnmic-influxdb/containerlab/gnmic-collector.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf log: true loader: type: docker filters: - containers: - label=clab-node-kind: srl label=containerlab: lab32 config: outputs: - nats-output subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s outputs: nats-output: type: nats address: clab-lab32-nats:4222 subject: telemetry ================================================ FILE: examples/deployments/3.pipelines/2.gnmic-nats-gnmic-influxdb/containerlab/gnmic-relay.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 log: true inputs: nats-input: type: nats address: nats:4222 subject: telemetry outputs: - influxdb-output outputs: influxdb-output: type: influxdb url: http://influxdb:8086 bucket: telemetry # db name token: gnmic:gnmic # username:password batch-size: 1000 flush-timer: 10s ================================================ FILE: examples/deployments/3.pipelines/2.gnmic-nats-gnmic-influxdb/containerlab/grafana/datasources/datasource.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: 1 deleteDatasources: - name: InfluxDB orgId: 1 datasources: - name: InfluxDB type: influxdb orgId: 1 url: http://clab-lab32-influxdb:8086 user: gnmic password: gnmic database: telemetry editable: true ================================================ FILE: examples/deployments/3.pipelines/2.gnmic-nats-gnmic-influxdb/containerlab/lab32.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab32 topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: # type: ixr6 image: ghcr.io/nokia/srlinux nodes: srl1: srl2: gnmic-collector: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic-collector.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml subscribe' gnmic-relay: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic-relay.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 9804:9804 cmd: '--config /app/gnmic.yaml subscribe' consul-agent: kind: linux image: hashicorp/consul:latest ports: - 8500:8500 - 8600:8600/udp cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0' influxdb: kind: linux image: influxdb:1.8.10 ports: - 8086:8086 env: INFLUXDB_DATA_ENGINE: tsm1 INFLUXDB_REPORTING_DISABLED: "false" INFLUXDB_USER: gnmic INFLUXDB_USER_PASSWORD: gnmic INFLUXDB_DB: telemetry nats: kind: linux image: nats:latest ports: - 4222:4222 grafana: kind: linux image: grafana/grafana:latest binds: - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro #- grafana/dashboards/:/var/lib/grafana/dashboards ports: - 3000:3000 links: - endpoints: ["srl1:e1-1", "srl2:e1-1"] ================================================ FILE: examples/deployments/3.pipelines/2.gnmic-nats-gnmic-influxdb/docker-compose/docker-compose.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 version: '3' networks: gnmic-net: driver: bridge services: gnmic-collector: &gnmic image: ghcr.io/openconfig/gnmic:latest container_name: gnmic-collector volumes: - ./gnmic-collector.yaml:/app/gnmic.yaml command: "subscribe --config /app/gnmic.yaml" networks: - gnmic-net depends_on: - nats - influxdb gnmic-relay: <<: *gnmic container_name: gnmic-relay volumes: - ./gnmic-relay.yaml:/app/gnmic.yaml depends_on: - nats - influxdb influxdb: image: influxdb:1.8.10 container_name: influxdb networks: - gnmic-net ports: - "8083:8083" - "8086:8086" - "8090:8090" environment: - INFLUXDB_DATA_ENGINE=tsm1 - INFLUXDB_REPORTING_DISABLED=false - INFLUXDB_USER=gnmic - INFLUXDB_USER_PASSWORD=gnmic - INFLUXDB_DB=telemetry volumes: - influx-storage:/var/lib/influxdb nats: image: 'nats:latest' container_name: nats networks: - gnmic-net ports: - "4222:4222" - "6222:6222" - "8222:8222" volumes: influx-storage: ================================================ FILE: examples/deployments/3.pipelines/2.gnmic-nats-gnmic-influxdb/docker-compose/gnmic-collector.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! insecure: true encoding: json_ietf log: true targets: # Add targets configuration here # eg: # 192.168.1.131:57400: # username: gnmic # password: secret_password subscriptions: # Add subscriptions configuration here # e.g: # sub1: # paths: # - /interface/statistics # stream-mode: sample # sample-interval: 1s outputs: nats-output: type: nats address: nats:4222 subject: telemetry ================================================ FILE: examples/deployments/3.pipelines/2.gnmic-nats-gnmic-influxdb/docker-compose/gnmic-relay.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 log: true inputs: nats-input: type: nats address: nats:4222 subject: telemetry outputs: - influxdb-output outputs: influxdb-output: type: influxdb url: http://influxdb:8086 bucket: telemetry # db name token: gnmic:gnmic # username:password batch-size: 1000 flush-timer: 10s ================================================ FILE: examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/containerlab/gnmic-relay.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 log: true inputs: nats-input: type: nats address: clab-lab33a-nats:4222 subject: telemetry outputs: - prom-output outputs: prom-output: type: prometheus listen: "clab-lab33a-gnmic-relay:9804" service-registration: address: clab-lab33a-consul-agent:8500 ================================================ FILE: examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/containerlab/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf log: true api-server: enable-metrics: true loader: type: docker filters: - containers: - label=clab-node-kind: srl label=containerlab: lab33a config: outputs: - nats-output subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s # clustering config clustering: cluster-name: cluster2 targets-watch-timer: 30s locker: type: consul address: clab-lab33a-consul-agent:8500 outputs: nats-output: type: nats address: clab-lab33a-nats:4222 subject: telemetry ================================================ FILE: examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/containerlab/grafana/datasources/datasource.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: 1 deleteDatasources: - name: Prometheus orgId: 1 datasources: - name: Prometheus type: prometheus orgId: 1 url: http://clab-lab23-prometheus:9090 password: user: database: basicAuth: false basicAuthUser: basicAuthPassword: withCredentials: isDefault: true version: 1 editable: true ================================================ FILE: examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/containerlab/lab33a.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab33a topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: # type: ixr6 image: ghcr.io/nokia/srlinux nodes: srl1: srl2: gnmic-collector1: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic-config.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7890:7890 env: GNMIC_API: :7890 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab33a-gnmic-collector1 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab33a-gnmic-collector1 cmd: '--config /app/gnmic-config.yaml subscribe' gnmic-collector2: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic-config.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7891:7891 env: GNMIC_API: :7891 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab33a-gnmic-collector2 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab33a-gnmic-collector2 cmd: '--config /app/gnmic-config.yaml subscribe' gnmic-collector3: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic-config.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7892:7892 env: GNMIC_API: :7892 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab33a-gnmic-collector3 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab33a-gnmic-collector3 cmd: '--config /app/gnmic-config.yaml subscribe' gnmic-relay: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic-relay.yaml:/app/gnmic-config.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 9804:9804 cmd: '--config /app/gnmic-config.yaml subscribe' consul-agent: kind: linux image: hashicorp/consul:latest ports: - 8500:8500 - 8600:8600/udp cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0' prometheus: kind: linux image: prom/prometheus:latest user: 65534:65534 ports: - 9090:9090 binds: - ./prometheus/:/etc/prometheus/ cmd: | --config.file=/etc/prometheus/prometheus.yaml --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles --log.level=debug nats: kind: linux image: nats:latest ports: - 4222:4222 grafana: kind: linux image: grafana/grafana:latest binds: - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro #- grafana/dashboards/:/var/lib/grafana/dashboards ports: - 3000:3000 links: - endpoints: ["srl1:e1-1", "srl2:e1-1"] ================================================ FILE: examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/containerlab/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: clab-lab33a-consul-agent:8500 services: - prometheus-prom-output - cluster2-gnmic-api ================================================ FILE: examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/docker-compose/docker-compose.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 version: '3' networks: gnmic-net: driver: bridge services: gnmic-collector1: &gnmic image: ghcr.io/openconfig/gnmic:latest container_name: gnmic-collector1 volumes: - ./gnmic-collector.yaml:/app/gnmic.yaml command: "subscribe --config /app/gnmic.yaml" environment: - GNMIC_API=":7890" - GNMIC_CLUSTERING_INSTANCE_NAME="gnmic-collector1" - GNMIC_CLUSTERING_SERVICE_ADDRESS="gnmic-collector1" networks: - gnmic-net ports: - 7890:7890 depends_on: - nats gnmic-collector2: <<: *gnmic container_name: gnmic-collector2 environment: - GNMIC_API=":7891" - GNMIC_CLUSTERING_INSTANCE_NAME="gnmic-collector2" - GNMIC_CLUSTERING_SERVICE_ADDRESS="gnmic-collector2" ports: - 7891:7891 gnmic-collector3: <<: *gnmic container_name: gnmic-collector3 environment: - GNMIC_API=":7892" - GNMIC_CLUSTERING_INSTANCE_NAME="gnmic-collector3" - GNMIC_CLUSTERING_SERVICE_ADDRESS="gnmic-collector3" ports: - 7892:7892 gnmic-relay: image: ghcr.io/openconfig/gnmic:latest container_name: gnmic-relay command: "subscribe --config /app/gnmic.yaml" volumes: - ./gnmic-relay.yaml:/app/gnmic.yaml ports: - 9804:9804 depends_on: - nats - consul-agent consul-agent: image: hashicorp/consul:latest container_name: consul networks: - gnmic-net ports: - 8500:8500 - 8600:8600/udp command: agent -server -ui -node=server-1 -bootstrap-expect=1 -client=0.0.0.0 prometheus: image: prom/prometheus:latest container_name: prometheus volumes: - ./prometheus/:/etc/prometheus/ - prometheus-data:/prometheus command: - '--config.file=/etc/prometheus/prometheus.yaml' - '--storage.tsdb.path=/prometheus' - '--web.console.libraries=/usr/share/prometheus/console_libraries' - '--web.console.templates=/usr/share/prometheus/consoles' - '--log.level=debug' ports: - 9090:9090 networks: - gnmic-net nats: image: 'nats:latest' container_name: nats networks: - gnmic-net ports: - "4222:4222" - "6222:6222" - "8222:8222" volumes: prometheus-data: ================================================ FILE: examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/docker-compose/gnmic-collector.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! insecure: true encoding: json_ietf log: true # clustering config clustering: cluster-name: cluster1 targets-watch-timer: 30s locker: type: consul address: consul-agent:8500 targets: # Add targets configuration here # eg: # 192.168.1.131:57400: # username: gnmic # password: secret_password subscriptions: # Add subscriptions configuration here # e.g: # sub1: # paths: # - /interface/statistics # stream-mode: sample # sample-interval: 1s outputs: nats-output: type: nats address: nats:4222 subject: telemetry ================================================ FILE: examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/docker-compose/gnmic-relay.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 log: true inputs: nats-input: type: input address: nats:4222 subject: telemetry outputs: - prom-output outputs: prom-output: type: prometheus listen: "gnmic-relay:9804" service-registration: address: consul-agent:8500 ================================================ FILE: examples/deployments/3.pipelines/3a.gnmic-cluster-nats-gnmic-prometheus/docker-compose/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: consul-agent:8500 ================================================ FILE: examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/containerlab/gnmic-relay.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 log: true inputs: nats-input: type: nats address: clab-lab33b-nats:4222 subject: telemetry outputs: - prom-output outputs: prom-output: type: prometheus listen: "clab-lab33b-gnmic-relay:9804" service-registration: address: clab-lab33b-consul-agent:8500 ================================================ FILE: examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/containerlab/gnmic.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf log: true api-server: enable-metrics: true loader: type: docker filters: - containers: - label=clab-node-kind: srl label=containerlab: lab33b config: outputs: - nats-output subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s # clustering config clustering: cluster-name: cluster2 targets-watch-timer: 30s locker: type: consul address: clab-lab33b-consul-agent:8500 outputs: nats-output: type: nats address: clab-lab33b-nats:4222 subject: telemetry ================================================ FILE: examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/containerlab/grafana/datasources/datasource.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 apiVersion: 1 deleteDatasources: - name: Prometheus orgId: 1 datasources: - name: Prometheus type: prometheus orgId: 1 url: http://clab-lab23-prometheus:9090 password: user: database: basicAuth: false basicAuthUser: basicAuthPassword: withCredentials: isDefault: true version: 1 editable: true ================================================ FILE: examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/containerlab/lab33b.clab.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 name: lab33b topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: # type: ixr6 image: ghcr.io/nokia/srlinux nodes: srl1: srl2: gnmic-collector1: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic-config.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7890:7890 env: GNMIC_API: :7890 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab33b-gnmic-collector1 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab33b-gnmic-collector1 cmd: '--config /app/gnmic-config.yaml subscribe' gnmic-collector2: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic-config.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7891:7891 env: GNMIC_API: :7891 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab33b-gnmic-collector2 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab33b-gnmic-collector2 cmd: '--config /app/gnmic-config.yaml subscribe' gnmic-collector3: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic.yaml:/app/gnmic-config.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 7892:7892 env: GNMIC_API: :7892 GNMIC_CLUSTERING_INSTANCE_NAME: clab-lab33b-gnmic-collector3 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-lab33b-gnmic-collector3 cmd: '--config /app/gnmic-config.yaml subscribe' gnmic-relay: kind: linux image: ghcr.io/openconfig/gnmic:latest binds: - ./gnmic-relay.yaml:/app/gnmic-config.yaml:ro - /var/run/docker.sock:/var/run/docker.sock ports: - 9804:9804 cmd: '--config /app/gnmic-config.yaml subscribe' consul-agent: kind: linux image: hashicorp/consul:latest ports: - 8500:8500 - 8600:8600/udp cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0' prometheus: kind: linux image: prom/prometheus:latest user: 65534:65534 ports: - 9090:9090 binds: - ./prometheus/:/etc/prometheus/ cmd: | --config.file=/etc/prometheus/prometheus.yaml --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles --log.level=debug nats: kind: linux image: nats:latest ports: - 4222:4222 grafana: kind: linux image: grafana/grafana:latest binds: - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro #- grafana/dashboards/:/var/lib/grafana/dashboards ports: - 3000:3000 links: - endpoints: ["srl1:e1-1", "srl2:e1-1"] ================================================ FILE: examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/containerlab/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: clab-lab33b-consul-agent:8500 services: - prometheus-prom-output - cluster2-gnmic-api ================================================ FILE: examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/docker-compose/docker-compose.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 version: '3' networks: gnmic-net: driver: bridge services: gnmic-collector1: &gnmic image: ghcr.io/openconfig/gnmic:latest container_name: gnmic-collector1 volumes: - ./gnmic-collector.yaml:/app/gnmic.yaml command: "subscribe --config /app/gnmic.yaml" environment: - GNMIC_API=":7890" - GNMIC_CLUSTERING_INSTANCE_NAME="gnmic-collector1" - GNMIC_CLUSTERING_SERVICE_ADDRESS="gnmic-collector1" networks: - gnmic-net ports: - 7890:7890 depends_on: - nats gnmic-collector2: <<: *gnmic container_name: gnmic-collector2 environment: - GNMIC_API=":7891" - GNMIC_CLUSTERING_INSTANCE_NAME="gnmic-collector2" - GNMIC_CLUSTERING_SERVICE_ADDRESS="gnmic-collector2" ports: - 7891:7891 gnmic-collector3: <<: *gnmic container_name: gnmic-collector3 environment: - GNMIC_API=":7892" - GNMIC_CLUSTERING_INSTANCE_NAME="gnmic-collector3" - GNMIC_CLUSTERING_SERVICE_ADDRESS="gnmic-collector3" ports: - 7892:7892 gnmic-relay1: <<: *gnmic container_name: gnmic-relay1 volumes: - ./gnmic-relay.yaml:/app/gnmic.yaml environment: - GNMIC_OUTPUTS_OUTPUT1_LISTEN="gnmic-relay1:9804" ports: - 9804:9804 depends_on: - nats - consul-agent gnmic-relay2: <<: *gnmic container_name: gnmic-relay2 volumes: - ./gnmic-relay.yaml:/app/gnmic.yaml environment: - GNMIC_OUTPUTS_OUTPUT1_LISTEN="gnmic-relay2:9805" ports: - 9805:9805 depends_on: - nats - consul-agent gnmic-relay3: <<: *gnmic container_name: gnmic-relay volumes: - ./gnmic-relay3.yaml:/app/gnmic.yaml ports: - 9806:9806 depends_on: - nats - consul-agent consul-agent: image: hashicorp/consul:latest container_name: consul networks: - gnmic-net ports: - 8500:8500 - 8600:8600/udp command: agent -server -ui -node=server-1 -bootstrap-expect=1 -client=0.0.0.0 prometheus: image: prom/prometheus:latest container_name: prometheus volumes: - ./prometheus/:/etc/prometheus/ - prometheus-data:/prometheus command: - '--config.file=/etc/prometheus/prometheus.yaml' - '--storage.tsdb.path=/prometheus' - '--web.console.libraries=/usr/share/prometheus/console_libraries' - '--web.console.templates=/usr/share/prometheus/consoles' - '--log.level=debug' ports: - 9090:9090 networks: - gnmic-net nats: image: 'nats:latest' container_name: nats networks: - gnmic-net ports: - "4222:4222" - "6222:6222" - "8222:8222" volumes: prometheus-data: ================================================ FILE: examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/docker-compose/gnmic-collector.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! insecure: true encoding: json_ietf log: true api-server: enable-metrics: true # clustering config clustering: cluster-name: cluster1 targets-watch-timer: 30s locker: type: consul address: consul-agent:8500 targets: # Add targets configuration here # eg: # 192.168.1.131:57400: # username: gnmic # password: secret_password subscriptions: # Add subscriptions configuration here # e.g: # sub1: # paths: # - /interface/statistics # stream-mode: sample # sample-interval: 1s outputs: nats-output: type: nats address: nats:4222 subject: telemetry ================================================ FILE: examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/docker-compose/gnmic-relay.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 log: true inputs: nats-input: type: nats address: nats:4222 subject: telemetry outputs: - prom-output outputs: output1: type: prometheus service-registration: address: consul-agent:8500 use-lock: true ================================================ FILE: examples/deployments/3.pipelines/3b.gnmic-cluster-nats-gnmic-cluster-prometheus/docker-compose/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: consul-agent:8500 services: - prometheus-prom-output - cluster2-gnmic-api ================================================ FILE: examples/deployments/3.pipelines/4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/docker-compose/docker-compose.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 version: '3' networks: gnmic-net: driver: bridge services: gnmic-collector: &gnmic image: ghcr.io/openconfig/gnmic:latest container_name: gnmic-collector volumes: - ./gnmic-collector.yaml:/app/gnmic.yaml command: "subscribe --config /app/gnmic.yaml" networks: - gnmic-net depends_on: - nats gnmic-relay1: <<: *gnmic container_name: gnmic-relay1 volumes: - ./gnmic-relay1.yaml:/app/gnmic.yaml ports: - 9804:9804 depends_on: - nats - consul-agent gnmic-relay2: <<: *gnmic container_name: gnmic-relay2 volumes: - ./gnmic-relay2.yaml:/app/gnmic.yaml depends_on: - nats - influxdb consul-agent: image: hashicorp/consul:latest container_name: consul networks: - gnmic-net ports: - 8500:8500 - 8600:8600/udp command: agent -server -ui -node=server-1 -bootstrap-expect=1 -client=0.0.0.0 prometheus: image: prom/prometheus:latest container_name: prometheus volumes: - ./prometheus/:/etc/prometheus/ - prometheus-data:/prometheus command: - '--config.file=/etc/prometheus/prometheus.yaml' - '--storage.tsdb.path=/prometheus' - '--web.console.libraries=/usr/share/prometheus/console_libraries' - '--web.console.templates=/usr/share/prometheus/consoles' - '--log.level=debug' ports: - 9090:9090 networks: - gnmic-net nats: image: 'nats:latest' container_name: nats networks: - gnmic-net ports: - "4222:4222" - "6222:6222" - "8222:8222" influxdb: image: influxdb:1.8.10 container_name: influxdb networks: - gnmic-net ports: - "8083:8083" - "8086:8086" - "8090:8090" environment: - INFLUXDB_DATA_ENGINE=tsm1 - INFLUXDB_REPORTING_DISABLED=false - INFLUXDB_USER=gnmic - INFLUXDB_USER_PASSWORD=gnmic - INFLUXDB_DB=telemetry volumes: - influx-storage:/var/lib/influxdb volumes: prometheus-data: influx-storage: ================================================ FILE: examples/deployments/3.pipelines/4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/docker-compose/gnmic-collector.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 username: admin password: NokiaSrl1! insecure: true encoding: json_ietf log: true targets: # Add targets configuration here # eg: # 192.168.1.131:57400: # username: gnmic # password: secret_password subscriptions: # Add subscriptions configuration here # e.g: # sub1: # paths: # - /interface/statistics # stream-mode: sample # sample-interval: 1s outputs: nats-output: type: nats address: nats:4222 subject: telemetry ================================================ FILE: examples/deployments/3.pipelines/4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/docker-compose/gnmic-relay1.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 log: true inputs: nats-input: type: input address: nats:4222 subject: telemetry outputs: - prom-output outputs: prom-output: type: prometheus listen: "gnmic-relay1:9804" service-registration: address: consul-agent:8500 ================================================ FILE: examples/deployments/3.pipelines/4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/docker-compose/gnmic-relay2.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 log: true inputs: nats-input: type: input address: nats:4222 subject: telemetry outputs: - influxdb-output outputs: influxdb-output: type: influxdb url: http://influxdb:8086 bucket: telemetry # db name token: gnmic:gnmic # username:password batch-size: 1000 flush-timer: 10s ================================================ FILE: examples/deployments/3.pipelines/4.gnmic-nats-gnmic-prometheus-gnmic-influxdb/docker-compose/prometheus/prometheus.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: consul-agent:8500 ================================================ FILE: examples/pkg/capabilities_rpc/main.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package main import ( "context" "fmt" "log" "google.golang.org/protobuf/encoding/prototext" "github.com/openconfig/gnmic/pkg/api" ) func main() { // create a target tg, err := api.NewTarget( api.Name("srl1"), api.Address("10.0.0.1:57400"), api.Username("admin"), api.Password("admin"), api.SkipVerify(true), ) if err != nil { log.Fatal(err) } ctx, cancel := context.WithCancel(context.Background()) defer cancel() // create a gNMI client err = tg.CreateGNMIClient(ctx) if err != nil { log.Fatal(err) } defer tg.Close() // send a gNMI capabilities request to the created target capResp, err := tg.Capabilities(ctx) if err != nil { log.Fatal(err) } fmt.Println(prototext.Format(capResp)) } ================================================ FILE: examples/pkg/get_rpc/main.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package main import ( "context" "fmt" "log" "google.golang.org/protobuf/encoding/prototext" "github.com/openconfig/gnmic/pkg/api" ) func main() { // create a target tg, err := api.NewTarget( api.Name("srl1"), api.Address("srl1:57400"), api.Username("admin"), api.Password("admin"), api.SkipVerify(true), ) if err != nil { log.Fatal(err) } ctx, cancel := context.WithCancel(context.Background()) defer cancel() // create a gNMI client err = tg.CreateGNMIClient(ctx) if err != nil { log.Fatal(err) } defer tg.Close() // create a GetREquest getReq, err := api.NewGetRequest( api.Path("/system/name"), api.Encoding("json_ietf"), ) if err != nil { log.Fatal(err) } fmt.Println(prototext.Format(getReq)) // send the created gNMI GetRequest to the created target getResp, err := tg.Get(ctx, getReq) if err != nil { log.Fatal(err) } fmt.Println(prototext.Format(getResp)) } ================================================ FILE: examples/pkg/set_rpc/main.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package main import ( "context" "fmt" "log" "google.golang.org/protobuf/encoding/prototext" "github.com/openconfig/gnmic/pkg/api" ) func main() { // create a target tg, err := api.NewTarget( api.Name("srl1"), api.Address("srl1:57400"), api.Username("admin"), api.Password("admin"), api.SkipVerify(true), ) if err != nil { log.Fatal(err) } ctx, cancel := context.WithCancel(context.Background()) defer cancel() err = tg.CreateGNMIClient(ctx) if err != nil { log.Fatal(err) } defer tg.Close() // create a gNMI SetRequest setReq, err := api.NewSetRequest( api.Update( api.Path("/interface[name=ethernet-1/1]"), api.Value(map[string]interface{}{ "admin-state": "enable", }, "json_ietf")), ) if err != nil { log.Fatal(err) } fmt.Println(prototext.Format(setReq)) // send the created gNMI SetRequest to the created target setResp, err := tg.Set(ctx, setReq) if err != nil { log.Fatal(err) } fmt.Println(prototext.Format(setResp)) } ================================================ FILE: examples/pkg/subscribe_rpc/main.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package main import ( "context" "fmt" "log" "time" "google.golang.org/protobuf/encoding/prototext" "github.com/openconfig/gnmic/pkg/api" ) func main() { // create a target tg, err := api.NewTarget( api.Name("srl1"), api.Address("srl1:57400"), api.Username("admin"), api.Password("admin"), api.SkipVerify(true), ) if err != nil { log.Fatal(err) } ctx, cancel := context.WithCancel(context.Background()) defer cancel() err = tg.CreateGNMIClient(ctx) if err != nil { log.Fatal(err) } defer tg.Close() // create a gNMI subscribeRequest subReq, err := api.NewSubscribeRequest( api.SubscriptionListMode("stream"), api.Subscription( api.Path("system/name"), api.SubscriptionMode("sample"), api.SampleInterval(10*time.Second), )) if err != nil { log.Fatal(err) } fmt.Println(prototext.Format(subReq)) // start the subscription go tg.Subscribe(ctx, subReq, "sub1") // start a goroutine that will stop the subscription after x seconds go func() { select { case <-ctx.Done(): return case <-time.After(42 * time.Second): tg.StopSubscription("sub1") } }() subRspChan, subErrChan := tg.ReadSubscriptions() for { select { case rsp := <-subRspChan: fmt.Println(prototext.Format(rsp.Response)) case tgErr := <-subErrChan: log.Fatalf("subscription %q stopped: %v", tgErr.SubscriptionName, tgErr.Err) } } } ================================================ FILE: examples/plugins/demo/main.go ================================================ package main import ( "io" "log" "os" "github.com/hashicorp/go-plugin" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/formatters/event_plugin" ) const ( processorType = "event-add-device_function" loggingPrefix = "[" + processorType + "] " ) type MyEventProcessor struct { formatters.BaseProcessor Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` targetsConfigs map[string]*types.TargetConfig actionsDefinitions map[string]map[string]interface{} processorsDefinitions map[string]map[string]any logger *log.Logger } func (p *MyEventProcessor) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, p) p.setupLogger() if err != nil { return err } return nil } func (p *MyEventProcessor) Apply(event ...*formatters.EventMsg) []*formatters.EventMsg { for _, e := range event { if e.Tags == nil { e.Tags = make(map[string]string) } e.Tags["device_function"] = "CORE" } return event } func (p *MyEventProcessor) Close() error { return nil } func (p *MyEventProcessor) WithActions(act map[string]map[string]interface{}) { p.actionsDefinitions = act } func (p *MyEventProcessor) WithTargets(tcs map[string]*types.TargetConfig) { p.targetsConfigs = tcs } func (p *MyEventProcessor) WithProcessors(procs map[string]map[string]any) { p.processorsDefinitions = procs } func (p *MyEventProcessor) WithLogger(l *log.Logger) { } func (p *MyEventProcessor) setupLogger() { if !p.Debug { p.logger = log.New(io.Discard, "", 0) } } func main() { logger := log.New(os.Stderr, "", log.Flags()&^log.Ldate&^log.Ltime&^log.Lmsgprefix) logger.Printf("starting plugin") plug := &MyEventProcessor{logger: logger} plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "GNMIC_PLUGIN", MagicCookieValue: "gnmic", }, Plugins: map[string]plugin.Plugin{ processorType: &event_plugin.EventProcessorPlugin{Impl: plug}, }, Logger: nil, }) } ================================================ FILE: examples/plugins/event-add-hostname/README.md ================================================ # Add hostname processor plugin `event-add-hostname` is an event processor that gNMIc starts as a plugin. It enriches received gNMI notifications with the collector hostname as a tag. ## Build To build the plugin run: ```bash cd examples/plugins/event-add-hostname go build -o event-add-hostname ``` ## Running the plugin - To run the plugin point gNMIc to the directory where the plugin binary resides. Either using the flag `--plugin-processors-path | -P`: ```bash gnmic --config gnmic.yaml subscribe -P /path/to/plugin/bin ``` Or using the config file: ```yaml plugins: path: /path/to/plugin/bin glob: "*" start-timeout: 0s ``` This allows gNMIc to discover the plugin executable and initialize it. Make sure the files gNMIc loads are executable. - Next configure the plugin as a processor: ```yaml processors: proc1: event-add-hostname: debug: true # the tag name to add with the host hostname as a tag value. hostname-tag-name: "collector-host" # read-interval controls how often the plugin runs the hostname cmd to get the host hostanme # by default it's at most every 1 minute read-interval: 1m ``` The processor type `event-add-hostname` should match the executable filename. - Then add that processor under an output just like a you would do it with a regular processor: ```yaml outputs: out1: type: file format: event event-processors: - proc1 ``` The resulting event message should have a new tag called `collector-host` ```json [ { "name": "sub1", "timestamp": 1704572759243640092, "tags": { "collector-host": "kss", "interface_name": "ethernet-1/1", "source": "clab-ex-srl1", "subscription-name": "sub1" }, "values": { "/srl_nokia-interfaces:interface/statistics/out-octets": "4105346" } } ] ``` ================================================ FILE: examples/plugins/event-add-hostname/event-add-hostname.go ================================================ package main import ( "bytes" "log" "os" "os/exec" "sync" "time" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/formatters/event_plugin" ) const ( processorType = "event-add-hostname" loggingPrefix = "[" + processorType + "] " hostnameCmd = "hostname" ) type addHostnameProcessor struct { formatters.BaseProcessor Debug bool `mapstructure:"debug,omitempty" yaml:"debug,omitempty" json:"debug,omitempty"` ReadInterval time.Duration `mapstructure:"read-interval,omitempty" yaml:"read-interval,omitempty" json:"read-interval,omitempty"` HostnameTagName string `mapstructure:"hostname-tag-name,omitempty" yaml:"hostname-tag-name,omitempty" json:"hostname-tag-name,omitempty"` m *sync.RWMutex hostname string lastRead time.Time targetsConfigs map[string]*types.TargetConfig actionsDefinitions map[string]map[string]interface{} processorsDefinitions map[string]map[string]any logger hclog.Logger } func (p *addHostnameProcessor) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, p) if err != nil { return err } p.setupLogger() if p.ReadInterval <= 0 { p.ReadInterval = time.Minute } if p.HostnameTagName == "" { p.HostnameTagName = "collector-hostname" } return p.readHostname() } func (p *addHostnameProcessor) Apply(event ...*formatters.EventMsg) []*formatters.EventMsg { p.m.Lock() defer p.m.Unlock() err := p.readHostname() if err != nil { p.logger.Error("failed to read hostname", "error", err) } for _, e := range event { if e.Tags == nil { e.Tags = make(map[string]string) } e.Tags[p.HostnameTagName] = p.hostname } return event } func (p *addHostnameProcessor) Close() error { return nil } func (p *addHostnameProcessor) WithActions(act map[string]map[string]interface{}) { p.actionsDefinitions = act } func (p *addHostnameProcessor) WithTargets(tcs map[string]*types.TargetConfig) { p.targetsConfigs = tcs } func (p *addHostnameProcessor) WithProcessors(procs map[string]map[string]any) { p.processorsDefinitions = procs } func (p *addHostnameProcessor) WithLogger(l *log.Logger) { } func (p *addHostnameProcessor) setupLogger() { p.logger = hclog.New(&hclog.LoggerOptions{ Output: os.Stderr, TimeFormat: "2006/01/02 15:04:05.999999", }) if p.Debug { p.logger.SetLevel(hclog.Debug) } } func (p *addHostnameProcessor) readHostname() error { now := time.Now() if p.lastRead.After(now.Add(-p.ReadInterval)) { return nil } // cmd := exec.Command(hostnameCmd) out, err := cmd.Output() if err != nil { return err } p.hostname = string(bytes.TrimSpace(out)) p.lastRead = now return nil } func main() { logger := hclog.New(&hclog.LoggerOptions{ Output: os.Stderr, DisableTime: true, }) logger.Info("starting plugin processor", "name", processorType) plug := &addHostnameProcessor{ m: new(sync.RWMutex), } plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "GNMIC_PLUGIN", MagicCookieValue: "gnmic", }, Plugins: map[string]plugin.Plugin{ processorType: &event_plugin.EventProcessorPlugin{Impl: plug}, }, Logger: logger, }) } ================================================ FILE: examples/plugins/event-gnmi-get/README.md ================================================ # gNMI Get based notification enriching processor plugin `event-gnmi-get` is an event processor that gNMIc starts as a plugin. It enriches received gNMI notifications with tags retrieved using a gNMI Get RPC. ## Building the plugin ```bash cd examples/plugins/event-gnmi-get go build -o event-gnmi-get ``` ## Running the plugin - To run the plugin point gNMIc to the directory where the plugin binary resides. Either using the flag `--plugin-processors-path | -P`: ```bash gnmic --config gnmic.yaml subscribe -P /path/to/plugin/bin ``` Or using the config file: ```yaml plugins: path: /path/to/plugin/bin glob: "*" start-timeout: 0s ``` This allows gNMIc to discover the plugin executable and initialize it. Make sure the files gNMIc loads are executable. - Next configure the plugin as a processor: ```yaml processors: proc2: event-gnmi-get: debug: true encoding: ascii data-type: all paths: - path: "platform/chassis/type" tag-name: "chassis-type" - path: "platform/chassis/hw-mac-address" tag-name: "hw-mac-address" - path: "system/name/host-name" tag-name: "hostname" ``` The processor type `event-gnmi-get` should match the executable filename. - Then add that processor under an output just like a you would do it with a regular processor: ```yaml outputs: out1: type: file format: event event-processors: - proc2 ``` The resulting event message should have a set of new tags called `chassis-type`, `hw-mac-address` and `hostname`. ```json [ { "name": "sub1", "timestamp": 1704573345190497607, "tags": { "chassis-type": "7220 IXR-D2", "hostname": "srl1", "interface_name": "ethernet-1/1", "hw-mac-address": "1A:F2:00:FF:00:00", "source": "clab-ex-srl1", "subscription-name": "sub1" }, "values": { "/srl_nokia-interfaces:interface/statistics/out-octets": "4108666" } } ] ``` ## Examples ### trigger get request directly to the node ```yaml processors: proc1: event-gnmi-get: debug: true encoding: ascii data-type: all paths: - path: "platform/chassis/type" tag-name: "chassis-type" - path: "platform/chassis/hw-mac-address" tag-name: "hw-mac-address" - path: "system/name/host-name" tag-name: "hostname" ``` ### trigger get request through gNMIc's gNMI server ```yaml # enable gNMIc gNMI server gnmi-server: address: :57401 processors: proc1: event-gnmi-get: debug: true encoding: ascii data-type: all # set the gNMI Get target to the local gNMI server address target: localhost:57401 # include the actual target name in the GetRequest Prefix prefix-target: '{{ index .Tags "source" }}' paths: - path: "platform/chassis/type" tag-name: "chassis-type" - path: "platform/chassis/hw-mac-address" tag-name: "hw-mac-address" - path: "system/name/host-name" tag-name: "hostname" ``` ================================================ FILE: examples/plugins/event-gnmi-get/event-gnmi-get.go ================================================ package main import ( "bytes" "context" "fmt" "log" "math" "os" "strconv" "sync" "text/template" "time" "github.com/AlekSi/pointer" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/api" gpath "github.com/openconfig/gnmic/pkg/api/path" "github.com/openconfig/gnmic/pkg/api/target" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/formatters/event_plugin" gtemplate "github.com/openconfig/gnmic/pkg/gtemplate" ) const ( processorType = "event-gnmi-get" loggingPrefix = "[" + processorType + "] " defaultTarget = `{{ index .Tags "source" }}` ) type gNMIGetProcessor struct { formatters.BaseProcessor Debug bool `mapstructure:"debug,omitempty" yaml:"debug,omitempty" json:"debug,omitempty"` ReadPeriod time.Duration `mapstructure:"read-period,omitempty" yaml:"read-period,omitempty" json:"read-period,omitempty"` Target string `mapstructure:"target,omitempty" yaml:"target,omitempty" json:"target,omitempty"` PrefixTarget string `mapstructure:"prefix-target,omitempty" yaml:"prefix-target,omitempty" json:"prefix-target,omitempty"` Paths []*pathToTag `mapstructure:"paths,omitempty" yaml:"paths,omitempty" json:"paths,omitempty"` Type string `mapstructure:"data-type,omitempty" yaml:"type,omitempty" json:"type,omitempty"` Encoding string `mapstructure:"encoding,omitempty" yaml:"encoding,omitempty" json:"encoding,omitempty"` SkipVerify bool `mapstructure:"skip-verify,omitempty" yaml:"skip-verify,omitempty" json:"skip-verify,omitempty"` m *sync.RWMutex prefixTargetTpl *template.Template targetTpl *template.Template // values read indexed by targetName vals map[string]*readValues targetsConfigs map[string]*types.TargetConfig actionsDefinitions map[string]map[string]interface{} processorsDefinitions map[string]map[string]any logger hclog.Logger } type pathToTag struct { Path string `mapstructure:"path,omitempty" yaml:"path,omitempty" json:"path,omitempty"` TagName string `mapstructure:"tag-name,omitempty" yaml:"tag-name,omitempty" json:"tag-name,omitempty"` pathTpl *template.Template } type readValues struct { vals map[string]string lastRead time.Time } func (p *gNMIGetProcessor) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, p) if err != nil { return err } for _, o := range opts { o(p) } p.setupLogger() p.logger.Info("initializing", "processor", processorType, "cfg", cfg) if p.Target == "" { p.Target = defaultTarget } if p.ReadPeriod <= 0 { p.ReadPeriod = time.Minute } if p.Type == "" { p.Type = "all" } // init PrefixTarget if any if p.PrefixTarget != "" { p.prefixTargetTpl, err = gtemplate.CreateTemplate(fmt.Sprintf("%s-prefix-target", processorType), p.PrefixTarget) if err != nil { return err } } // init target template p.targetTpl, err = gtemplate.CreateTemplate(fmt.Sprintf("%s-target", processorType), p.Target) if err != nil { return err } // init paths templates for i, pd := range p.Paths { pd.pathTpl, err = gtemplate.CreateTemplate(fmt.Sprintf("path-%d", i), pd.Path) if err != nil { return err } } p.logger.Info("initialized", "processor", processorType) return nil } func (p *gNMIGetProcessor) Apply(event ...*formatters.EventMsg) []*formatters.EventMsg { p.m.Lock() defer p.m.Unlock() for _, e := range event { targetName, err := p.readPaths(e) if err != nil { p.logger.Error("failed to read paths", "error", err) } if _, ok := p.vals[targetName]; !ok { p.logger.Error("unknown target", "target", targetName) continue } if e.Tags == nil { e.Tags = make(map[string]string) } for k, v := range p.vals[targetName].vals { e.Tags[k] = v } } return event } func (p *gNMIGetProcessor) Close() error { return nil } func (p *gNMIGetProcessor) WithActions(act map[string]map[string]interface{}) { p.actionsDefinitions = act } func (p *gNMIGetProcessor) WithTargets(tcs map[string]*types.TargetConfig) { p.targetsConfigs = tcs } func (p *gNMIGetProcessor) WithProcessors(procs map[string]map[string]any) { p.processorsDefinitions = procs } func (p *gNMIGetProcessor) WithLogger(l *log.Logger) { } func (p *gNMIGetProcessor) setupLogger() { p.logger = hclog.New(&hclog.LoggerOptions{ Output: os.Stderr, TimeFormat: "2006/01/02 15:04:05.999999", }) if p.Debug { p.logger.SetLevel(hclog.Debug) } } func (p *gNMIGetProcessor) readPaths(e *formatters.EventMsg) (string, error) { now := time.Now() var err error b := new(bytes.Buffer) switch { case p.prefixTargetTpl != nil: err = p.prefixTargetTpl.Execute(b, e) case p.targetTpl != nil: err = p.targetTpl.Execute(b, e) } if err != nil { return "", err } targetName := b.String() _, ok := p.vals[targetName] if !ok { p.vals[targetName] = &readValues{ vals: map[string]string{}, } } if p.vals[targetName].lastRead.After(now.Add(-p.ReadPeriod)) { return targetName, nil } // vals, err := p.gnmiGet(targetName, e) if err != nil { return "", err } p.logger.Debug("vals from get", "vals", vals) p.vals[targetName].vals = vals p.vals[targetName].lastRead = time.Now() return targetName, nil } func (p *gNMIGetProcessor) gnmiGet(targetName string, e *formatters.EventMsg) (map[string]string, error) { tc, err := p.selectTarget(targetName) if err != nil { return nil, err } t := target.NewTarget(tc) req, keyPathMapping, err := p.createGetRequest(e) if err != nil { return nil, err } p.logger.Debug("keyPathMapping", "mapping", keyPathMapping) ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second) defer cancel() err = t.CreateGNMIClient(ctx) if err != nil { return nil, err } defer t.Close() resp, err := t.Get(ctx, req) if err != nil { return nil, fmt.Errorf("target %q GetRequest failed: %v", t.Config.Name, err) } return p.extractTags(resp, keyPathMapping), nil } func (p *gNMIGetProcessor) selectTarget(tName string) (*types.TargetConfig, error) { if tName == "" { return nil, fmt.Errorf("target name is empty") } if p.prefixTargetTpl != nil { tc := &types.TargetConfig{ Name: p.Target, Address: p.Target, SkipVerify: pointer.ToBool(p.SkipVerify), Timeout: 10 * time.Second, } if !p.SkipVerify { tc.Insecure = pointer.ToBool(true) } return tc, nil } if tc, ok := p.targetsConfigs[tName]; ok { return tc, nil } return nil, fmt.Errorf("unknown target %s", tName) } func (p *gNMIGetProcessor) createGetRequest(e *formatters.EventMsg) (*gnmi.GetRequest, map[string]string, error) { gnmiOpts := make([]api.GNMIOption, 0, 3) gnmiOpts = append(gnmiOpts, api.Encoding(p.Encoding)) gnmiOpts = append(gnmiOpts, api.DataType(p.Type)) var err error b := new(bytes.Buffer) if p.prefixTargetTpl != nil { err = p.prefixTargetTpl.Execute(b, e) if err != nil { return nil, nil, fmt.Errorf("prefix-target parse error: %v", err) } ps := b.String() gnmiOpts = append(gnmiOpts, api.Target(ps)) } pathToKey := map[string]string{} for _, ptt := range p.Paths { b.Reset() err = ptt.pathTpl.Execute(b, e) if err != nil { return nil, nil, fmt.Errorf("path parse error: %v", err) } ps := b.String() gnmiOpts = append(gnmiOpts, api.Path(ps)) pathToKey[ps] = ptt.TagName } req, err := api.NewGetRequest(gnmiOpts...) if err != nil { return nil, nil, err } return req, pathToKey, nil } func (p *gNMIGetProcessor) extractTags(rsp *gnmi.GetResponse, mapping map[string]string) map[string]string { rs := map[string]string{} for _, n := range rsp.GetNotification() { for _, upd := range n.GetUpdate() { xp := gpath.GnmiPathToXPath(upd.GetPath(), false) p.logger.Debug("path", "xp", xp, "v", upd.GetVal()) if k, ok := mapping[xp]; ok { rs[k] = extractValue(upd.GetVal()) } } } return rs } func extractValue(tv *gnmi.TypedValue) string { switch tv.Value.(type) { case *gnmi.TypedValue_AsciiVal: return tv.GetAsciiVal() case *gnmi.TypedValue_BoolVal: return fmt.Sprintf("%t", tv.GetBoolVal()) case *gnmi.TypedValue_BytesVal: return string(tv.GetBytesVal()) case *gnmi.TypedValue_DecimalVal: //lint:ignore SA1019 still need DecimalVal for backward compatibility v := tv.GetDecimalVal() f := float64(v.Digits) / math.Pow10(int(v.Precision)) return strconv.FormatFloat(f, 'e', -1, 64) case *gnmi.TypedValue_FloatVal: //lint:ignore SA1019 still need GetFloatVal for backward compatibility return strconv.FormatFloat(float64(tv.GetFloatVal()), 'e', -1, 64) case *gnmi.TypedValue_DoubleVal: return strconv.FormatFloat(tv.GetDoubleVal(), 'e', -1, 64) case *gnmi.TypedValue_IntVal: return strconv.Itoa(int(tv.GetIntVal())) case *gnmi.TypedValue_StringVal: return tv.GetStringVal() case *gnmi.TypedValue_UintVal: return strconv.Itoa(int(tv.GetUintVal())) case *gnmi.TypedValue_LeaflistVal: // TODO: case *gnmi.TypedValue_ProtoBytes: return string(tv.GetProtoBytes()) // ? case *gnmi.TypedValue_AnyVal: return string(tv.GetAnyVal().GetValue()) // ? case *gnmi.TypedValue_JsonIetfVal: jsondata := tv.GetJsonIetfVal() return string(jsondata) case *gnmi.TypedValue_JsonVal: jsondata := tv.GetJsonVal() return string(jsondata) } return "" } func main() { logger := hclog.New(&hclog.LoggerOptions{ Output: os.Stderr, DisableTime: true, }) logger.Info("starting plugin processor", "name", processorType) plug := &gNMIGetProcessor{ m: new(sync.RWMutex), vals: make(map[string]*readValues), } plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "GNMIC_PLUGIN", MagicCookieValue: "gnmic", }, Plugins: map[string]plugin.Plugin{ processorType: &event_plugin.EventProcessorPlugin{Impl: plug}, }, Logger: logger, }) } ================================================ FILE: examples/plugins/go-event-plugin/event-go-plugin.go ================================================ package main import ( "log" "os" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/formatters/event_plugin" ) const ( processorType = "event-go-plugin" loggingPrefix = "[" + processorType + "] " ) type goSampleProcessorPlugin struct { formatters.BaseProcessor Debug bool `mapstructure:"debug,omitempty" yaml:"debug,omitempty" json:"debug,omitempty"` targetsConfigs map[string]*types.TargetConfig actionsDefinitions map[string]map[string]interface{} processorsDefinitions map[string]map[string]any logger hclog.Logger } func (p *goSampleProcessorPlugin) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, p) if err != nil { return err } for _, o := range opts { o(p) } // initialize logger p.logger.Info("initializing", "processor", processorType, "cfg", cfg) // initialize your processor's config and handle the options // - set default // - validate config p.logger.Info("initialized", "processor", processorType) return nil } func (p *goSampleProcessorPlugin) Apply(event ...*formatters.EventMsg) []*formatters.EventMsg { // apply the processor's logic here // return the new/modified event messages return event } func (p *goSampleProcessorPlugin) Close() error { return nil } func (p *goSampleProcessorPlugin) WithActions(act map[string]map[string]interface{}) { p.actionsDefinitions = act } func (p *goSampleProcessorPlugin) WithTargets(tcs map[string]*types.TargetConfig) { p.targetsConfigs = tcs } func (p *goSampleProcessorPlugin) WithProcessors(procs map[string]map[string]any) { p.processorsDefinitions = procs } func (p *goSampleProcessorPlugin) WithLogger(l *log.Logger) { } func main() { logger := hclog.New(&hclog.LoggerOptions{ Output: os.Stderr, DisableTime: true, }) logger.Info("starting plugin processor", "name", processorType) plug := &goSampleProcessorPlugin{} plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "GNMIC_PLUGIN", MagicCookieValue: "gnmic", }, Plugins: map[string]plugin.Plugin{ processorType: &event_plugin.EventProcessorPlugin{Impl: plug}, }, Logger: logger, }) } ================================================ FILE: examples/plugins/minimal/event-my-processor.go ================================================ package main import ( "log" "os" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/formatters/event_plugin" ) const ( // TODO: Choose a name for your processor processorType = "event-my-processor" ) type myProcessor struct { formatters.BaseProcessor // TODO: Add your config struct fields here } func (p *myProcessor) Init(cfg interface{}, opts ...formatters.Option) error { // decode the plugin config err := formatters.DecodeConfig(cfg, p) if err != nil { return err } // apply options for _, o := range opts { o(p) } // TODO: Other initialization steps... return nil } func (p *myProcessor) Apply(event ...*formatters.EventMsg) []*formatters.EventMsg { // TODO: The processor's logic is applied here return event } func (p *myProcessor) Close() error { return nil } func (p *myProcessor) WithActions(act map[string]map[string]interface{}) { } func (p *myProcessor) WithTargets(tcs map[string]*types.TargetConfig) { } func (p *myProcessor) WithProcessors(procs map[string]map[string]any) { } func (p *myProcessor) WithLogger(l *log.Logger) { } func main() { logger := hclog.New(&hclog.LoggerOptions{ Output: os.Stderr, DisableTime: true, }) logger.Info("starting plugin processor", "name", processorType) // TODO: Create and initialize your processor's struct plug := &myProcessor{} // start it plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "GNMIC_PLUGIN", MagicCookieValue: "gnmic", }, Plugins: map[string]plugin.Plugin{ processorType: &event_plugin.EventProcessorPlugin{Impl: plug}, }, Logger: logger, }) } ================================================ FILE: examples/set-request-templates/Nokia/SRL/1.interfaces/interfaces_template.gotmpl ================================================ {{ $target := index .Vars .TargetName }} updates: {{- range $interface := $target.interfaces }} - path: "/interface[name={{ $interface.name }}]" encoding: "json_ietf" value: admin-state: {{ $interface.admin_state | default "disable" }} description: {{ $interface.description | default "" }} {{- if $interface.mtu }} mtu: {{ $interface.mtu }} {{- end }} {{- if $interface.vlan_tagging }} vlan-tagging: {{ $interface.vlan_tagging }} {{- end }} {{- if $ethernet := $interface.ethernet }} ethernet: {{- if $ethernet.aggregate_id }} aggregate-id: {{ $ethernet.aggregate_id }} {{- end }} {{- if $ethernet.auto_negotiate }} auto-negotiate: {{ $ethernet.auto_negotiate }} {{- end }} {{- if $ethernet.duplex_mode }} duplex-mode: {{ $ethernet.duplex_mode }} {{- end }} {{- if $ethernet.flow_control.receive }} flow-control: receive: {{ $ethernet.flow_control.receive }} {{- end }} {{- end }} {{- if $interface.lag }} lag: {{- if $interface.lag.lag_type }} lag-type: {{ $interface.lag.lag_type }} {{- end }} {{- if $interface.lag.min_links }} min-links: {{ $interface.lag.min_links }} {{- end }} {{- if $interface.lag.member_speed }} member-speed: {{ $interface.lag.member_speed }} {{- end }} {{- if $interface.lag.lacp_fallback_mode }} lacp-fallback-mode: {{ $interface.lag.lacp_fallback_mode }} {{- end }} {{- if $interface.lag.lacp_fallback_timeout }} lacp-fallback-timeout: {{ $interface.lag.lacp_fallback_timeout }} {{- end }} {{- if $interface.lag.lag_speed }} lag-speedt: {{ $interface.lag.lag_speed }} {{- end }} {{- end }} {{- end }} ================================================ FILE: examples/set-request-templates/Nokia/SRL/1.interfaces/interfaces_template_vars.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 node1: interfaces: - name: # ethernet-1/1 admin_state: # "enable" | "disable" description: vlan_tagging: # true || false mtu: loopback-mode: # true || false ethernet: aggregate_id: auto_negotiate: duplex_mode: flow_control: receive: # true || false lag: lag_type: min_links: member_speed: lacp_fallback_mode: lacp_fallback_timeout: lag_speed: subinterface: - admin_state: # "enable" | "disable" ipv4_address: ipv6_address: vlan_id: untagged: {} acl: input: ipv4_filter: ipv6_filter: output: ipv4_filter: ipv6_filter: ================================================ FILE: examples/set-request-templates/Nokia/SRL/1.interfaces/subinterfaces_template.gotmpl ================================================ updates: {{ $target := index .Vars .TargetName }} {{- range $interface := $target.interfaces }} {{- range $idx, $subinterface := $interface.subinterface }} - path: "/interface[name={{ $interface.name }}]/subinterface[index={{ $idx }}]" encoding: "json_ietf" value: admin-state: {{ $subinterface.admin_state | default "disable" }} {{- if $subinterface.type }} type: {{ $subinterface.type }} {{- end }} {{- if $subinterface.description }} description: {{ $subinterface.description }} {{- end }} {{- if $subinterface.ip_mtu }} ip-mtu: {{ $subinterface.ip_mtu }} {{- end }} {{- if $subinterface.ipv4_address }} ipv4: address: - ip-prefix: {{ $subinterface.ipv4_address }} {{- end }} {{- if $subinterface.ipv6_address }} ipv6: address: - ip-prefix: {{ $subinterface.ipv6_address }} {{- end }} {{- if $subinterface.vlan_id }} vlan: encap: single-tagged: vlan-id: {{ $subinterface.vlan_id }} {{- else if $subinterface.untagged }} vlan: encap: untagged: {} {{- end }} {{- if $acl := $subinterface.acl }} acl: {{- if $input := $acl.input }} input: {{- if $input.ipv4_filter }} ipv4-filter: {{ $input.ipv4_filter }} {{- end }} {{- if $input.ipv6_filter }} ipv6-filter: {{ $acl.input.ipv6_filter }} {{- end }} {{- end }} {{- if $output := $acl.output }} output: {{- if $output.ipv4_filter }} ipv4-filter: {{ $output.ipv4_filter }} {{- end }} {{- if $output.ipv6_filter }} ipv6-filter: {{ $output.ipv6_filter }} {{- end }} {{- end }} {{- end }} {{- end }} {{- end }} ================================================ FILE: examples/set-request-templates/Nokia/SRL/2.network-instance/network_instance_bgp_evpn_template.gotmpl ================================================ replaces: {{- range $netins := index .Vars .TargetName "network-instances" }} {{- if $bgpevpn := index $netins "protocols" "bgp-evpn" }} - path: "/network-instance[name={{ index $netins "name" }}/protocols/bgp-evpn]" encoding: json_ietf value: bgp-instance - id: 1 admin-state: {{ index $bgpevpn "admin-state" | default "disable" }} default-admin-tag: {{ index $bgpevpn "default-admin-tag" | default 0 }} encapsulation-type: {{ index $bgpevpn "encapsulation-type" | default "vxlan" }} {{- if index $bgpevpn "vxlan-interface" }} vxlan-interface: {{ index $bgpevpn "vxlan-interface" }} {{- end }} {{- if index $bgpevpn "evi" }} evi: {{ index $bgpevpn "evi" }} {{- end }} ecmp: {{ index $bgpevpn "ecmp" | default 1 }} {{- if $routes := index $bgpevpn "routes" }} routes: {{- if $routetable := index $routes "route-table" }} route-table: mac-ip: advertise-gateway-mac: {{ index $routetable "mac-ip" "advertise-gateway-mac" | default false }} {{- end }} {{- if $bridgetable := index $routes "bridge-table" }} bridge-table: mac-ip: advertise: {{ index $bridgetable "mac-ip" "advertise" | default false }} inclusive-mcast: advertise: {{ index $bridgetable "inclusive-mcast" "advertise" | default true }} {{- if index $bridgetable "inclusive-mcast" "originating-ip" }} originating-ip: {{ index $bridgetable "inclusive-mcast" "originating-ip" }} {{- end }} {{- if index $bridgetable "next-hop" }} next-hop: {{ index $bridgetable "next-hop" }} {{- end }} {{- end }} {{- end }} {{- end }} {{- end }} ================================================ FILE: examples/set-request-templates/Nokia/SRL/2.network-instance/network_instance_bgp_template.gotmpl ================================================ replaces: {{- range $netins := index .Vars .TargetName "network-instances" }} {{- if $bgp := index $netins "protocols" "bgp" }} - path: "/network-instance[name={{ index $netins "name" }}/protocols/bgp]" encoding: json_ietf value: admin-state: {{ index $bgp "admin-state" | default "disable" }} {{- end }} {{- end }} ================================================ FILE: examples/set-request-templates/Nokia/SRL/2.network-instance/network_instance_bgp_vpn_template.gotmpl ================================================ replaces: {{- range $netins := index .Vars .TargetName "network-instances" }} {{- if $bgpvpn := index $netins "protocols" "bgp-vpn" }} - path: "/network-instance[name={{ index $netins "name" }}/protocols/bgp-vpn]" encoding: json_ietf value: bgp-instance: {{- range $idx, $bgpins := $bgpvpn}} - id: {{ $idx }} admin-state: {{ index $bgpvpn "admin-state" | default "disable" }} default-admin-tag: {{ index $bgpvpn "default-admin-tag" | default 0 }} {{- end }} {{- end }} {{- end }} ================================================ FILE: examples/set-request-templates/Nokia/SRL/2.network-instance/network_instance_template.gotmpl ================================================ replaces: {{- range $netins := index .Vars .TargetName "network-instances" }} - path: "/network-instance[name={{ index $netins "name" }}]" encoding: json_ietf value: admin-state: {{ index $netins "admin-state" | default "disable" }} description: {{ index $netins "description" | default "" }} type: {{ index $netins "type" | default "default" }} {{- if index $netins "path-mtu-disovery" }} mtu: path-mtu-discovery: {{ index $netins "path-mtu-disovery" }} {{- end }} {{- if index $netins "router-id" }} router-id: {{ index $netins "router-id" }} {{- end }} {{ $interfaces := index $netins "interfaces" }} {{- if ne (len $interfaces) 0 }} interface: {{- range $interface := $interfaces }} - name: {{ $interface }} {{- end }} {{- end }} {{- if index $netins "vxlan-interface" }} vxlan-interface: {{ index $netins "vxlan-interface" }} {{- end }} {{- if index $netins "ip-forwarding" }} ip-forwarding: receive-ipv4-check: {{ index $netins "ip-forwarding" "receive-ipv4-check" | default false }} receive-ipv6-check: {{ index $netins "ip-forwarding" "receive-ipv6-check" | default false }} {{- end }} {{ $protocols := index $netins "protocols" }} {{- if ne (len $protocols) 0 }} protocols: {{- end }} {{- end }} ================================================ FILE: examples/set-request-templates/Nokia/SRL/2.network-instance/network_instance_template_vars.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 node1:57400: network-intances: - name: "" admin-state: enable aggregate-routes: route: - admin-state: enable aggregator: address: "" as-number: "" communities: add: "" generate-icmp: "" prefix: "" summary-only: "false" bridge-table: discard-unknown-dest-mac: "false" mac-duplication: action: stop-learning admin-state: enable hold-down-time: "9" monitoring-window: "3" num-moves: "5" mac-learning: admin-state: enable aging: admin-state: enable age-time: "300" mac-limit: maximum-entries: "250" warning-threshold-pct: "95" protect-anycast-gw-mac: "false" static-mac: mac: - address: "" destination: "" description: "" interface: - name: "" ip-forwarding: receive-ipv4-check: "" receive-ipv6-check: "" ip-load-balancing: resilient-hash-prefix: - hash-buckets-per-path: "1" ip-prefix: "" max-paths: "1" mpls: admin-state: disable static-mpls-entry: - collect-stats: "false" next-hop-group: "" operation: swap preference: "5" top-label: "" ttl-propagation: "false" mtu: path-mtu-discovery: "" next-hop-groups: group: - admin-state: enable blackhole: generate-icmp: "false" name: "" nexthop: - admin-state: enable failure-detection: enable-bfd: local-address: "" local-discriminator: "" remote-discriminator: "" index: "" ip-address: "" pushed-mpls-label-stack: "" resolve: "true" protocols: bgp: admin-state: enable as-path-options: allow-own-as: "0" remove-private-as: ignore-peer-as: "false" leading-only: "false" mode: disabled authentication: keychain: "" autonomous-system: "" convergence: min-wait-to-advertise: "0" dynamic-neighbors: accept: match: - allowed-peer-as: "" peer-group: "" prefix: "" max-sessions: "0" ebgp-default-policy: export-reject-all: "true" import-reject-all: "true" evpn: admin-state: disable advertise-ipv6-next-hops: "false" keep-all-routes: "" rapid-update: "false" export-policy: "" failure-detection: enable-bfd: "false" fast-failover: "true" graceful-restart: admin-state: disable stale-routes-time: "360" group: - admin-state: enable as-path-options: allow-own-as: "" remove-private-as: ignore-peer-as: "false" leading-only: "false" mode: "" replace-peer-as: "" authentication: keychain: "" description: "" evpn: admin-state: "" advertise-ipv6-next-hops: "" prefix-limit: max-received-routes: "4294967295" warning-threshold-pct: "90" export-policy: "" failure-detection: enable-bfd: "" fast-failover: "" graceful-restart: admin-state: "" stale-routes-time: "" group-name: "" import-policy: "" ipv4-unicast: admin-state: "" advertise-ipv6-next-hops: "" prefix-limit: max-received-routes: "4294967295" warning-threshold-pct: "90" receive-ipv6-next-hops: "" ipv6-unicast: admin-state: "" prefix-limit: max-received-routes: "4294967295" warning-threshold-pct: "90" local-as: - as-number: "" prepend-global-as: "true" prepend-local-as: "true" local-preference: "" next-hop-self: "false" peer-as: "" route-reflector: client: "" cluster-id: "" send-community: large: "" standard: "" send-default-route: export-policy: "" ipv4-unicast: "false" ipv6-unicast: "false" timers: connect-retry: "120" hold-time: "90" keepalive-interval: "" minimum-advertisement-interval: "5" trace-options: flag: - modifier: "" name: "" transport: local-address: "" passive-mode: "false" tcp-mss: "" import-policy: "" ipv4-unicast: admin-state: enable advertise-ipv6-next-hops: "false" convergence: max-wait-to-advertise: "0" multipath: allow-multiple-as: "true" max-paths-level-1: "1" max-paths-level-2: "1" receive-ipv6-next-hops: "false" ipv6-unicast: admin-state: disable convergence: max-wait-to-advertise: "0" multipath: allow-multiple-as: "true" max-paths-level-1: "1" max-paths-level-2: "1" local-preference: "100" neighbor: - admin-state: enable as-path-options: allow-own-as: "" remove-private-as: ignore-peer-as: "false" leading-only: "false" mode: "" replace-peer-as: "" authentication: keychain: "" description: "" evpn: admin-state: "" advertise-ipv6-next-hops: "" prefix-limit: max-received-routes: "" warning-threshold-pct: "" export-policy: "" failure-detection: enable-bfd: "" fast-failover: "" graceful-restart: admin-state: "" stale-routes-time: "" warm-restart: admin-state: "" import-policy: "" ipv4-unicast: admin-state: "" advertise-ipv6-next-hops: "" prefix-limit: max-received-routes: "" warning-threshold-pct: "" receive-ipv6-next-hops: "" ipv6-unicast: admin-state: "" prefix-limit: max-received-routes: "" warning-threshold-pct: "" local-as: - as-number: "" prepend-global-as: "" prepend-local-as: "" local-preference: "" next-hop-self: "" peer-address: "" peer-as: "" peer-group: "" route-reflector: client: "" cluster-id: "" send-community: large: "" standard: "" send-default-route: export-policy: "" ipv4-unicast: "" ipv6-unicast: "" timers: connect-retry: "" hold-time: "" keepalive-interval: "" minimum-advertisement-interval: "" trace-options: flag: - modifier: "" name: "" transport: local-address: "" passive-mode: "" tcp-mss: "" preference: ebgp: "170" ibgp: "170" route-advertisement: rapid-withdrawal: "false" wait-for-fib-install: "true" route-reflector: client: "false" cluster-id: "" router-id: "" send-community: large: "true" standard: "true" trace-options: flag: - modifier: "" name: "" transport: tcp-mss: "1024" bgp-evpn: bgp-instance: - admin-state: enable default-admin-tag: "0" ecmp: "1" encapsulation-type: vxlan evi: "" id: "" routes: bridge-table: inclusive-mcast: advertise: "true" originating-ip: "" mac-ip: advertise: "true" next-hop: use-system-ipv4-address route-table: mac-ip: advertise-gateway-mac: "false" vxlan-interface: "" bgp-vpn: bgp-instance: - export-policy: "" id: "" import-policy: "" route-distinguisher: rd: "" route-target: export-rt: "" import-rt: "" directly-connected: te-database-install: bgp-ls: bgp-ls-identifier: "" igp-identifier: "" isis: instance: - admin-state: disable attached-bit: ignore: "false" suppress: "false" authentication: csnp-authentication: "" hello-authentication: "" keychain: "" psnp-authentication: "" auto-cost: reference-bandwidth: "" export-policy: "" graceful-restart: helper-mode: "false" inter-level-propagation-policies: level1-to-level2: summary-address: - ip-prefix: "" route-tag: "" interface: - admin-state: enable authentication: hello-authentication: "" keychain: "" circuit-type: "" hello-padding: disable interface-name: "" ipv4-unicast: admin-state: enable enable-bfd: "false" include-bfd-tlv: "false" ipv6-unicast: admin-state: enable enable-bfd: "false" include-bfd-tlv: "false" ldp-synchronization: disable: "" end-of-lib: "" hold-down-timer: "" level: - authentication: keychain: "" disable: "false" ipv6-unicast-metric: "" level-number: "" metric: "" priority: "64" timers: hello-interval: "9" hello-multiplier: "3" passive: "false" timers: csnp-interval: "10" lsp-pacing-interval: "100" trace-options: trace: "" ipv4-unicast: admin-state: enable ipv6-unicast: admin-state: enable multi-topology: "false" ldp-synchronization: end-of-lib: "false" hold-down-timer: "60" level: - authentication: csnp-authentication: "" hello-authentication: "" keychain: "" psnp-authentication: "" bgp-ls-exclude: "false" level-number: "" metric-style: wide route-preference: external: "" internal: "" trace-options: trace: "" level-capability: L2 max-ecmp-paths: "1" name: "" net: "" overload: advertise-external: "false" advertise-interlevel: "false" immediate: max-metric: "false" set-bit: "false" on-boot: max-metric: "" set-bit: "" timeout: "" poi-tlv: "false" te-database-install: bgp-ls: bgp-ls-identifier: "" igp-identifier: "" timers: lsp-generation: initial-wait: "10" max-wait: "5000" second-wait: "1000" lsp-lifetime: "1200" lsp-refresh: half-lifetime: "true" interval: "600" spf: initial-wait: "1000" max-wait: "10000" second-wait: "1000" trace-options: trace: "" traffic-engineering: advertisement: "false" legacy-link-attribute-advertisement: "true" transport: lsp-mtu-size: "1492" ldp: admin-state: disable discovery: interfaces: hello-holdtime: "15" hello-interval: "5" interface: - admin-state: "" hello-holdtime: "15" hello-interval: "5" ipv4: admin-state: enable name: "" dynamic-label-block: "" graceful-restart: helper-enable: "false" max-reconnect-time: "120" max-recovery-time: "120" ipv4: fec-resolution: longest-prefix: "false" multipath: max-paths: "" peers: peer: - ipv4: fec-limit: "" label-space-id: "" lsr-id: "" tcp-transport: authentication: keychain: "" session-keepalive-holdtime: "180" session-keepalive-interval: "60" tcp-transport: authentication: keychain: "" trace-options: interface: - name: "" peer: - label-space-id: "" lsr-id: "" linux: export-neighbors: "true" export-routes: "false" import-routes: "false" ospf: instance: - address-family: "" admin-state: disable advertise-router-capability: "" area: - advertise-router-capability: "true" area-id: "" area-range: - advertise: "true" ip-prefix-mask: "" bgp-ls-exclude: "false" blackhole-aggregate: "true" export-policy: "" interface: - admin-state: enable advertise-router-capability: "true" advertise-subnet: "true" authentication: keychain: "" dead-interval: "40" failure-detection: enable-bfd: "false" hello-interval: "10" interface-name: "" interface-type: "" lsa-filter-out: none metric: "" mtu: "" passive: "" priority: "1" retransmit-interval: "5" trace-options: trace: adjacencies: "" interfaces: "" packet: detail: "" modifier: "" type: "" transit-delay: "1" nssa: area-range: - advertise: "true" ip-prefix-mask: "" originate-default-route: adjacency-check: "true" type-nssa: "false" redistribute-external: "" summaries: "" stub: default-metric: "1" summaries: "" asbr: trace-path: none export-limit: log-percent: "" number: "" export-policy: "" external-db-overflow: interval: "0" limit: "0" external-preference: "150" graceful-restart: helper-mode: "false" strict-lsa-checking: "false" instance-id: "" max-ecmp-paths: "1" name: "" overload: active: "false" overload-include-ext-1: "false" overload-include-ext-2: "false" overload-include-stub: "false" overload-on-boot: timeout: "60" rtr-adv-lsa-limit: log-only: "" max-lsa-count: "" overload-timeout: "" warning-threshold: "0" preference: "10" reference-bandwidth: "400000000" router-id: "" te-database-install: bgp-ls: bgp-ls-identifier: "" igp-identifier: "" timers: incremental-spf-wait: "1000" lsa-accumulate: "1000" lsa-arrival: "1000" lsa-generate: lsa-initial-wait: "5000" lsa-second-wait: "5000" max-lsa-wait: "5000" redistribute-delay: "1000" spf-wait: spf-initial-wait: "1000" spf-max-wait: "10000" spf-second-wait: "1000" trace-options: trace: adjacencies: "" graceful-restart: "" interfaces: "" lsdb: link-state-id: "" router-id: "" type: "" misc: "" packet: detail: "" modifier: "" type: "" routes: dest-address: "" spf: dest-address: "" traffic-engineering: advertisement: "false" legacy-link-attribute-advertisement: "true" version: "" router-id: "" static-routes: route: - admin-state: enable metric: "1" next-hop-group: "" preference: "5" prefix: "" traffic-engineering: admin-groups: group: - bit-position: "" name: "" autonomous-system: "" interface: - admin-group: "" delay: static: "" interface-name: "" srlg-membership: "" te-metric: "" ipv4-te-router-id: "" ipv6-te-router-id: "" shared-risk-link-groups: group: - cost: "" name: "" static-member: - from-address: "" to-address: "" value: "" type: default vxlan-interface: - name: "" ================================================ FILE: examples/set-request-templates/Nokia/SRL/3.acl/acl_template.gotmpl ================================================ replaces: - path: "/acl" encoding: "json_ietf" value: {{ index .Vars "acl" }} ================================================ FILE: examples/set-request-templates/Nokia/SRL/3.acl/acl_template_vars.yaml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 acl: capture-filter: ipv4-filter: statistics-per-entry: # true | false entry: - sequence-id: description: match: source-ip: prefix: address: mask: destination-ip: prefix: address: mask: source-port: operator: value: range: start: end: destination-port: operator: value: range: start: end: icmp: type: code: [] tcp-flags: protocol: fragment: first-fragment: action: accept: {} copy: {} ipv6-filter: statistics-per-entry: # true | false entry: - sequence-id: description: match: source-ip: prefix: address: mask: destination-ip: prefix: address: mask: icmp6: type: code: [] next-header: protocol: fragment: first-fragment: action: accept: {} drop: {} cpm-filter: ipv4-filter: statistics-per-entry: # true | false entry: - sequence-id: description: match: source-ip: prefix: address: mask: destination-ip: prefix: address: mask: source-port: operator: value: range: start: end: destination-port: operator: value: range: start: end: icmp: type: code: [] tcp-flags: protocol: fragment: first-fragment: action: accept: log: # true | false rate-limit: distributed-policer: system-cpu-policer: drop: log: # true | false ipv6-filter: statistics-per-entry: # true | false entry: - sequence-id: description: match: source-ip: prefix: address: mask: destination-ip: prefix: address: mask: icmp6: type: code: [] next-header: protocol: fragment: first-fragment: action: accept: log: # true | false rate-limit: distributed-policer: system-cpu-policer: drop: log: # true | false ipv4-filter: - name: "" description: subinterface-specific: statistics-per-entry: # true | false entry: - sequence-id: description: action: accept: log: # true | false drop: log: # true | false match: source-ip: prefix: address: mask: destination-ip: prefix: address: mask: source-port: operator: value: range: start: end: destination-port: operator: value: range: start: end: icmp: type: code: [] tcp-flags: protocol: fragment: first-fragment: ipv6-filter: - name: "" description: subinterface-specific: statistics-per-entry: # true | false entry: - sequence-id: description: action: accept: log: # true | false drop: log: # true | false match: source-ip: prefix: address: mask: destination-ip: prefix: address: mask: source-port: operator: value: range: start: end: destination-port: operator: value: range: start: end: icmp6: type: code: [] next-header: tcp-flags: protocol: fragment: first-fragment: policers: policer: - name: "" entry-specific: # true | false peak-rate: max-burst: system-cpu-policer: - name: "" entry-specific: # true | false peak-packet-rate: max-packet-burst: system-filter: ipv4-filter: entry: - sequence-id: description: action: accept: drop: log: # true | false match: source-ip: prefix: address: mask: destination-ip: prefix: address: mask: source-port: operator: value: range: start: end: destination-port: operator: value: range: start: end: icmp: type: code: [] tcp-flags: protocol: fragment: first-fragment: ipv6-filter: entry: - sequence-id: description: action: accept: drop: log: # true | false match: source-ip: prefix: address: mask: destination-ip: prefix: address: mask: source-port: operator: value: range: start: end: destination-port: operator: value: range: start: end: icmp6: type: code: [] next-header: tcp-flags: protocol: fragment: first-fragment: tcam-profile: # default | ipv4-egress-scaled ================================================ FILE: go.mod ================================================ module github.com/openconfig/gnmic go 1.24.12 replace github.com/openconfig/gnmic/pkg/api v0.1.11 => ./pkg/api replace github.com/openconfig/gnmic/pkg/cache v0.1.3 => ./pkg/cache require ( github.com/IBM/sarama v1.46.3 github.com/adrg/xdg v0.5.3 github.com/c-bata/go-prompt v0.2.6 github.com/docker/docker v28.5.1+incompatible github.com/fsnotify/fsnotify v1.9.0 github.com/fullstorydev/grpcurl v1.9.3 github.com/go-redsync/redsync/v4 v4.13.0 github.com/go-resty/resty/v2 v2.16.5 github.com/google/go-cmp v0.7.0 github.com/google/uuid v1.6.0 github.com/gorilla/handlers v1.5.2 github.com/gorilla/mux v1.8.1 github.com/gosnmp/gosnmp v1.42.1 github.com/grafana/pyroscope-go v1.2.7 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/guptarohit/asciigraph v0.7.3 github.com/hairyhenderson/gomplate/v3 v3.11.8 github.com/hairyhenderson/yaml v0.0.0-20220618171115-2d35fca545ce github.com/hashicorp/consul/api v1.32.0 github.com/hashicorp/go-plugin v1.7.0 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/huandu/xstrings v1.5.0 github.com/influxdata/influxdb-client-go/v2 v2.14.0 github.com/itchyny/gojq v0.12.14 github.com/jellydator/ttlcache/v3 v3.4.0 github.com/jhump/protoreflect v1.17.0 github.com/jlaffaye/ftp v0.2.0 github.com/karimra/go-map-flattener v0.0.1 github.com/karimra/sros-dialout v0.0.0-20260117201857-18e893af823c github.com/manifoldco/promptui v0.9.0 github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/mapstructure v1.5.0 github.com/nats-io/nats.go v1.49.0 github.com/nsf/termbox-go v1.1.1 github.com/olekukonko/tablewriter v0.0.5 github.com/openconfig/gnmi v0.14.1 github.com/openconfig/gnmic/pkg/api v0.1.11 github.com/openconfig/gnmic/pkg/cache v0.1.3 github.com/openconfig/goyang v1.6.3 github.com/openconfig/ygot v0.34.0 github.com/pkg/sftp v1.13.9 github.com/prometheus/client_golang v1.23.2 github.com/prometheus/client_model v0.6.2 github.com/prometheus/prometheus v0.306.0 github.com/redis/go-redis/v9 v9.14.0 github.com/spf13/cobra v1.9.1 github.com/spf13/pflag v1.0.6 github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.11.1 github.com/xdg/scram v1.0.5 github.com/zestor-dev/zestor v0.0.2 go.opentelemetry.io/proto/otlp v1.8.0 go.starlark.net v0.0.0-20260102030733-3fee463870c9 golang.org/x/crypto v0.48.0 golang.org/x/oauth2 v0.34.0 golang.org/x/sync v0.19.0 google.golang.org/grpc v1.79.3 google.golang.org/protobuf v1.36.11 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.32.3 k8s.io/apimachinery v0.32.3 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 ) require ( bitbucket.org/creachadair/stringset v0.0.14 // indirect cel.dev/expr v0.25.1 // indirect cloud.google.com/go/auth v0.16.2 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.9.0 // indirect cloud.google.com/go/iam v1.5.2 // indirect cloud.google.com/go/monitoring v1.24.2 // indirect dario.cat/mergo v1.0.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 // indirect github.com/Knetic/govaluate v3.0.0+incompatible // indirect github.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op // indirect github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/apparentlymart/go-cidr v1.1.0 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 // indirect github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.14 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.2 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.6 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.5 // indirect github.com/aws/aws-sdk-go-v2/service/s3 v1.26.10 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect github.com/bcicen/bfstree v1.0.0 // indirect github.com/bufbuild/protocompile v0.14.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/clipperhouse/stringish v0.1.1 // indirect github.com/clipperhouse/uax29/v2 v2.3.0 // indirect github.com/cloudflare/circl v1.6.3 // indirect github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 // indirect github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/cyphar/filepath-securejoin v0.4.1 // indirect github.com/derekparker/trie v0.0.0-20230829180723-39f4de51ef7d // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/distribution/reference v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect github.com/envoyproxy/protoc-gen-validate v1.3.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-jose/go-jose/v4 v4.1.3 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-redis/redis/v8 v8.11.5 // indirect github.com/gomodule/redigo v2.0.0+incompatible // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-tpm v0.9.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.9 // indirect github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect github.com/hairyhenderson/go-fsimpl v0.0.0-20220529183339-9deae3e35047 // indirect github.com/hashicorp/go-msgpack v1.1.5 // indirect github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 // indirect github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 // indirect github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/yamux v0.1.2 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/juju/ratelimit v1.0.2 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/sys/sequential v0.6.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nats-io/jwt/v2 v2.8.0 // indirect github.com/oapi-codegen/runtime v1.0.0 // indirect github.com/oklog/run v1.2.0 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/pierrec/lz4/v4 v4.1.22 // indirect github.com/pjbgf/sha1cd v0.3.2 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/sagikazarmark/locafero v0.7.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/skeema/knownhosts v1.3.1 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/zealic/xignore v0.3.3 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/detectors/gcp v1.39.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect go.opentelemetry.io/otel/sdk v1.40.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect go.opentelemetry.io/otel/trace v1.40.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa // indirect golang.org/x/term v0.40.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) require ( cloud.google.com/go v0.120.0 // indirect cloud.google.com/go/storage v1.50.0 // indirect github.com/AlekSi/pointer v1.2.0 github.com/Masterminds/goutils v1.1.1 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/ProtonMail/go-crypto v1.1.6 // indirect github.com/Shopify/ejson v1.3.3 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/aws/aws-sdk-go v1.55.7 // indirect github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect github.com/aws/aws-sdk-go-v2/config v1.29.14 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect github.com/aws/smithy-go v1.22.2 // indirect github.com/bcicen/go-units v1.0.3 github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v3 v3.2.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chzyer/readline v1.5.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/docker/libkv v0.2.2-0.20180912205406-458977154600 // indirect github.com/dustin/gojson v0.0.0-20160307161227-2e71ec9dd5ad // indirect github.com/eapache/go-resiliency v1.7.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/emirpasic/gods v1.18.1 // indirect github.com/fatih/color v1.16.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.6.2 // indirect github.com/go-git/go-git/v5 v5.16.5 // indirect github.com/gogo/protobuf v1.3.2 github.com/golang/glog v1.2.5 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v1.0.0 github.com/google/wire v0.5.0 // indirect github.com/googleapis/gax-go/v2 v2.14.2 // indirect github.com/gosimple/slug v1.12.0 // indirect github.com/gosimple/unidecode v1.0.1 // indirect github.com/hairyhenderson/toml v0.4.2-0.20210923231440-40456b8e66cf // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-hclog v1.6.3 github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/serf v0.10.1 // indirect github.com/hashicorp/vault/api v1.6.0 // indirect github.com/hashicorp/vault/sdk v0.5.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect github.com/itchyny/timefmt-go v0.1.7 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jcmturner/gofork v1.7.6 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/joho/godotenv v1.4.0 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect github.com/klauspost/compress v1.18.3 // indirect github.com/kr/fs v0.1.0 // indirect github.com/magiconair/properties v1.8.9 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.19 // indirect github.com/mattn/go-tty v0.0.4 // indirect github.com/nats-io/nats-server/v2 v2.12.4 // indirect github.com/nats-io/nkeys v0.4.15 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/openconfig/grpctunnel v0.1.0 github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/pierrec/lz4 v2.6.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pkg/term v1.2.0-beta.2 // indirect github.com/prometheus/common v0.66.1 github.com/prometheus/procfs v0.16.1 // indirect github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 // indirect github.com/rs/zerolog v1.29.0 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect github.com/spf13/afero v1.12.0 // indirect github.com/spf13/cast v1.7.1 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/ugorji/go/codec v1.2.11 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xdg/stringprep v1.0.0 // indirect go.etcd.io/bbolt v1.3.10 // indirect go.opencensus.io v0.24.0 // indirect go4.org/intern v0.0.0-20230205224052-192e9f60865c // indirect go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 // indirect gocloud.dev v0.25.1-0.20220408200107-09b10f7359f7 // indirect golang.org/x/net v0.50.0 // indirect golang.org/x/sys v0.41.0 // indirect golang.org/x/text v0.34.0 golang.org/x/time v0.14.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/api v0.239.0 // indirect google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect inet.af/netaddr v0.0.0-20230525184311-b8eac61e914a // indirect k8s.io/client-go v0.32.3 ) ================================================ FILE: go.sum ================================================ bitbucket.org/creachadair/stringset v0.0.14 h1:t1ejQyf8utS4GZV/4fM+1gvYucggZkfhb+tMobDxYOE= bitbucket.org/creachadair/stringset v0.0.14/go.mod h1:Ej8fsr6rQvmeMDf6CCWMWGb14H9mz8kmDgPPTdiVT0w= cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go v0.82.0/go.mod h1:vlKccHJGuFBFufnAnuB08dfEH9Y3H7dzDzRECFdC2TA= cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA= cloud.google.com/go v0.120.0/go.mod h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q= cloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4= cloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.2.0/go.mod h1:xlogom/6gr8RJGBe7nT2eGsQYAFUbbv8dbC29qE3Xmw= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= cloud.google.com/go/iam v0.1.1/go.mod h1:CKqrcnI/suGpybEHxZ7BMehL0oA4LpdyJdUlTl9jVMw= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= cloud.google.com/go/kms v1.1.0/go.mod h1:WdbppnCDMDpOvoYBMn1+gNmOeEoZYqAv+HeuKARGCXI= cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= cloud.google.com/go/monitoring v1.4.0/go.mod h1:y6xnxfwI3hTFWOdkOaD7nfJVlwuC3/mS/5kvtT131p4= cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/pubsub v1.19.0/go.mod h1:/O9kmSe9bb9KRnIAWkzmqhPjHo6LtzGOBYd/kr06XSs= cloud.google.com/go/secretmanager v1.3.0/go.mod h1:+oLTkouyiYiabAQNugCeTS3PAArGiMJuBqvJnJsyH+U= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.21.0/go.mod h1:XmRlxkgPjlBONznT2dDUU/5XlpU2OjMnKuqnZI01LAA= cloud.google.com/go/storage v1.50.0 h1:3TbVkzTooBvnZsk7WaAQfOsNrdoM8QHusXA1cpk6QJs= cloud.google.com/go/storage v1.50.0/go.mod h1:l7XeiD//vx5lfqE3RavfmU9yvk5Pp0Zhcv482poyafY= cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= cloud.google.com/go/trace v1.2.0/go.mod h1:Wc8y/uYyOhPy12KEnXG9XGrvfMz5F5SrYecQlbW1rwM= cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= contrib.go.opencensus.io/exporter/aws v0.0.0-20200617204711-c478e41e60e9/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= contrib.go.opencensus.io/exporter/stackdriver v0.13.10/go.mod h1:I5htMbyta491eUxufwwZPQdcKvvgzMB4O9ni41YnIM8= contrib.go.opencensus.io/integrations/ocsql v0.1.7/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AlekSi/pointer v1.2.0 h1:glcy/gc4h8HnG2Z3ZECSzZ1IX1x2JxRVuDzaJwQE0+w= github.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0= github.com/Azure/azure-amqp-common-go/v3 v3.2.1/go.mod h1:O6X1iYHP7s2x7NjUKsXVhkwWrQhxrd+d8/3rRadj4CI= github.com/Azure/azure-amqp-common-go/v3 v3.2.2/go.mod h1:O6X1iYHP7s2x7NjUKsXVhkwWrQhxrd+d8/3rRadj4CI= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v59.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0= github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= github.com/Azure/azure-service-bus-go v0.11.5/go.mod h1:MI6ge2CuQWBVq+ly456MY7XqNLJip5LO1iSFodbNLbU= github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM= github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= github.com/Azure/go-amqp v0.16.0/go.mod h1:9YJ3RhxRT1gquYnzpZO1vcYMMpAdJT+QEg6fwmw9Zlg= github.com/Azure/go-amqp v0.16.4/go.mod h1:9YJ3RhxRT1gquYnzpZO1vcYMMpAdJT+QEg6fwmw9Zlg= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest v0.11.19/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest v0.11.22/go.mod h1:BAWYUWGPEtKPzjVkp0Q6an0MJcJDsoh5Z1BFAEFs4Xs= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.17/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/azure/auth v0.5.9/go.mod h1:hg3/1yw0Bq87O3KvvnJoAh34/0zbP7SFizX/qN5JvjU= github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/GoogleCloudPlatform/cloudsql-proxy v1.29.0/go.mod h1:spvB9eLJH9dutlbPSRmHvSXXHOwGRyeXh1jVdquA2G8= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 h1:5IT7xOdq17MtcdtL/vtl6mGfzhaq4m4vpollPRmlsBQ= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0/go.mod h1:ZV4VOm0/eHR06JLrXWe09068dHpr3TRpY9Uo7T+anuA= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.50.0 h1:nNMpRpnkWDAaqcpxMJvxa/Ud98gjbYwayJY4/9bdjiU= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.50.0/go.mod h1:SZiPHWGOOk3bl8tkevxkoiwPgsIl6CwrWcbwjfHZpdM= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/IBM/sarama v1.46.3 h1:njRsX6jNlnR+ClJ8XmkO+CM4unbrNr/2vB5KK6UA+IE= github.com/IBM/sarama v1.46.3/go.mod h1:GTUYiF9DMOZVe3FwyGT+dtSPceGFIgA+sPc5u6CBwko= github.com/Knetic/govaluate v3.0.0+incompatible h1:7o6+MAPhYTCF0+fdvoz1xDedhRb4f6s9Tn1Tt7/WTEg= github.com/Knetic/govaluate v3.0.0+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw= github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= github.com/Shopify/ejson v1.3.3 h1:dPzgmvFhUPTJIzwdF5DaqbwW1dWaoR8ADKRdSTy6Mss= github.com/Shopify/ejson v1.3.3/go.mod h1:VZMUtDzvBW/PAXRUF5fzp1ffb1ucT8MztrZXXLYZurw= github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78= github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op h1:Ucf+QxEKMbPogRO5guBNe5cgd9uZgfoJLOYs8WWhtjM= github.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.43.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE= github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go-v2 v1.16.2/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU= github.com/aws/aws-sdk-go-v2 v1.16.4/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU= github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1 h1:SdK4Ppk5IzLs64ZMvr6MrSficMtjY2oS0WOORXTlxwU= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1/go.mod h1:n8Bs1ElDD2wJ9kCRTczA83gYbBmjSwZp3umc6zF4EeM= github.com/aws/aws-sdk-go-v2/config v1.15.3/go.mod h1:9YL3v07Xc/ohTsxFXzan9ZpFpdTOFl4X65BAKYaz8jg= github.com/aws/aws-sdk-go-v2/config v1.15.9/go.mod h1:rv/l/TbZo67kp99v/3Kb0qV6Fm1KEtKyruEV2GvVfgs= github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM= github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g= github.com/aws/aws-sdk-go-v2/credentials v1.11.2/go.mod h1:j8YsY9TXTm31k4eFhspiQicfXPLZ0gYXA50i4gxPE8g= github.com/aws/aws-sdk-go-v2/credentials v1.12.4/go.mod h1:7g+GGSp7xtR823o1jedxKmqRZGqLdoHQfI4eFasKKxs= github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM= github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.3/go.mod h1:uk1vhHHERfSVCUnqSqz8O48LBYDSC+k6brng09jcMOk= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.5/go.mod h1:WAPnuhG5IQ/i6DETFl5NmX3kKqCzw7aau9NHAGcm4QE= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.3/go.mod h1:0dHuD2HZZSiwfJSy1FO5bX1hQ1TxVV1QXXjpn3XUE44= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.14 h1:qpJmFbypCfwPok5PGTSnQy1NKbv4Hn8xGsee9l4xOPE= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.14/go.mod h1:IOYB+xOZik8YgdTlnDSwbvKmCkikA3nVue8/Qnfzs0c= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9/go.mod h1:AnVH5pvai0pAF4lXRq0bmhbes1u9R8wTE+g+183bZNM= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.11/go.mod h1:tmUB6jakq5DFNcXsXOA/ZQ7/C8VnSKYkx58OI7Fh79g= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3/go.mod h1:ssOhaLpRlh88H3UmEcsBoVKq309quMvm3Ds8e9d4eJM= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.5/go.mod h1:fV1AaS2gFc1tM0RCb015FJ0pvWVUfJZANzjwoO4YakM= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.10/go.mod h1:8DcYQcz0+ZJaSxANlHIsbbi6S+zMwjwdDqwW3r9AzaE= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.12/go.mod h1:00c7+ALdPh4YeEUPXJzyU0Yy01nPGOq2+9rUaz05z9g= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.2 h1:1fs9WkbFcMawQjxEI0B5L0SqvBhJZebxWM6Z3x/qHWY= github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.2/go.mod h1:0jDVeWUFPbI3sOfsXXAsIdiawXcn7VBLx/IlFVTRP64= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.1/go.mod h1:GeUru+8VzrTXV/83XyMJ80KpH8xO89VPoUileyNQ+tc= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.3/go.mod h1:Seb8KNmD6kVTjwRjVEgOT5hPin6sq+v4C2ycJQDwuH8= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.6 h1:9mvDAsMiN+07wcfGM+hJ1J3dOKZ2YOpDiPZ6ufRJcgw= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.6/go.mod h1:Eus+Z2iBIEfhOvhSdMTcscNOMy6n3X9/BJV0Zgax98w= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3/go.mod h1:wlY6SVjuwvh3TVRpTqdy4I1JpBFLX4UGeKZdWntaocw= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.5/go.mod h1:ZbkttHXaVn3bBo/wpJbQGiiIWR90eTBUVBrEHUEQlho= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.3/go.mod h1:Bm/v2IaN6rZ+Op7zX+bOUMdL4fsrYZiD0dsjLhNKwZc= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.5 h1:DyPYkrH4R2zn+Pdu6hM3VTuPsQYAE6x2WB24X85Sgw0= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.5/go.mod h1:XtL92YWo0Yq80iN3AgYRERJqohg4TozrqRlxYhHGJ7g= github.com/aws/aws-sdk-go-v2/service/kms v1.16.3/go.mod h1:QuiHPBqlOFCi4LqdSskYYAWpQlx3PKmohy+rE2F+o5g= github.com/aws/aws-sdk-go-v2/service/s3 v1.26.3/go.mod h1:g1qvDuRsJY+XghsV6zg00Z4KJ7DtFFCx8fJD2a491Ak= github.com/aws/aws-sdk-go-v2/service/s3 v1.26.10 h1:GWdLZK0r1AK5sKb8rhB9bEXqXCK8WNuyv4TBAD6ZviQ= github.com/aws/aws-sdk-go-v2/service/s3 v1.26.10/go.mod h1:+O7qJxF8nLorAhuIVhYTHse6okjHJJm4EwhhzvpnkT0= github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.15.4/go.mod h1:PJc8s+lxyU8rrre0/4a0pn2wgwiDvOEzoOjcJUBr67o= github.com/aws/aws-sdk-go-v2/service/sns v1.17.4/go.mod h1:kElt+uCcXxcqFyc+bQqZPFD9DME/eC6oHBXvFzQ9Bcw= github.com/aws/aws-sdk-go-v2/service/sqs v1.18.3/go.mod h1:skmQo0UPvsjsuYYSYMVmrPc1HWCbHUJyrCEp+ZaLzqM= github.com/aws/aws-sdk-go-v2/service/ssm v1.24.1/go.mod h1:NR/xoKjdbRJ+qx0pMR4mI+N/H1I1ynHwXnO6FowXJc0= github.com/aws/aws-sdk-go-v2/service/sso v1.11.3/go.mod h1:7UQ/e69kU7LDPtY40OyoHYgRmgfGM4mgsLYtcObdveU= github.com/aws/aws-sdk-go-v2/service/sso v1.11.7/go.mod h1:TFVe6Rr2joVLsYQ1ABACXgOC6lXip/qpX2x5jWg/A9w= github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8= github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= github.com/aws/aws-sdk-go-v2/service/sts v1.16.3/go.mod h1:bfBj0iVmsUyUg4weDB4NxktD9rDGeKSVWnjTnwbx9b8= github.com/aws/aws-sdk-go-v2/service/sts v1.16.6/go.mod h1:rP1rEOKAGZoXp4iGDxSXFvODAtXpm34Egf0lL0eshaQ= github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY= github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/bcicen/bfstree v1.0.0 h1:Fx9vcyXYspj2GIJqAvd1lwCNI+cQF/r2JJqxHHmsAO0= github.com/bcicen/bfstree v1.0.0/go.mod h1:u//juIip96SNFkG4iMn9z0KzqLSeFSpBKoBo5ceq1uE= github.com/bcicen/go-units v1.0.3 h1:REknRsBTdM2+ihTw1DiOsviGQSX7I6jQaPCWTWerBl4= github.com/bcicen/go-units v1.0.3/go.mod h1:c7/sSz9cc6XvnrjsyNwoKHqN6KDDf8LME5vSf+U5Y08= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= github.com/c-bata/go-prompt v0.2.6 h1:POP+nrHE+DfLYx370bedwNhsqmpCUynWPxuHi0C5vZI= github.com/c-bata/go-prompt v0.2.6/go.mod h1:/LMAke8wD2FsNu9EXNdHxNLbd9MedkPnCdfpU9wwHfY= github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v4 v4.0.0/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs= github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA= github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4= github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g= github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8= github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w= github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denisenkom/go-mssqldb v0.12.0/go.mod h1:iiK0YP1ZeepvmBQk/QpLEhhTNJgfzrpArPY/aFvc9yU= github.com/derekparker/trie v0.0.0-20230829180723-39f4de51ef7d h1:hUWoLdw5kvo2xCsqlsIBMvWUc1QCSsCYD2J2+Fg6YoU= github.com/derekparker/trie v0.0.0-20230829180723-39f4de51ef7d/go.mod h1:C7Es+DLenIpPc9J6IYw4jrK0h7S9bKj4DNl8+KxGEXU= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM= github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libkv v0.2.2-0.20180912205406-458977154600 h1:x0AMRhackzbivKKiEeSMzH6gZmbALPXCBG0ecBmRlco= github.com/docker/libkv v0.2.2-0.20180912205406-458977154600/go.mod h1:r5hEwHwW8dr0TFBYGCarMNbrQOiwL1xoqDYZ/JqoTK0= github.com/dustin/gojson v0.0.0-20160307161227-2e71ec9dd5ad h1:Qk76DOWdOp+GlyDKBAG3Klr9cn7N+LcYc82AZ2S7+cA= github.com/dustin/gojson v0.0.0-20160307161227-2e71ec9dd5ad/go.mod h1:mPKfmRa823oBIgl2r20LeMSpTAteW5j7FLkc0vjmzyQ= github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA= github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o= github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.14.0 h1:hbG2kr4RuFj222B6+7T83thSPqLjwBIfQawTkC++2HA= github.com/envoyproxy/go-control-plane v0.14.0/go.mod h1:NcS5X47pLl/hfqxU70yPwL9ZMkUlwlKxtAohpi2wBEU= github.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g= github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4= github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA= github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fullstorydev/grpcurl v1.9.3 h1:PC1Xi3w+JAvEE2Tg2Gf2RfVgPbf9+tbuQr1ZkyVU3jk= github.com/fullstorydev/grpcurl v1.9.3/go.mod h1:/b4Wxe8bG6ndAjlfSUjwseQReUDUvBJiFEB7UllOlUE= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gin-gonic/gin v1.7.3/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= github.com/go-git/go-git/v5 v5.16.5 h1:mdkuqblwr57kVfXri5TTH+nMFLNUxIj9Z7F5ykFbw5s= github.com/go-git/go-git/v5 v5.16.5/go.mod h1:QOMLpNf1qxuSY4StA/ArOdfFR2TrKEjJiye2kel2m+M= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-redis/redis/v7 v7.4.1 h1:PASvf36gyUpr2zdOUS/9Zqc80GbM+9BDyiJSJDDOrTI= github.com/go-redis/redis/v7 v7.4.1/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-redsync/redsync/v4 v4.13.0 h1:49X6GJfnbLGaIpBBREM/zA4uIMDXKAh1NDkvQ1EkZKA= github.com/go-redsync/redsync/v4 v4.13.0/go.mod h1:HMW4Q224GZQz6x1Xc7040Yfgacukdzu7ifTDAKiyErQ= github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM= github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+wXQnTPR4KqTKDgJukSZ6amVRtWMPEjE6sQoK8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-replayers/grpcreplay v1.1.0 h1:S5+I3zYyZ+GQz68OfbURDdt/+cSMqCK1wrvNx7WBzTE= github.com/google/go-replayers/grpcreplay v1.1.0/go.mod h1:qzAvJ8/wi57zq7gWqaE6AwLM6miiXUQwP1S+I9icmhk= github.com/google/go-replayers/httpreplay v1.1.1 h1:H91sIMlt1NZzN7R+/ASswyouLJfW0WLW7fhyUFvDEkY= github.com/google/go-replayers/httpreplay v1.1.1/go.mod h1:gN9GeLIs7l6NUoVaSSnv2RiqK1NiwAmD0MrKeC9IIks= github.com/google/go-tpm v0.9.8 h1:slArAR9Ft+1ybZu0lBwpSmpwhRXaa85hWtMinMyRAWo= github.com/google/go-tpm v0.9.8/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible h1:xmapqc1AyLoB+ddYT6r04bD9lIjlOqGaREovi0SzFaE= github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18= github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= github.com/google/protobuf v3.11.4+incompatible/go.mod h1:lUQ9D1ePzbH2PrIS7ob/bjm9HXyH5WHB0Akwh7URreM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/wire v0.5.0 h1:I7ELFeVBr3yfPIcc8+MWvrjk+3VjbcSzoXm3JVa+jD8= github.com/google/wire v0.5.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU= github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0= github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosimple/slug v1.12.0 h1:xzuhj7G7cGtd34NXnW/yF0l+AGNfWqwgh/IXgFy7dnc= github.com/gosimple/slug v1.12.0/go.mod h1:UiRaFH+GEilHstLUmcBgWcI42viBN7mAb818JrYOeFQ= github.com/gosimple/unidecode v1.0.1 h1:hZzFTMMqSswvf0LBJZCZgThIZrpDHFXux9KeGmn6T/o= github.com/gosimple/unidecode v1.0.1/go.mod h1:CP0Cr1Y1kogOtx0bJblKzsVWrqYaqfNOnHzpgWw4Awc= github.com/gosnmp/gosnmp v1.42.1 h1:MEJxhpC5v1coL3tFRix08PYmky9nyb1TLRRgJAmXm8A= github.com/gosnmp/gosnmp v1.42.1/go.mod h1:CxVS6bXqmWZlafUj9pZUnQX5e4fAltqPcijxWpCitDo= github.com/grafana/pyroscope-go v1.2.7 h1:VWBBlqxjyR0Cwk2W6UrE8CdcdD80GOFNutj0Kb1T8ac= github.com/grafana/pyroscope-go v1.2.7/go.mod h1:o/bpSLiJYYP6HQtvcoVKiE9s5RiNgjYTj1DhiddP2Pc= github.com/grafana/pyroscope-go/godeltaprof v0.1.9 h1:c1Us8i6eSmkW+Ez05d3co8kasnuOY813tbMN8i/a3Og= github.com/grafana/pyroscope-go/godeltaprof v0.1.9/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= github.com/guptarohit/asciigraph v0.7.3 h1:p05XDDn7cBTWiBqWb30mrwxd6oU0claAjqeytllnsPY= github.com/guptarohit/asciigraph v0.7.3/go.mod h1:dYl5wwK4gNsnFf9Zp+l06rFiDZ5YtXM6x7SRWZ3KGag= github.com/hairyhenderson/go-fsimpl v0.0.0-20220529183339-9deae3e35047 h1:nSSfN9G8O8XXDqB3aDEHJ8K+0llYYToNlTcWOe1Pti8= github.com/hairyhenderson/go-fsimpl v0.0.0-20220529183339-9deae3e35047/go.mod h1:30RY4Ey+bg+BGKBufZE2IEmxk7hok9U9mjdgZYomwN4= github.com/hairyhenderson/gomplate/v3 v3.11.8 h1:T63wLRk+Y9C601ChYa/+FZ30XT/UEWydMDZhOOJM3K0= github.com/hairyhenderson/gomplate/v3 v3.11.8/go.mod h1:xs1LnI1NftnB6o0Zvy1aLgDMSGUvGjz4uCQAZSIMP04= github.com/hairyhenderson/toml v0.4.2-0.20210923231440-40456b8e66cf h1:I1sbT4ZbIt9i+hB1zfKw2mE8C12TuGxPiW7YmtLbPa4= github.com/hairyhenderson/toml v0.4.2-0.20210923231440-40456b8e66cf/go.mod h1:jDHmWDKZY6MIIYltYYfW4Rs7hQ50oS4qf/6spSiZAxY= github.com/hairyhenderson/yaml v0.0.0-20220618171115-2d35fca545ce h1:cVkYhlWAxwuS2/Yp6qPtcl0fGpcWxuZNonywHZ6/I+s= github.com/hairyhenderson/yaml v0.0.0-20220618171115-2d35fca545ce/go.mod h1:7TyiGlHI+IO+iJbqRZ82QbFtvgj/AIcFm5qc9DLn7Kc= github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok= github.com/hanwen/go-fuse/v2 v2.1.0/go.mod h1:oRyA5eK+pvJyv5otpO/DgccS8y/RvYMaO00GgRLGryc= github.com/hashicorp/consul/api v1.32.0 h1:5wp5u780Gri7c4OedGEPzmlUEzi0g2KyiPphSr6zjVg= github.com/hashicorp/consul/api v1.32.0/go.mod h1:Z8YgY0eVPukT/17ejW+l+C7zJmKwgPHtjU1q16v/Y40= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs= github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= github.com/hashicorp/go-plugin v1.7.0 h1:YghfQH/0QmPNc/AZMTFE3ac8fipZyZECHdDPshfk+mA= github.com/hashicorp/go-plugin v1.7.0/go.mod h1:BExt6KEaIYx804z8k4gRzRLEvxKVb+kn0NMcihqOqb8= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 h1:p4AKXPPS24tO8Wc8i1gLvSKdmkiSY5xuju57czJ/IJQ= github.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.5/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hashicorp/vault/api v1.6.0 h1:B8UUYod1y1OoiGHq9GtpiqSnGOUEWHaA26AY8RQEDY4= github.com/hashicorp/vault/api v1.6.0/go.mod h1:h1K70EO2DgnBaTz5IsL6D5ERsNt5Pce93ueVS2+t0Xc= github.com/hashicorp/vault/sdk v0.5.0 h1:EED7p0OCU3OY5SAqJwSANofY1YKMytm+jDHDQ2EzGVQ= github.com/hashicorp/vault/sdk v0.5.0/go.mod h1:UJZHlfwj7qUJG8g22CuxUgkdJouFrBNvBHCyx8XAPdo= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8= github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/influxdata/influxdb-client-go/v2 v2.14.0 h1:AjbBfJuq+QoaXNcrova8smSjwJdUHnwvfjMF71M1iI4= github.com/influxdata/influxdb-client-go/v2 v2.14.0/go.mod h1:Ahpm3QXKMJslpXl3IftVLVezreAUtBOTZssDrjZEFHI= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= github.com/itchyny/gojq v0.12.14 h1:6k8vVtsrhQSYgSGg827AD+PVVaB1NLXEdX+dda2oZCc= github.com/itchyny/gojq v0.12.14/go.mod h1:y1G7oO7XkcR1LPZO59KyoCRy08T3j9vDYRV0GgYSS+s= github.com/itchyny/timefmt-go v0.1.7 h1:xyftit9Tbw+Dc/huSSPJaEmX1TVL8lw5vxjJLK4GMMA= github.com/itchyny/timefmt-go v0.1.7/go.mod h1:5E46Q+zj7vbTgWY8o5YkMeYb4I6GeWLFnetPy5oBrAI= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= github.com/jackc/pgconn v1.11.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgproto3/v2 v2.2.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= github.com/jackc/pgtype v1.10.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= github.com/jackc/pgx/v4 v4.15.0/go.mod h1:D/zyOyXiaM1TmVWnOM18p0xdDtdakRBa0RsVGI3U3bw= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94= github.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8= github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg= github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/johannesboyne/gofakes3 v0.0.0-20220517215058-83a58ec253b6 h1:Twy/cqAmdLarn9QEiRvyX5eUyuKFxqMEiy5GQGIqwjo= github.com/johannesboyne/gofakes3 v0.0.0-20220517215058-83a58ec253b6/go.mod h1:LIAXxPvcUXwOcTIj9LSNSUpE9/eMHalTWxsP/kmWxQI= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/joho/godotenv v1.4.0 h1:3l4+N6zfMWnkbPEXKng2o2/MR5mSwTrBih4ZEkkz1lg= github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= github.com/juju/ratelimit v1.0.2 h1:sRxmtRiajbvrcLQT7S+JbqU0ntsb9W2yhSdNN8tWfaI= github.com/juju/ratelimit v1.0.2/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/karimra/go-map-flattener v0.0.1 h1:hkNYOZxHKdRHPwP5pM1glOPoL12U7Cpmbp7OcEH2BUc= github.com/karimra/go-map-flattener v0.0.1/go.mod h1:qwSIH4cR7eD1dkmjx0S/rqsO33C6VYaTHLrdfntJQkM= github.com/karimra/sros-dialout v0.0.0-20260117201857-18e893af823c h1:dlqPOgewPbpD8HhckpNqNKZRRXIEJLcVadzJIZT4RNM= github.com/karimra/sros-dialout v0.0.0-20260117201857-18e893af823c/go.mod h1:KcjPi49Pbs+EF8Ykob5AzLcze653Qb4HFz+i2aFEEJU= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw= github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/magiconair/properties v1.8.9 h1:nWcCbLq1N2v/cpNsy5WvQ37Fb+YElfq20WJ/a8RkpQM= github.com/magiconair/properties v1.8.9/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-ieproxy v0.0.6 h1:tVDlituRyeHMMkHpGpUu8CJG+hxPMwbYCkIUK2PUCbo= github.com/mattn/go-ieproxy v0.0.6/go.mod h1:6ZpRmhBaYuBX1U2za+9rC9iCGLsSp2tftelZne7CPko= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw= github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs= github.com/mattn/go-tty v0.0.3/go.mod h1:ihxohKRERHTVzN+aSVRwACLCeqIoZAWpoICkkvrWyR0= github.com/mattn/go-tty v0.0.4 h1:NVikla9X8MN0SQAqCYzpGyXv0jY7MNl3HOWD2dkle7E= github.com/mattn/go-tty v0.0.4/go.mod h1:u5GGXBtZU6RQoKV8gY5W6UhMudbR5vXnUe7j3pxse28= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE= github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE= github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76 h1:KGuD/pM2JpL9FAYvBrnBBeENKZNh6eNtjqytV6TYjnk= github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nats-io/jwt/v2 v2.8.0 h1:K7uzyz50+yGZDO5o772eRE7atlcSEENpL7P+b74JV1g= github.com/nats-io/jwt/v2 v2.8.0/go.mod h1:me11pOkwObtcBNR8AiMrUbtVOUGkqYjMQZ6jnSdVUIA= github.com/nats-io/nats-server/v2 v2.12.4 h1:ZnT10v2LU2Xcoiy8ek9X6Se4YG8EuMfIfvAEuFVx1Ts= github.com/nats-io/nats-server/v2 v2.12.4/go.mod h1:5MCp/pqm5SEfsvVZ31ll1088ZTwEUdvRX1Hmh/mTTDg= github.com/nats-io/nats.go v1.49.0 h1:yh/WvY59gXqYpgl33ZI+XoVPKyut/IcEaqtsiuTJpoE= github.com/nats-io/nats.go v1.49.0/go.mod h1:fDCn3mN5cY8HooHwE2ukiLb4p4G4ImmzvXyJt+tGwdw= github.com/nats-io/nkeys v0.4.15 h1:JACV5jRVO9V856KOapQ7x+EY8Jo3qw1vJt/9Jpwzkk4= github.com/nats-io/nkeys v0.4.15/go.mod h1:CpMchTXC9fxA5zrMo4KpySxNjiDVvr8ANOSZdiNfUrs= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nsf/termbox-go v1.1.1 h1:nksUPLCb73Q++DwbYUBEglYBRPZyoXJdrj5L+TkjyZY= github.com/nsf/termbox-go v1.1.1/go.mod h1:T0cTdVuOwf7pHQNtfhnEbzHbcNyCEcVU4YPpouCbVxo= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oapi-codegen/runtime v1.0.0 h1:P4rqFX5fMFWqRzY9M/3YF9+aPSPPB06IzP2P7oOxrWo= github.com/oapi-codegen/runtime v1.0.0/go.mod h1:LmCUMQuPB4M/nLXilQXhHw+BLZdDb18B34OO356yJ/A= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.2.0 h1:O8x3yXwah4A73hJdlrwo/2X6J62gE5qTMusH0dvz60E= github.com/oklog/run v1.2.0/go.mod h1:mgDbKRSwPhJfesJ4PntqFUbKQRZ50NgmZTSPlFA0YFk= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/openconfig/gnmi v0.0.0-20200508230933-d19cebf5e7be/go.mod h1:M/EcuapNQgvzxo1DDXHK4tx3QpYM/uG4l591v33jG2A= github.com/openconfig/gnmi v0.14.1 h1:qKMuFvhIRR2/xxCOsStPQ25aKpbMDdWr3kI+nP9bhMs= github.com/openconfig/gnmi v0.14.1/go.mod h1:whr6zVq9PCU8mV1D0K9v7Ajd3+swoN6Yam9n8OH3eT0= github.com/openconfig/goyang v0.0.0-20200115183954-d0a48929f0ea/go.mod h1:dhXaV0JgHJzdrHi2l+w0fZrwArtXL7jEFoiqLEdmkvU= github.com/openconfig/goyang v1.6.3 h1:9nWXBwd6b4+nZr8ni7O4zUXVhrVMXCLFz8os5YWFuo4= github.com/openconfig/goyang v1.6.3/go.mod h1:5WolITjek1NF8yrNERyVZ7jqjOClJTpO8p/+OwmETM4= github.com/openconfig/grpctunnel v0.1.0 h1:EN99qtlExZczgQgp5ANnHRC/Rs62cAG+Tz2BQ5m/maM= github.com/openconfig/grpctunnel v0.1.0/go.mod h1:G04Pdu0pml98tdvXrvLaU+EBo3PxYfI9MYqpvdaEHLo= github.com/openconfig/ygot v0.6.0/go.mod h1:o30svNf7O0xK+R35tlx95odkDmZWS9JyWWQSmIhqwAs= github.com/openconfig/ygot v0.34.0 h1:9OkVjy3SGi4mbvAZc4HTQBU9u4MT6k4j5DdX+hgRiC4= github.com/openconfig/ygot v0.34.0/go.mod h1:eMNQHrJpanet+pQoBw/P3ua4sLY/tRTXyJ7ALkWCvl4= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.9 h1:4NGkvGudBL7GteO3m6qnaQ4pC0Kvf0onSVc9gR3EWBw= github.com/pkg/sftp v1.13.9/go.mod h1:OBN7bVXdstkFFN/gdnHPUb5TE8eb8G1Rp9wCItqjkkA= github.com/pkg/term v1.2.0-beta.2 h1:L3y/h2jkuBVFdWiJvNfYfKmzcCnILw7mJWm2JQuMppw= github.com/pkg/term v1.2.0-beta.2/go.mod h1:E25nymQcrSllhX42Ok8MRm1+hyBdHY0dCeiKZ9jpNGw= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/prometheus/prometheus v0.306.0 h1:Q0Pvz/ZKS6vVWCa1VSgNyNJlEe8hxdRlKklFg7SRhNw= github.com/prometheus/prometheus v0.306.0/go.mod h1:7hMSGyZHt0dcmZ5r4kFPJ/vxPQU99N5/BGwSPDxeZrQ= github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg= github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/redis/go-redis/v9 v9.14.0 h1:u4tNCjXOyzfgeLN+vAZaW1xUooqWDqVEsZN0U01jfAE= github.com/redis/go-redis/v9 v9.14.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= github.com/redis/rueidis v1.0.19 h1:s65oWtotzlIFN8eMPhyYwxlwLR1lUdhza2KtWprKYSo= github.com/redis/rueidis v1.0.19/go.mod h1:8B+r5wdnjwK3lTFml5VtxjzGOQAC+5UmujoD12pDrEo= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/rs/zerolog v1.29.0 h1:Zes4hju04hjbvkVkOhdl2HpZa+0PmVwigmo8XoORE5w= github.com/rs/zerolog v1.29.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8= github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500 h1:WnNuhiq+FOY3jNj6JXFT+eLN3CQ/oPIsDPRanvwsmbI= github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500/go.mod h1:+njLrG5wSeoG4Ds61rFgEzKvenR2UHbjMoDHsczxly0= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.0/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203 h1:QVqDTf3h2WHt08YuiTGPZLls0Wq99X9bWd0Q5ZSBesM= github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203/go.mod h1:oqN97ltKNihBbwlX8dLpwxCl3+HnXKV/R0e+sRLd9C8= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xdg/scram v1.0.5 h1:TuS0RFmt5Is5qm9Tm2SoD89OPqe4IRiFtyFY4iwWXsw= github.com/xdg/scram v1.0.5/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zealic/xignore v0.3.3 h1:EpLXUgZY/JEzFkTc+Y/VYypzXtNz+MSOMVCGW5Q4CKQ= github.com/zealic/xignore v0.3.3/go.mod h1:lhS8V7fuSOtJOKsvKI7WfsZE276/7AYEqokv3UiqEAU= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= github.com/zestor-dev/zestor v0.0.2 h1:UyM7G7QPwDRThgCWxm3DUCaAkKte6s7fVhOWBVvDH3Y= github.com/zestor-dev/zestor v0.0.2/go.mod h1:gffTEDJU8OE+V1gewC+yL1x6MpZOeNsys5jMQPv0S3k= go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE= go.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v1.8.0 h1:fRAZQDcAFHySxpJ1TwlA1cJ4tvcrw7nXl9xWWC8N5CE= go.opentelemetry.io/proto/otlp v1.8.0/go.mod h1:tIeYOeNBU4cvmPqpaji1P+KbB4Oloai8wN4rWzRrFF0= go.starlark.net v0.0.0-20260102030733-3fee463870c9 h1:nV1OyvU+0CYrp5eKfQ3rD03TpFYYhH08z31NK1HmtTk= go.starlark.net v0.0.0-20260102030733-3fee463870c9/go.mod h1:YKMCv9b1WrfWmeqdV5MAuEHWsu5iC+fe6kYl2sQjdI8= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA= go4.org/intern v0.0.0-20230205224052-192e9f60865c h1:b8WZ7Ja8nKegYxfwDLLwT00ZKv4lXAQrw8LYPK+cHSI= go4.org/intern v0.0.0-20230205224052-192e9f60865c/go.mod h1:RJ0SVrOMpxLhgb5noIV+09zI1RsRlMsbUcSxpWHqbrE= go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= go4.org/unsafe/assume-no-moving-gc v0.0.0-20230204201903-c31fa085b70e/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 h1:lGdhQUN/cnWdSH3291CUuxSEqc+AsGTiDxPP3r2J0l4= go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= gocloud.dev v0.25.1-0.20220408200107-09b10f7359f7 h1:esuNxgk6HkmcadSJQCFnGOfyufN1GW1gtFJDwUbmYOw= gocloud.dev v0.25.1-0.20220408200107-09b10f7359f7/go.mod h1:mkUgejbnbLotorqDyvedJO20XcZNTynmSeVSQS9btVg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211115234514-b4de73f9ece8/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa h1:t2QcU6V556bFjYgu4L6C+6VrCPyJZ+eyRsABUPs1mz4= golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220401154927-543a649e0bdd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60= golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200918174421-af09f7315aff/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220330033206-e17cdc41300f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg= golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM= google.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oYd9M= google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= google.golang.org/api v0.68.0/go.mod h1:sOM8pTpwgflXRhz+oC8H2Dr+UcbMqkPPWNJo88Q7TH8= google.golang.org/api v0.69.0/go.mod h1:boanBiw+h5c3s+tBPgEzLDRHfFLWV0qXxRHz3ws7C80= google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/api v0.239.0 h1:2hZKUnFZEy81eugPs4e2XzIJ5SOwQg0G82bpXD65Puo= google.golang.org/api v0.239.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220204002441-d6cc3cc0770e/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220211171837-173942840c17/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220216160803-4663080d8bc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= google.golang.org/genproto v0.0.0-20220401170504-314d38edb7de/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78= google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk= google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls= google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww= google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE= google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= inet.af/netaddr v0.0.0-20230525184311-b8eac61e914a h1:1XCVEdxrvL6c0TGOhecLuB7U9zYNdxZEjvOqJreKZiM= inet.af/netaddr v0.0.0-20230525184311-b8eac61e914a/go.mod h1:e83i32mAQOW1LAqEIweALsuK2Uw4mhQadA5r7b0Wobo= k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= ================================================ FILE: goreleaser-alpine.dockerfile ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 FROM alpine LABEL maintainer="Karim Radhouani , Roman Dodin " LABEL documentation="https://gnmic.openconfig.net" LABEL repo="https://github.com/openconfig/gnmic" COPY gnmic /app/gnmic ENTRYPOINT [ "/app/gnmic" ] CMD [ "help" ] ================================================ FILE: goreleaser-scratch.dockerfile ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 FROM scratch LABEL maintainer="Karim Radhouani , Roman Dodin " LABEL documentation="https://gnmic.openconfig.net" LABEL repo="https://github.com/openconfig/gnmic" COPY gnmic /app/gnmic ENTRYPOINT [ "/app/gnmic" ] CMD [ "help" ] ================================================ FILE: install.sh ================================================ #!/usr/bin/env bash # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 # The install script is based off of the Apache 2.0 script from Helm, # https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 : ${BINARY_NAME:="gnmic"} : ${PROJECT_NAME:="gnmic"} # if project name does not match binary name : ${USE_SUDO:="true"} : ${USE_PKG:="false"} # default --use-pkg flag value. will use package installation by default unless the default is changed to false : ${VERIFY_CHECKSUM:="false"} : ${BIN_INSTALL_DIR:="/usr/local/bin"} : ${REPO_NAME:="openconfig/gnmic"} : ${REPO_URL:="https://github.com/$REPO_NAME"} : ${PROJECT_URL:="https://gnmic.openconfig.net"} : ${LATEST_URL:="https://api.github.com/repos/$REPO_NAME/releases/latest"} # detectArch discovers the architecture for this system. detectArch() { ARCH=$(uname -m) # case $ARCH in # armv5*) ARCH="armv5" ;; # armv6*) ARCH="armv6" ;; # armv7*) ARCH="arm" ;; # aarch64) ARCH="arm64" ;; # x86) ARCH="386" ;; # x86_64) ARCH="amd64" ;; # i686) ARCH="386" ;; # i386) ARCH="386" ;; # esac } # detectOS discovers the operating system for this system and its package format detectOS() { OS=$(echo $(uname) | tr '[:upper:]' '[:lower:]') case "$OS" in # Minimalist GNU for Windows mingw*) OS='windows' ;; esac if type "rpm" &>/dev/null; then PKG_FORMAT="rpm" elif type "dpkg" &>/dev/null; then PKG_FORMAT="deb" fi } # runs the given command as root (detects if we are root already) runAsRoot() { local CMD="$*" if [ $EUID -ne 0 -a $USE_SUDO = "true" ]; then CMD="sudo $CMD" fi $CMD } # verifySupported checks that the os/arch combination is supported verifySupported() { local supported="darwin-x86_64\ndarwin-aarch64\nlinux-i386\nlinux-x86_64\nlinux-armv7\nlinux-aarch64" # change ARCH to "aarch64" if OS="darwin" and ARCH="arm64" if [ ${OS} == "darwin" ] && [ ${ARCH} == "arm64" ]; then ARCH="aarch64" fi if ! echo "${supported}" | grep -q "${OS}-${ARCH}"; then echo "No prebuilt binary for ${OS}-${ARCH}." echo "To build from source, go to ${REPO_URL}" exit 1 fi if ! type "curl" &>/dev/null && ! type "wget" &>/dev/null; then echo "Either curl or wget is required" exit 1 fi } # verifyOpenssl checks if openssl is installed to perform checksum operation verifyOpenssl() { if [ $VERIFY_CHECKSUM == "true" ]; then if ! type "openssl" &>/dev/null; then echo "openssl is not found. It is used to verify checksum of the downloaded file." exit 1 fi fi } # setDesiredVersion sets the desired version either to an explicit version provided by a user # or to the latest release available on github releases setDesiredVersion() { if [ "x$DESIRED_VERSION" == "x" ]; then # when desired version is not provided # get latest tag from the gh releases local cmd="" if type "curl" &>/dev/null; then cmd="curl -s " elif type "wget" &>/dev/null; then cmd="wget -q -O- " else echo "Missing curl or wget utility to download the installation package" exit 1 fi local latest_release_url="" # use jq to filter the api response if available if type "jq" &>/dev/null; then latest_release_url=$($cmd $LATEST_URL | jq -r .html_url) # else use grep and cut else latest_release_url=$($cmd $LATEST_URL | grep "html_url.*releases/tag" | cut -d '"' -f 4) fi # check for empty response or null (GitHub API may return null on rate limit or errors) if [ "x$latest_release_url" == "x" ] || [ "$latest_release_url" == "null" ]; then echo "Could not determine the latest release from GitHub API." echo "This may be due to GitHub API rate limiting. Please try again later or specify a version with --version flag." exit 1 fi TAG=$(echo $latest_release_url | cut -d '"' -f 2 | awk -F "/" '{print $NF}') # tag with stripped `v` prefix TAG_WO_VER=$(echo "${TAG}" | cut -c 2-) # validate that TAG looks like a version (should start with 'v') if [[ ! "$TAG" =~ ^v[0-9] ]]; then echo "Error: Invalid version tag '$TAG' retrieved from GitHub API." echo "Expected a version starting with 'v' (e.g., v0.1.0). Please try again later or specify a version with --version flag." exit 1 fi else TAG=$DESIRED_VERSION TAG_WO_VER=$(echo "${TAG}" | cut -c 2-) fi } # checkInstalledVersion checks which version is installed and # if it needs to be changed. checkInstalledVersion() { if [[ -f "${BIN_INSTALL_DIR}/${BINARY_NAME}" ]]; then local version=$("${BIN_INSTALL_DIR}/${BINARY_NAME}" version | grep version | awk '{print $NF}') if [[ "v$version" == "$TAG" ]]; then echo "${BINARY_NAME} is already at ${DESIRED_VERSION:-latest ($version)}" version return 0 else echo "${BINARY_NAME} ${TAG_WO_VER} is available. Changing from version ${version}." return 1 fi else return 1 fi } # createTempDir creates temporary directory where we downloaded files createTempDir() { TMP_ROOT="$(mktemp -d)" TMP_BIN="$TMP_ROOT/$BINARY_NAME" } # downloadFile downloads the latest binary archive, the checksum file and performs the sum check downloadFile() { EXT="tar.gz" # download file extension if [ $USE_PKG == "true" ]; then if [ -z $PKG_FORMAT ]; then echo "Package for $OS-$ARCH is not available" cleanup exit 1 fi EXT=$PKG_FORMAT fi ARCHIVE="${PROJECT_NAME}_${TAG_WO_VER}_${OS}_${ARCH}.${EXT}" DOWNLOAD_URL="${REPO_URL}/releases/download/${TAG}/${ARCHIVE}" CHECKSUM_URL="${REPO_URL}/releases/download/${TAG}/checksums.txt" TMP_FILE="$TMP_ROOT/$ARCHIVE" SUM_FILE="$TMP_ROOT/checksums.txt" echo "Downloading $DOWNLOAD_URL" if type "curl" &>/dev/null; then curl -SsL "$CHECKSUM_URL" -o "$SUM_FILE" curl -SsL "$DOWNLOAD_URL" -o "$TMP_FILE" elif type "wget" &>/dev/null; then wget -q -O "$SUM_FILE" "$CHECKSUM_URL" wget -q -O "$TMP_FILE" "$DOWNLOAD_URL" fi # verify downloaded file if [ $VERIFY_CHECKSUM == "true" ]; then local sum=$(openssl sha1 -sha256 ${TMP_FILE} | awk '{print $2}') local expected_sum=$(cat ${SUM_FILE} | grep -i $ARCHIVE | awk '{print $1}') if [ "$sum" != "$expected_sum" ]; then echo "SHA sum of ${TMP_FILE} does not match. Aborting." exit 1 fi echo "Checksum verified" fi } # installFile verifies the SHA256 for the file, then unpacks and # installs it. By default, the installation is done from .tar.gz archive, that can be overriden with --use-pkg flag installFile() { tar xf "$TMP_FILE" -C "$TMP_ROOT" echo "Preparing to install $BINARY_NAME ${TAG_WO_VER} into ${BIN_INSTALL_DIR}" runAsRoot cp -f "$TMP_ROOT/$BINARY_NAME" "$BIN_INSTALL_DIR/$BINARY_NAME" runAsRoot chmod 755 "$BIN_INSTALL_DIR/$BINARY_NAME" echo "$BINARY_NAME installed into $BIN_INSTALL_DIR/$BINARY_NAME" } # installPkg installs the downloaded version of a package in a deb or rpm format installPkg() { echo "Preparing to install $BINARY_NAME ${TAG_WO_VER} from package" if [ $PKG_FORMAT == "deb" ]; then runAsRoot dpkg -i $TMP_FILE elif [ $PKG_FORMAT == "rpm" ]; then runAsRoot rpm -U $TMP_FILE fi } # fail_trap is executed if an error occurs. fail_trap() { result=$? if [ "$result" != "0" ]; then if [[ -n "$INPUT_ARGUMENTS" ]]; then echo "Failed to install $BINARY_NAME with the arguments provided: $INPUT_ARGUMENTS" help else echo "Failed to install $BINARY_NAME" fi echo -e "\tFor support, go to $REPO_URL/issues" fi cleanup exit $result } # testVersion tests the installed client to make sure it is working. testVersion() { set +e $BIN_INSTALL_DIR/$BINARY_NAME version if [ "$?" = "1" ]; then echo "$BINARY_NAME not found. Is $BIN_INSTALL_DIR in your "'$PATH?' exit 1 fi set -e } # help provides possible cli installation arguments help() { echo "Accepted cli arguments are:" echo -e "\t[--help|-h ] ->> prints this help" echo -e "\t[--version|-v ] . When not defined it fetches the latest release from GitHub" echo -e "\te.g. --version v0.1.1" echo -e "\t[--use-pkg] ->> install from deb/rpm packages" echo -e "\t[--no-sudo] ->> install without sudo" echo -e "\t[--verify-checksum] ->> verify checksum of the downloaded file" } # removes temporary directory used to download artefacts cleanup() { if [[ -d "${TMP_ROOT:-}" ]]; then rm -rf "$TMP_ROOT" fi } # Execution #Stop execution on any error trap "fail_trap" EXIT set -e # Parsing input arguments (if any) export INPUT_ARGUMENTS="${@}" set -u while [[ $# -gt 0 ]]; do case $1 in '--version' | -v) shift if [[ $# -ne 0 ]]; then export DESIRED_VERSION="v${1}" else echo -e "Please provide the desired version. e.g. --version 0.1.1" exit 0 fi ;; '--no-sudo') USE_SUDO="false" ;; '--verify-checksum') VERIFY_CHECKSUM="true" ;; '--use-pkg') USE_PKG="true" ;; '--help' | -h) help exit 0 ;; *) exit 1 ;; esac shift done set +u detectArch detectOS verifySupported setDesiredVersion if ! checkInstalledVersion; then createTempDir verifyOpenssl downloadFile if [ $USE_PKG == "true" ]; then installPkg else installFile fi testVersion cleanup fi ================================================ FILE: main.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project ("Work") made under the Google Software Grant and Corporate Contributor License Agreement ("CLA") and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose. // This code is provided on an "as is" basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package main import ( "github.com/openconfig/gnmic/pkg/cmd" ) func main() { cmd.Execute() } ================================================ FILE: mkdocs.yml ================================================ # © 2022 Nokia. # # This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. # No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. # This code is provided on an “as is” basis without any warranties of any kind. # # SPDX-License-Identifier: Apache-2.0 site_name: gNMIc nav: - Home: index.md - Getting started: - Installation: install.md - Basic usage: basic_usage.md - User guide: user_guide/configuration_intro.md - Command reference: cmd/capabilities.md - Deployment examples: deployments/deployments_intro.md - Changelog: changelog.md - User guide: - Configuration: - Introduction: user_guide/configuration_intro.md - Flags: - user_guide/configuration_flags.md - global_flags.md - Environment variables: user_guide/configuration_env.md - File configuration: user_guide/configuration_file.md - Targets: - Configuration: user_guide/targets/targets.md - Session Security: user_guide/targets/targets_session_sec.md - Discovery: - Introduction: user_guide/targets/target_discovery/discovery_intro.md - File Discovery: user_guide/targets/target_discovery/file_discovery.md - Consul Discovery: user_guide/targets/target_discovery/consul_discovery.md - Docker Discovery: user_guide/targets/target_discovery/docker_discovery.md - HTTP Discovery: user_guide/targets/target_discovery/http_discovery.md - Subscriptions: user_guide/subscriptions.md - Prompt mode: user_guide/prompt_suggestions.md - gNMI Server: user_guide/gnmi_server.md - Tunnel Server: user_guide/tunnel_server.md - Inputs: - Introduction: user_guide/inputs/input_intro.md - NATS: user_guide/inputs/nats_input.md - Jetstream: user_guide/inputs/jetstream_input.md - STAN: user_guide/inputs/stan_input.md - Kafka: user_guide/inputs/kafka_input.md - Outputs: - Introduction: user_guide/outputs/output_intro.md - File: user_guide/outputs/file_output.md - NATS: - NATS: user_guide/outputs/nats_output.md - STAN: user_guide/outputs/stan_output.md - Jetstream: user_guide/outputs/jetstream_output.md - Kafka: user_guide/outputs/kafka_output.md - InfluxDB: user_guide/outputs/influxdb_output.md - Prometheus: - Scrape Based (Pull): user_guide/outputs/prometheus_output.md - Remote Write (Push): user_guide/outputs/prometheus_write_output.md - OpenTelemetry: user_guide/outputs/otlp_output.md - gNMI Server: user_guide/outputs/gnmi_output.md - TCP: user_guide/outputs/tcp_output.md - UDP: user_guide/outputs/udp_output.md - SNMP: user_guide/outputs/snmp_output.md - ASCII Graph: user_guide/outputs/asciigraph_output.md - Processors: - Introduction: user_guide/event_processors/intro.md - Add Tag: user_guide/event_processors/event_add_tag.md - Allow: user_guide/event_processors/event_allow.md - Combine: user_guide/event_processors/event_combine.md - Convert: user_guide/event_processors/event_convert.md - Data Convert: user_guide/event_processors/event_data_convert.md - Date string: user_guide/event_processors/event_date_string.md - Delete: user_guide/event_processors/event_delete.md - Drop: user_guide/event_processors/event_drop.md - Duration Convert: user_guide/event_processors/event_duration_convert.md - Extract Tags: user_guide/event_processors/event_extract_tags.md - Group by: user_guide/event_processors/event_group_by.md - IEEE Float32: user_guide/event_processors/event_ieeefloat32.md - JQ: user_guide/event_processors/event_jq.md - Merge: user_guide/event_processors/event_merge.md - Override TS: user_guide/event_processors/event_override_ts.md - Plugin: user_guide/event_processors/event_plugin.md - Rate Limit: user_guide/event_processors/event_rate_limit.md - Starlark: user_guide/event_processors/event_starlark.md - Strings: user_guide/event_processors/event_strings.md - Time Epoch: user_guide/event_processors/event_time_epoch.md - To Tag: user_guide/event_processors/event_to_tag.md - Trigger: user_guide/event_processors/event_trigger.md - Value Tag: user_guide/event_processors/event_value_tag.md - Write: user_guide/event_processors/event_write.md - Actions: user_guide/actions/actions.md - Caching: user_guide/caching.md - Clustering: user_guide/HA.md - REST API: - Introduction: user_guide/api/api_intro.md - Configuration: user_guide/api/configuration.md - Targets: user_guide/api/targets.md - Cluster: user_guide/api/cluster.md - Other: user_guide/api/other.md - Golang Package: - Introduction: user_guide/golang_package/intro.md - Target Options: user_guide/golang_package/target_options.md - gNMI Options: user_guide/golang_package/gnmi_options.md - Examples: - Capabilities: user_guide/golang_package/examples/capabilities.md - Get: user_guide/golang_package/examples/get.md - Set: user_guide/golang_package/examples/set.md - Subcribe: user_guide/golang_package/examples/subscribe.md - Collector Mode: - Introduction: user_guide/collector/collector_intro.md - Configuration: user_guide/collector/collector_configuration.md - REST API: user_guide/collector/collector_api.md - Command reference: - Capabilities: cmd/capabilities.md - Get: cmd/get.md - Set: cmd/set.md - GetSet: cmd/getset.md - Subscribe: cmd/subscribe.md - Diff: - Diff: cmd/diff/diff.md - Diff Setrequest: cmd/diff/diff_setrequest.md - Diff Set-To-Notifs: cmd/diff/diff_set_to_notifs.md - Listen: cmd/listen.md - Path: cmd/path.md - Prompt: cmd/prompt.md - Generate: - Generate: 'cmd/generate.md' - Generate Path: cmd/generate/generate_path.md - Generate Set-Request: cmd/generate/generate_set_request.md - Processor: cmd/processor.md - Proxy: cmd/proxy.md - Collector: cmd/collector.md - Deployment examples: - Deployments: deployments/deployments_intro.md - gNMIc Single Instance: - NATS Output: - Containerlab: deployments/single-instance/containerlab/nats-output.md - Docker Compose: deployments/single-instance/docker-compose/nats-output.md - Kafka output: - Containerlab: deployments/single-instance/containerlab/kafka-output.md - Docker Compose: deployments/single-instance/docker-compose/kafka-output.md - InfluxDB output: - Containerlab: deployments/single-instance/containerlab/influxdb-output.md - Docker Compose: deployments/single-instance/docker-compose/influxdb-output.md - Prometheus output: - Containerlab: deployments/single-instance/containerlab/prometheus-output.md - Docker Compose: deployments/single-instance/docker-compose/prometheus-output.md - Prometheus Remote Write output: - Containerlab: deployments/single-instance/containerlab/prometheus-remote-write-output.md - Multiple outputs: - Containerlab: deployments/single-instance/containerlab/multiple-outputs.md - Docker Compose: deployments/single-instance/docker-compose/multiple-outputs.md - gNMIc Cluster: - InfluxDB output: - Containerlab: deployments/clusters/containerlab/cluster_with_influxdb_output.md - Docker Compose: deployments/clusters/docker-compose/cluster_with_influxdb_output.md - Prometheus output: - Containerlab: deployments/clusters/containerlab/cluster_with_prometheus_output.md - Docker Compose: deployments/clusters/docker-compose/cluster_with_prometheus_output.md - Kubernetes: deployments/clusters/kubernetes/cluster_with_prometheus_output.md - Prometheus output with data replication: - Containerlab: deployments/clusters/containerlab/cluster_with_nats_input_and_prometheus_output.md - Docker Compose: deployments/clusters/docker-compose/cluster_with_nats_input_and_prometheus_output.md - gNMI Server Cluster: - Containerlab: deployments/clusters/containerlab/cluster_with_gnmi_server_and_prometheus_output.md - gNMIc Pipeline: - NATS to Prometheus: - Docker Compose: deployments/pipelines/docker-compose/nats_prometheus.md - NATS to InfluxDB: - Docker Compose: deployments/pipelines/docker-compose/nats_influxdb.md - Clustered pipeline: - Docker Compose: deployments/pipelines/docker-compose/gnmic_cluster_nats_prometheus.md - Forked pipeline: - Docker Compose: deployments/pipelines/docker-compose/forked_pipeline.md - Changelog: changelog.md # - Blog: blog/index.md site_author: Karim Radhouani site_description: >- gnmi client and collector command line interface # Repository repo_name: openconfig/gnmic repo_url: https://github.com/openconfig/gnmic edit_uri: "" theme: name: material features: - navigation.tabs #- navigation.expand - navigation.top #- navigation.sections # 404 page static_templates: - 404.html # Don't include MkDocs' JavaScript include_search_page: false search_index_only: true # Default values, taken from mkdocs_theme.yml language: en palette: # Light mode - media: "(prefers-color-scheme: light)" scheme: default primary: blue accent: indigo toggle: icon: material/toggle-switch-off-outline name: Switch to dark mode # Dark mode - media: "(prefers-color-scheme: dark)" scheme: slate primary: black accent: cyan toggle: icon: material/toggle-switch name: Switch to light mode font: text: Manrope code: Fira Mono icon: logo: octicons/pulse-24 favicon: images/pulse.svg extra_css: - stylesheets/extra.css # Plugins plugins: - search - minify: minify_html: true # Customization extra: social: - icon: fontawesome/brands/github link: https://github.com/karimra analytics: provider: google property: UA-177206500-1 # Extensions markdown_extensions: - markdown.extensions.admonition - markdown.extensions.attr_list - markdown.extensions.codehilite: guess_lang: false - markdown.extensions.def_list - markdown.extensions.footnotes - markdown.extensions.meta - markdown.extensions.toc: permalink: "#" - pymdownx.arithmatex - pymdownx.betterem: smart_enable: all - pymdownx.caret - pymdownx.critic - pymdownx.details - pymdownx.emoji: emoji_index: !!python/name:materialx.emoji.twemoji emoji_generator: !!python/name:materialx.emoji.to_svg - pymdownx.highlight: linenums_style: pymdownx-inline - pymdownx.inlinehilite - pymdownx.keys - pymdownx.magiclink: repo_url_shorthand: true user: squidfunk repo: mkdocs-material - pymdownx.mark - pymdownx.smartsymbols - pymdownx.snippets: check_paths: true - pymdownx.superfences - pymdownx.tabbed: alternate_style: true - pymdownx.tasklist: custom_checkbox: true - pymdownx.tilde ================================================ FILE: pkg/actions/action.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package actions import ( "context" "log" "github.com/mitchellh/mapstructure" "github.com/openconfig/gnmic/pkg/api/types" ) type Action interface { // Init initializes an Action given its configuration and a list of options Init(cfg map[string]interface{}, opts ...Option) error // Run, well runs the action. // it takes an action Context which is made of: // - `Input` : an interface{} event message, target name added/deleted,... // - `Env` : a map[string]interface{} containing the output of previous actions // - `Vars` : a map[string]interface{} containing variables passed to the action // - `Targets`: a map[string]*types.TargetConfig containing (if the action is ran by a loader) // the currently known targets configurations Run(ctx context.Context, aCtx *Context) (interface{}, error) // NName returns the configured action name NName() string // WithTargets passes the known configured targets to the action when initialized WithTargets(map[string]*types.TargetConfig) // WithLogger passes the configured logger to the action WithLogger(*log.Logger) } // Context defines an action execution context type Context struct { // Input event message, target name added/deleted,... Input interface{} `json:"Input,omitempty"` // Env used to store the output of a sequence of actions Env map[string]interface{} `json:"Env,omitempty"` // Vars contains the variables passed to the action Vars map[string]interface{} `json:"Vars,omitempty"` // a map of known targets configurations Targets map[string]*types.TargetConfig `json:"Targets,omitempty"` } var ActionTypes = []string{ "gnmi", "http", "script", "template", } type Option func(Action) var Actions = map[string]Initializer{} type Initializer func() Action func Register(name string, initFn Initializer) { Actions[name] = initFn } func DecodeConfig(src, dst interface{}) error { decoder, err := mapstructure.NewDecoder( &mapstructure.DecoderConfig{ DecodeHook: mapstructure.StringToTimeDurationHookFunc(), Result: dst, }, ) if err != nil { return err } return decoder.Decode(src) } func WithTargets(tcs map[string]*types.TargetConfig) Option { return func(a Action) { a.WithTargets(tcs) } } func WithLogger(l *log.Logger) Option { return func(a Action) { a.WithLogger(l) } } ================================================ FILE: pkg/actions/all/all.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package all import ( _ "github.com/openconfig/gnmic/pkg/actions/gnmi_action" _ "github.com/openconfig/gnmic/pkg/actions/http_action" _ "github.com/openconfig/gnmic/pkg/actions/script_action" _ "github.com/openconfig/gnmic/pkg/actions/template_action" ) ================================================ FILE: pkg/actions/gnmi_action/gnmi_action.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package gnmi_action import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "log" "strings" "sync" "text/template" "gopkg.in/yaml.v2" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/actions" "github.com/openconfig/gnmic/pkg/api" "github.com/openconfig/gnmic/pkg/api/target" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/gtemplate" ) const ( defaultRPC = "get" loggingPrefix = "[gnmi_action] " actionType = "gnmi" defaultDataType = "ALL" defaultTarget = `{{ index .Input.Tags "source" }}` defaultEncoding = "JSON" defaultFormat = "json" ) const ( rpcGet = "get" rpcSet = "set" rpcSetUpdate = "set-update" rpcSetReplace = "set-replace" rpcSetDelete = "set-delete" rpcDelete = "delete" rpcSub = "sub" rpcSubscribe = "subscribe" ) func init() { actions.Register(actionType, func() actions.Action { return &gnmiAction{ logger: log.New(io.Discard, "", 0), m: new(sync.RWMutex), targetsConfigs: make(map[string]*types.TargetConfig), } }) } type gnmiAction struct { // action name Name string `mapstructure:"name,omitempty"` // target of the gNMI RPC, it can be a Go template Target string `mapstructure:"target,omitempty"` // gNMI RPC, possible values `get`, `set`, `set-update`, // `set-replace`, `sub`, `subscribe` RPC string `mapstructure:"rpc,omitempty"` // gNMI Path Prefix, can be a Go template Prefix string `mapstructure:"prefix,omitempty"` // list of gNMI Paths, each one can be a Go template Paths []string `mapstructure:"paths,omitempty"` // gNMI data type in case RPC is `get`, // possible values: `config`, `state`, `operational` Type string `mapstructure:"data-type,omitempty"` // list of gNMI values, used in case RPC=`set*` Values []string `mapstructure:"values,omitempty"` // gNMI encoding Encoding string `mapstructure:"encoding,omitempty"` // Debug Debug bool `mapstructure:"debug,omitempty"` // Ignore ENV proxy NoEnvProxy bool `mapstructure:"no-env-proxy,omitempty"` // Response format, // possible values: `json`, `event`, `prototext`, `protojson` Format string `mapstructure:"format,omitempty"` target *template.Template prefix *template.Template paths []*template.Template values []*template.Template logger *log.Logger m *sync.RWMutex targetsConfigs map[string]*types.TargetConfig } func (g *gnmiAction) Init(cfg map[string]interface{}, opts ...actions.Option) error { err := actions.DecodeConfig(cfg, g) if err != nil { return err } for _, opt := range opts { opt(g) } if g.Name == "" { return fmt.Errorf("action type %q missing name field", actionType) } g.setDefaults() err = g.parseTemplates() if err != nil { return err } err = g.validate() if err != nil { return err } g.logger.Printf("action name %q of type %q initialized: %v", g.Name, actionType, g) return nil } func (g *gnmiAction) Run(ctx context.Context, aCtx *actions.Context) (interface{}, error) { g.m.Lock() for n, tc := range aCtx.Targets { g.targetsConfigs[n] = tc } in := &actions.Context{ Input: aCtx.Input, Env: aCtx.Env, Vars: aCtx.Vars, Targets: aCtx.Targets, } g.m.Unlock() b := new(bytes.Buffer) err := g.target.Execute(b, in) if err != nil { return nil, err } tName := b.String() targetsConfigs, err := g.selectTargets(tName) if err != nil { return nil, err } ctx, cancel := context.WithCancel(ctx) defer cancel() result := make(map[string]interface{}) resCh := make(chan *gnmiResponse) errCh := make(chan error) wg := new(sync.WaitGroup) wg.Add(len(targetsConfigs)) for _, tc := range targetsConfigs { go func(tc *types.TargetConfig) { defer wg.Done() // create new actions.Context to be used by each target // run RPC rb, err := g.runRPC(ctx, tc, &actions.Context{ Input: in.Input, Env: in.Env, Vars: in.Vars, }) if err != nil { errCh <- err return } resCh <- &gnmiResponse{name: tc.Name, data: rb} }(tc) } errs := make([]error, 0) doneCh := make(chan struct{}) go func() { defer close(doneCh) for { select { case resp, ok := <-resCh: if !ok { return } var res interface{} // using yaml.Unmarshal instead of json.Unmarshal to avoid // treating integers as floats err = yaml.Unmarshal(resp.data, &res) if err != nil { errs = append(errs, err) } result[resp.name] = res case err := <-errCh: g.logger.Printf("gnmi action error: %v", err) errs = append(errs, err) case <-ctx.Done(): return } } }() wg.Wait() close(resCh) // close result channel <-doneCh // wait for the result map to be set if len(errs) > 0 { // return only the first errors return nil, errs[0] } return result, nil } func (g *gnmiAction) NName() string { return g.Name } func (g *gnmiAction) setDefaults() { if g.Type == "" { g.Type = defaultDataType } if g.Encoding == "" { g.Encoding = defaultEncoding } switch g.RPC { case "": g.RPC = defaultRPC case rpcSet: g.RPC = rpcSetUpdate case rpcDelete: g.RPC = rpcSetDelete case rpcSub: g.RPC = rpcSubscribe } if g.Target == "" { g.Target = defaultTarget } if g.Format == "" { g.Format = defaultFormat } } func (g *gnmiAction) validate() error { numPaths := len(g.Paths) if numPaths == 0 { return errors.New("paths field is required") } switch g.RPC { case rpcGet, rpcSetDelete, rpcDelete: case rpcSetUpdate, rpcSetReplace: numValues := len(g.values) if numValues == 0 { return errors.New("values field is required when RPC is set") } if numPaths != len(g.values) { return errors.New("number of paths and values do not match") } case rpcSub, rpcSubscribe: if strings.ToLower(g.Format) != "json" && strings.ToLower(g.Format) != "protojson" && strings.ToLower(g.Format) != "event" { return fmt.Errorf("unsupported format %q", g.Format) } default: return fmt.Errorf("unknown gnmi RPC %q", g.RPC) } return nil } func (g *gnmiAction) parseTemplates() error { var err error g.target, err = gtemplate.CreateTemplate(fmt.Sprintf("%s-target", g.Name), g.Target) if err != nil { return err } g.prefix, err = gtemplate.CreateTemplate(fmt.Sprintf("%s-prefix", g.Name), g.Prefix) if err != nil { return err } g.paths, err = g.createTemplates("path", g.Paths) if err != nil { return err } g.values, err = g.createTemplates("value", g.Values) return err } func (g *gnmiAction) createTemplates(n string, s []string) ([]*template.Template, error) { tpls := make([]*template.Template, 0, len(s)) for i, p := range s { tpl, err := gtemplate.CreateTemplate(fmt.Sprintf("%s-%s-%d", g.Name, n, i), p) if err != nil { return nil, err } tpls = append(tpls, tpl) } return tpls, nil } func (g *gnmiAction) createGetRequest(in *actions.Context) (*gnmi.GetRequest, error) { gnmiOpts := make([]api.GNMIOption, 0, 3) gnmiOpts = append(gnmiOpts, api.Encoding(g.Encoding)) gnmiOpts = append(gnmiOpts, api.DataType(g.Type)) var err error b := new(bytes.Buffer) if g.Prefix != "" { err = g.prefix.Execute(b, in) if err != nil { return nil, fmt.Errorf("prefix parse error: %v", err) } gnmiOpts = append(gnmiOpts, api.Prefix(b.String())) } for _, p := range g.paths { b.Reset() err = p.Execute(b, in) if err != nil { return nil, fmt.Errorf("path parse error: %v", err) } gnmiOpts = append(gnmiOpts, api.Path(b.String())) } return api.NewGetRequest(gnmiOpts...) } func (g *gnmiAction) createSetRequest(in *actions.Context) (*gnmi.SetRequest, error) { gnmiOpts := make([]api.GNMIOption, 0, len(g.paths)) var err error b := new(bytes.Buffer) if g.Prefix != "" { err = g.prefix.Execute(b, in) if err != nil { return nil, fmt.Errorf("prefix parse error: %v", err) } gnmiOpts = append(gnmiOpts, api.Prefix(b.String())) } for i, p := range g.paths { b.Reset() err = p.Execute(b, in) if err != nil { return nil, fmt.Errorf("path parse error: %v", err) } sPath := b.String() switch g.RPC { case rpcSetDelete: gnmiOpts = append(gnmiOpts, api.Delete(sPath)) case rpcSetUpdate: b.Reset() err = g.values[i].Execute(b, in) if err != nil { return nil, fmt.Errorf("value %d parse error: %v", i, err) } gnmiOpts = append(gnmiOpts, api.Update( api.Path(sPath), api.Value(b.String(), g.Encoding), )) case rpcSetReplace: b.Reset() err = g.values[i].Execute(b, in) if err != nil { return nil, fmt.Errorf("value %d parse error: %v", i, err) } gnmiOpts = append(gnmiOpts, api.Replace( api.Path(sPath), api.Value(b.String(), g.Encoding), )) } } return api.NewSetRequest(gnmiOpts...) } func (g *gnmiAction) createSubscribeRequest(in *actions.Context) (*gnmi.SubscribeRequest, error) { gnmiOpts := make([]api.GNMIOption, 0, 2+len(g.paths)) gnmiOpts = append(gnmiOpts, api.Encoding(g.Encoding), api.SubscriptionListModeONCE(), ) // var err error b := new(bytes.Buffer) if g.Prefix != "" { err = g.prefix.Execute(b, in) if err != nil { return nil, fmt.Errorf("prefix template exec error: %v", err) } gnmiOpts = append(gnmiOpts, api.Prefix(b.String())) } for _, p := range g.paths { b.Reset() err = p.Execute(b, in) if err != nil { return nil, fmt.Errorf("path template exec error: %v", err) } gnmiOpts = append(gnmiOpts, api.Subscription( api.Path(b.String()))) } return api.NewSubscribeRequest(gnmiOpts...) } func (g *gnmiAction) selectTargets(tName string) ([]*types.TargetConfig, error) { if tName == "" { return nil, nil } targets := make([]*types.TargetConfig, 0, len(g.targetsConfigs)) g.m.RLock() defer g.m.RUnlock() // select all targets if tName == "all" { for _, tc := range g.targetsConfigs { targets = append(targets, tc) } return targets, nil } // select a few targets tNames := strings.Split(tName, ",") for _, name := range tNames { if tc, ok := g.targetsConfigs[name]; ok { targets = append(targets, tc) } } return targets, nil } func (g *gnmiAction) runRPC(ctx context.Context, tc *types.TargetConfig, in *actions.Context) ([]byte, error) { switch g.RPC { case rpcGet: return g.runGet(ctx, tc, in) case rpcSetUpdate, rpcSetReplace, rpcSetDelete: return g.runSet(ctx, tc, in) case rpcSubscribe: // once return g.runSubscribe(ctx, tc, in) default: return nil, fmt.Errorf("unknown RPC %q", g.RPC) } } func (g *gnmiAction) runGet(ctx context.Context, tc *types.TargetConfig, in *actions.Context) ([]byte, error) { t := target.NewTarget(tc) req, err := g.createGetRequest(in) if err != nil { return nil, err } err = t.CreateGNMIClient(ctx) if err != nil { return nil, err } defer t.Close() resp, err := t.Get(ctx, req) if err != nil { return nil, fmt.Errorf("target %q GetRequest failed: %v", t.Config.Name, err) } mo := &formatters.MarshalOptions{Format: g.Format} return mo.Marshal(resp, nil) } func (g *gnmiAction) runSet(ctx context.Context, tc *types.TargetConfig, in *actions.Context) ([]byte, error) { t := target.NewTarget(tc) req, err := g.createSetRequest(in) if err != nil { return nil, err } err = t.CreateGNMIClient(ctx) if err != nil { return nil, fmt.Errorf("target %q SetRequest failed: %v", t.Config.Name, err) } defer t.Close() resp, err := t.Set(ctx, req) if err != nil { return nil, err } mo := &formatters.MarshalOptions{Format: g.Format} return mo.Marshal(resp, nil) } func (g *gnmiAction) runSubscribe(ctx context.Context, tc *types.TargetConfig, in *actions.Context) ([]byte, error) { t := target.NewTarget(tc) req, err := g.createSubscribeRequest(in) if err != nil { return nil, err } err = t.CreateGNMIClient(ctx) if err != nil { return nil, err } defer t.Close() responses, err := t.SubscribeOnce(ctx, req) if err != nil { return nil, err } mo := &formatters.MarshalOptions{Format: g.Format} formattedResponse := make([]interface{}, 0, len(responses)) m := map[string]string{ "source": tc.Name, } for _, r := range responses { msgb, err := mo.Marshal(r, m) if err != nil { return nil, err } var v interface{} err = json.Unmarshal(msgb, &v) if err != nil { return nil, err } formattedResponse = append(formattedResponse, utils.Convert(v)) } return json.Marshal(formattedResponse) } type gnmiResponse struct { name string data []byte } ================================================ FILE: pkg/actions/gnmi_action/gnmi_action_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package gnmi_action import ( "testing" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/actions" "github.com/openconfig/gnmic/pkg/api/testutils" "github.com/openconfig/gnmic/pkg/formatters" ) type getRequestTestItem struct { input *formatters.EventMsg output *gnmi.GetRequest } type setRequestTestItem struct { input *formatters.EventMsg output *gnmi.SetRequest } var getRequestTestSet = map[string]struct { actionType string action map[string]interface{} tests []getRequestTestItem }{ "get_no_templates": { actionType: actionType, action: map[string]interface{}{ "type": "gnmi", "name": "act1", "paths": []string{"/path"}, "debug": true, "vars": nil, }, tests: []getRequestTestItem{ { input: nil, output: &gnmi.GetRequest{ Path: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ { Name: "path", }, }, }, }, Encoding: gnmi.Encoding_JSON, }, }, }, }, "get_with_templates_in_path": { actionType: actionType, action: map[string]interface{}{ "type": "gnmi", "name": "act1", "paths": []string{`/{{.Input.Name}}`}, "debug": true, }, tests: []getRequestTestItem{ { input: &formatters.EventMsg{ Name: "sub1", }, output: &gnmi.GetRequest{ Path: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ { Name: "sub1", }, }, }, }, Encoding: gnmi.Encoding_JSON, }, }, }, }, "get_with_templates_in_prefix": { actionType: actionType, action: map[string]interface{}{ "type": "gnmi", "name": "act1", "prefix": `/{{.Input.Name}}`, "paths": []string{`/{{.Input.Name}}`}, "debug": true, }, tests: []getRequestTestItem{ { input: &formatters.EventMsg{ Name: "sub1", }, output: &gnmi.GetRequest{ Prefix: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "sub1", }, }, }, Path: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ { Name: "sub1", }, }, }, }, Encoding: gnmi.Encoding_JSON, }, }, }, }, } var setRequestTestSet = map[string]struct { actionType string action map[string]interface{} tests []setRequestTestItem }{ "set_no_templates": { actionType: actionType, action: map[string]interface{}{ "type": "gnmi", "name": "act1", "rpc": "set", "paths": []string{"/path"}, "values": []string{"value1"}, "debug": true, }, tests: []setRequestTestItem{ { input: nil, output: &gnmi.SetRequest{ Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "path", }, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value1\""), }, }, }, }, }, }, }, }, "set_with_templates_in_path": { actionType: actionType, action: map[string]interface{}{ "type": "gnmi", "name": "act1", "rpc": "set", "paths": []string{"/{{.Input.Name}}"}, "values": []string{"value1"}, "debug": true, }, tests: []setRequestTestItem{ { input: &formatters.EventMsg{ Name: "sub1", }, output: &gnmi.SetRequest{ Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "sub1", }, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value1\""), }, }, }, }, }, }, }, }, // changing a value via set update "set_with_template_in_values": { actionType: actionType, action: map[string]interface{}{ "type": "gnmi", "name": "act1", "rpc": "set", "paths": []string{`{{ range $k, $v := .Input.Values }}{{if eq $k "path1" }}{{$k}}{{end}}{{end}}`}, "values": []string{`{{ range $k, $v := .Input.Values }}{{if eq $k "path1" }}value2{{end}}{{end}}`}, "debug": true, }, tests: []setRequestTestItem{ { input: &formatters.EventMsg{ Name: "sub1", Values: map[string]interface{}{ "path1": "value1", }, }, output: &gnmi.SetRequest{ Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "path1", }, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value2\""), }, }, }, }, }, }, }, }, // changing multiple values via set update "set_with_multiple_templates_in_values": { actionType: actionType, action: map[string]interface{}{ "type": "gnmi", "name": "act1", "rpc": "set", "paths": []string{ `{{ range $k, $v := .Input.Values }}{{if eq $k "path1" }}{{$k}}{{end}}{{end}}`, `{{ range $k, $v := .Input.Values }}{{if eq $k "path2" }}{{$k}}{{end}}{{end}}`, }, "values": []string{ `{{ range $k, $v := .Input.Values }}{{if eq $k "path1" }}value11{{end}}{{end}}`, `{{ range $k, $v := .Input.Values }}{{if eq $k "path2" }}value22{{end}}{{end}}`, }, "debug": true, }, tests: []setRequestTestItem{ { input: &formatters.EventMsg{ Name: "sub1", Values: map[string]interface{}{ "path1": "value1", "path2": "value2", }, }, output: &gnmi.SetRequest{ Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "path1", }, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value11\""), }, }, }, { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "path2", }, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value22\""), }, }, }, }, }, }, }, }, // changing a value via set replace "set_replace_with_template_in_values": { actionType: actionType, action: map[string]interface{}{ "type": "gnmi", "name": "act1", "rpc": "set-replace", "paths": []string{`{{ range $k, $v := .Input.Values }}{{if and (eq $k "path1") (eq $v "value1")}}{{$k}}{{end}}{{end}}`}, "values": []string{`{{ range $k, $v := .Input.Values }}{{if and (eq $k "path1") (eq $v "value1")}}value2{{end}}{{end}}`}, "debug": true, }, tests: []setRequestTestItem{ { input: &formatters.EventMsg{ Name: "sub1", Values: map[string]interface{}{ "path1": "value1", }, }, output: &gnmi.SetRequest{ Replace: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "path1", }, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value2\""), }, }, }, }, }, }, }, }, // changing multiple values via set update replace "set_replace_with_multiple_templates_in_values": { actionType: actionType, action: map[string]interface{}{ "type": "gnmi", "name": "act1", "rpc": "set-replace", "paths": []string{ `{{ range $k, $v := .Input.Values }}{{if and (eq $k "path1") (eq $v "value1")}}{{$k}}{{end}}{{end}}`, `{{ range $k, $v := .Input.Values }}{{if and (eq $k "path2") (eq $v "value2")}}{{$k}}{{end}}{{end}}`, }, "values": []string{ `{{ range $k, $v := .Input.Values }}{{if and (eq $k "path1") (eq $v "value1")}}value11{{end}}{{end}}`, `{{ range $k, $v := .Input.Values }}{{if and (eq $k "path2") (eq $v "value2")}}value22{{end}}{{end}}`, }, "debug": true, }, tests: []setRequestTestItem{ { input: &formatters.EventMsg{ Name: "sub1", Values: map[string]interface{}{ "path1": "value1", "path2": "value2", }, }, output: &gnmi.SetRequest{ Replace: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "path1", }, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value11\""), }, }, }, { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "path2", }, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value22\""), }, }, }, }, }, }, }, }, } func TestGnmiGetRequest(t *testing.T) { for name, ts := range getRequestTestSet { if ai, ok := actions.Actions[ts.actionType]; ok { t.Log("found action") a := ai() err := a.Init(ts.action) if err != nil { t.Errorf("failed to initialize action: %v", err) return } t.Logf("action: %+v", a) ga := a.(*gnmiAction) for i, item := range ts.tests { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) gReq, err := ga.createGetRequest(&actions.Context{Input: item.input}) if err != nil { t.Logf("failed: %v", err) t.Fail() } if !testutils.GetRequestsEqual(gReq, item.output) { t.Errorf("failed at %s item %d, expected %+v, got: %+v", name, i, item.output, gReq) } }) } } else { t.Errorf("action %q not found", ts.actionType) } } } func TestGnmiSetRequest(t *testing.T) { for name, ts := range setRequestTestSet { if ai, ok := actions.Actions[ts.actionType]; ok { t.Log("found action") a := ai() err := a.Init(ts.action) if err != nil { t.Errorf("failed to initialize action: %v", err) return } t.Logf("action: %+v", a) ga := a.(*gnmiAction) for i, item := range ts.tests { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) gReq, err := ga.createSetRequest(&actions.Context{Input: item.input}) if err != nil { t.Logf("failed: %v", err) t.Fail() } if !testutils.SetRequestsEqual(gReq, item.output) { t.Errorf("failed at %s item %d, expected %+v, got: %+v", name, i, item.output, gReq) } }) } } else { t.Errorf("action %q not found", ts.actionType) } } } ================================================ FILE: pkg/actions/gnmi_action/options.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package gnmi_action import ( "log" "os" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" ) func (g *gnmiAction) WithTargets(tcs map[string]*types.TargetConfig) { if tcs == nil { return } g.targetsConfigs = tcs } func (g *gnmiAction) WithLogger(logger *log.Logger) { if g.Debug && logger != nil { g.logger = log.New(logger.Writer(), loggingPrefix, logger.Flags()) } else if g.Debug { g.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } ================================================ FILE: pkg/actions/http_action/http_action.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package http_action import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "log" "net/http" "os" "strings" "text/template" "time" "github.com/openconfig/gnmic/pkg/actions" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( defaultMethod = "GET" defaultTimeout = 5 * time.Second loggingPrefix = "[http_action] " actionType = "http" defaultBodyTemplate = "{{ json . }}" ) func init() { actions.Register(actionType, func() actions.Action { return &httpAction{ logger: log.New(io.Discard, "", 0), } }) } type httpAction struct { Name string `mapstructure:"name,omitempty"` Method string `mapstructure:"method,omitempty"` URL string `mapstructure:"url,omitempty"` Headers map[string]string `mapstructure:"headers,omitempty"` Timeout time.Duration `mapstructure:"timeout,omitempty"` Body string `mapstructure:"body,omitempty"` Debug bool `mapstructure:"debug,omitempty"` url *template.Template body *template.Template logger *log.Logger } func (h *httpAction) Init(cfg map[string]interface{}, opts ...actions.Option) error { err := actions.DecodeConfig(cfg, h) if err != nil { return err } for _, opt := range opts { opt(h) } if h.Name == "" { return fmt.Errorf("action type %q missing name field", actionType) } err = h.setDefaults() if err != nil { return err } h.body, err = template.New("body").Funcs(funcMap).Parse(h.Body) if err != nil { return err } h.url, err = template.New("url").Funcs(funcMap).Parse(h.URL) return err } func (h *httpAction) Run(ctx context.Context, aCtx *actions.Context) (interface{}, error) { if h.url == nil { return nil, errors.New("missing url template") } if h.body == nil { return nil, errors.New("missing body template") } in := &actions.Context{ Input: aCtx.Input, Env: aCtx.Env, Vars: aCtx.Vars, Targets: aCtx.Targets, } b := new(bytes.Buffer) err := json.NewEncoder(b).Encode(in) if err != nil { return nil, err } b.Reset() err = h.body.Execute(b, in) if err != nil { return nil, err } url := new(bytes.Buffer) err = h.url.Execute(url, in) if err != nil { return nil, err } h.logger.Printf("url: %s", url.String()) h.logger.Printf("body: %s", b.String()) req, err := http.NewRequest(h.Method, url.String(), b) if err != nil { return nil, err } for k, v := range h.Headers { req.Header.Add(k, v) } client := &http.Client{ Timeout: h.Timeout, } ctx, cancel := context.WithCancel(ctx) defer cancel() resp, err := client.Do(req.WithContext(ctx)) if err != nil { return nil, err } defer resp.Body.Close() if resp.StatusCode == http.StatusOK { bodyBytes, err := io.ReadAll(resp.Body) if err != nil { return nil, err } return bodyBytes, nil } return nil, fmt.Errorf("status code=%d", resp.StatusCode) } func (h *httpAction) NName() string { return h.Name } func (h *httpAction) setDefaults() error { // if !strings.HasPrefix(h.URL, "http") { // h.URL = "http://" + h.URL // } // if _, err := url.Parse(h.URL); err != nil { // return err // } if h.Method == "" { h.Method = defaultMethod } h.Method = strings.ToUpper(h.Method) switch h.Method { case http.MethodConnect: break case http.MethodDelete: break case http.MethodGet: break case http.MethodHead: break case http.MethodOptions: break case http.MethodPatch: break case http.MethodPost: break case http.MethodPut: break default: return fmt.Errorf("method %q not allowed", h.Method) } if h.Timeout <= 0 { h.Timeout = defaultTimeout } if h.Body == "" { h.Body = defaultBodyTemplate } return nil } func (h *httpAction) WithTargets(map[string]*types.TargetConfig) {} func (h *httpAction) WithLogger(logger *log.Logger) { if h.Debug && logger != nil { h.logger = log.New(logger.Writer(), loggingPrefix, logger.Flags()) } else if h.Debug { h.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } var funcMap = template.FuncMap{ "json": func(v interface{}) string { a, _ := json.Marshal(v) return string(a) }, "name": func(v interface{}) string { var result interface{} switch v := v.(type) { case *formatters.EventMsg: result = v.Name default: return "" } a, _ := json.Marshal(result) return string(a) }, "withTags": func(v interface{}, keys ...string) string { switch v := v.(type) { case *formatters.EventMsg: tags := v.Tags v.Tags = make(map[string]string) for _, k := range keys { if vv, ok := tags[k]; ok { v.Tags[k] = vv } } a, _ := json.Marshal(v) return string(a) case string: msg := make(map[string]interface{}) json.Unmarshal([]byte(v), &msg) tags := msg["tags"] if tags == nil { a, _ := json.Marshal(msg) return string(a) } tagsMap, ok := tags.(map[string]interface{}) if !ok { a, _ := json.Marshal(msg) return string(a) } newTags := make(map[string]interface{}) for _, k := range keys { if vv, ok := tagsMap[k]; ok { newTags[k] = vv } } delete(msg, "tags") if len(newTags) > 0 { msg["tags"] = newTags } a, _ := json.Marshal(msg) return string(a) } return "" }, "withValues": func(v interface{}, keys ...string) string { switch v := v.(type) { case *formatters.EventMsg: values := v.Values v.Values = make(map[string]interface{}) for _, k := range keys { if vv, ok := values[k]; ok { v.Values[k] = vv } } a, _ := json.Marshal(v) return string(a) case string: msg := make(map[string]interface{}) json.Unmarshal([]byte(v), &msg) values := msg["values"] if values == nil { a, _ := json.Marshal(msg) return string(a) } valuesMap, ok := values.(map[string]interface{}) if !ok { a, _ := json.Marshal(msg) return string(a) } newValues := make(map[string]interface{}) for _, k := range keys { if vv, ok := valuesMap[k]; ok { newValues[k] = vv } } delete(msg, "values") if len(newValues) > 0 { msg["values"] = newValues } a, _ := json.Marshal(msg) return string(a) } return "" }, "withoutTags": func(v interface{}, keys ...string) string { switch v := v.(type) { case *formatters.EventMsg: for _, k := range keys { delete(v.Tags, k) } a, _ := json.Marshal(v) return string(a) case string: msg := make(map[string]interface{}) json.Unmarshal([]byte(v), &msg) tags := msg["tags"] if tags == nil { a, _ := json.Marshal(msg) return string(a) } switch tags := msg["tags"].(type) { case map[string]interface{}: for _, k := range keys { delete(tags, k) } msg["tags"] = tags } a, _ := json.Marshal(msg) return string(a) } return "" }, "withoutValues": func(v interface{}, keys ...string) string { switch v := v.(type) { case *formatters.EventMsg: for _, k := range keys { delete(v.Values, k) } a, _ := json.Marshal(v) return string(a) case string: msg := make(map[string]interface{}) json.Unmarshal([]byte(v), &msg) if msg["values"] == nil { a, _ := json.Marshal(msg) return string(a) } switch values := msg["values"].(type) { case map[string]interface{}: for _, k := range keys { delete(values, k) } msg["values"] = values } a, _ := json.Marshal(msg) return string(a) } return "" }, } ================================================ FILE: pkg/actions/http_action/http_action_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package http_action import ( "context" "encoding/json" "errors" "fmt" "io" "log" "net/http" "net/url" "os" "reflect" "testing" "time" "github.com/openconfig/gnmic/pkg/actions" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) type item struct { input *formatters.EventMsg output interface{} } var testset = map[string]struct { actionType string action map[string]interface{} tests []item }{ "default_values": { actionType: actionType, action: map[string]interface{}{ "type": "http", "name": "act1", "URL": "http://localhost:8080", "debug": true, }, tests: []item{ { input: &formatters.EventMsg{ Name: "sub1", Tags: map[string]string{ "tag1": "1", }, }, output: map[string]interface{}{ "Input": map[string]interface{}{ "name": "sub1", "tags": map[string]interface{}{ "tag1": "1", }, }, }, }, }, }, "with_simple_template": { actionType: actionType, action: map[string]interface{}{ "type": "http", "name": "act1", "url": "http://localhost:8080", "body": `{{ name .Input }}`, "debug": true, }, tests: []item{ { input: &formatters.EventMsg{ Name: "sub1", Tags: map[string]string{ "tag1": "1", }, }, output: "sub1", }, { input: &formatters.EventMsg{ Name: "sub2", }, output: "sub2", }, }, }, "remove_all_tags": { actionType: actionType, action: map[string]interface{}{ "type": "http", "name": "act1", "url": "http://localhost:8080", "body": `{{ withTags .Input }}`, "debug": true, }, tests: []item{ { input: &formatters.EventMsg{ Name: "sub1", Tags: map[string]string{ "tag1": "1", }, }, output: map[string]interface{}{ "name": "sub1", }, }, { input: &formatters.EventMsg{ Name: "sub1", Tags: map[string]string{ "tag1": "1", "tag2": "2", }, }, output: map[string]interface{}{ "name": "sub1", }, }, }, }, "remove_some_tags": { actionType: actionType, action: map[string]interface{}{ "type": "http", "name": "act1", "url": "http://localhost:8080", "body": `{{ withoutTags .Input "tag1" }}`, "debug": true, }, tests: []item{ { input: &formatters.EventMsg{ Name: "sub1", Tags: map[string]string{ "tag1": "1", }, }, output: map[string]interface{}{ "name": "sub1", }, }, { input: &formatters.EventMsg{ Name: "sub1", Tags: map[string]string{ "tag1": "1", "tag2": "2", }, }, output: map[string]interface{}{ "name": "sub1", "tags": map[string]interface{}{ "tag2": "2", }, }, }, { input: &formatters.EventMsg{ Name: "sub1", Tags: map[string]string{ "tag2": "2", }, }, output: map[string]interface{}{ "name": "sub1", "tags": map[string]interface{}{ "tag2": "2", }, }, }, }, }, "select_some_tags": { actionType: actionType, action: map[string]interface{}{ "type": "http", "name": "act1", "url": "http://localhost:8080", "body": `{{ withTags .Input "tag1" }}`, "debug": true, }, tests: []item{ { input: &formatters.EventMsg{ Name: "sub1", Tags: map[string]string{ "tag1": "1", "tag2": "2", "tag3": "3", }, }, output: map[string]interface{}{ "name": "sub1", "tags": map[string]interface{}{ "tag1": "1", }, }, }, { input: &formatters.EventMsg{ Name: "sub1", Tags: map[string]string{ "tag2": "2", }, }, output: map[string]interface{}{ "name": "sub1", }, }, }, }, "remove_all_values": { actionType: actionType, action: map[string]interface{}{ "type": "http", "name": "act1", "url": "http://localhost:8080", "body": `{{ withValues .Input }}`, "debug": true, }, tests: []item{ { input: &formatters.EventMsg{ Name: "sub1", Tags: map[string]string{ "tag1": "1", }, }, output: map[string]interface{}{ "name": "sub1", "tags": map[string]interface{}{ "tag1": "1", }, }, }, { input: &formatters.EventMsg{ Name: "sub1", Tags: map[string]string{ "tag1": "1", }, Values: map[string]interface{}{ "val1": "1", }, }, output: map[string]interface{}{ "name": "sub1", "tags": map[string]interface{}{ "tag1": "1", }, }, }, }, }, "remove_some_values": { actionType: actionType, action: map[string]interface{}{ "type": "http", "name": "act1", "url": "http://localhost:8080", "body": `{{ withoutValues .Input "val1"}}`, "debug": true, }, tests: []item{ { input: &formatters.EventMsg{ Name: "sub1", Tags: map[string]string{ "tag1": "1", }, }, output: map[string]interface{}{ "name": "sub1", "tags": map[string]interface{}{ "tag1": "1", }, }, }, { input: &formatters.EventMsg{ Name: "sub1", Tags: map[string]string{ "tag1": "1", }, Values: map[string]interface{}{ "val1": "1", }, }, output: map[string]interface{}{ "name": "sub1", "tags": map[string]interface{}{ "tag1": "1", }, }, }, { input: &formatters.EventMsg{ Name: "sub1", Tags: map[string]string{ "tag1": "1", }, Values: map[string]interface{}{ "val1": "1", "val2": "2", }, }, output: map[string]interface{}{ "name": "sub1", "tags": map[string]interface{}{ "tag1": "1", }, "values": map[string]interface{}{ "val2": "2", }, }, }, }, }, "select_some_values": { actionType: actionType, action: map[string]interface{}{ "type": "http", "name": "act1", "url": "http://localhost:8080", "body": `{{ withValues .Input "val1" }}`, "debug": true, }, tests: []item{ { input: &formatters.EventMsg{ Name: "sub1", Tags: map[string]string{ "tag1": "1", }, Values: map[string]interface{}{ "val1": "1", }, }, output: map[string]interface{}{ "name": "sub1", "tags": map[string]interface{}{ "tag1": "1", }, "values": map[string]interface{}{ "val1": "1", }, }, }, { input: &formatters.EventMsg{ Name: "sub1", Tags: map[string]string{ "tag1": "1", }, }, output: map[string]interface{}{ "name": "sub1", "tags": map[string]interface{}{ "tag1": "1", }, }, }, { input: &formatters.EventMsg{ Name: "sub1", Tags: map[string]string{ "tag1": "1", }, Values: map[string]interface{}{ "val2": "2", }, }, output: map[string]interface{}{ "name": "sub1", "tags": map[string]interface{}{ "tag1": "1", }, }, }, }, }, "select_tags_and_values": { actionType: actionType, action: map[string]interface{}{ "type": "http", "name": "act1", "url": "http://localhost:8080", "body": `{{ withTags (withValues .Input "val1") "tag1" }}`, "debug": true, }, tests: []item{ { input: &formatters.EventMsg{ Name: "sub1", Tags: map[string]string{ "tag1": "1", "tag2": "2", }, Values: map[string]interface{}{ "val1": "1", }, }, output: map[string]interface{}{ "name": "sub1", "tags": map[string]interface{}{ "tag1": "1", }, "values": map[string]interface{}{ "val1": "1", }, }, }, { input: &formatters.EventMsg{ Name: "sub1", Tags: map[string]string{ "tag2": "2", }, }, output: map[string]interface{}{ "name": "sub1", }, }, { input: &formatters.EventMsg{ Name: "sub1", Tags: map[string]string{ "tag2": "2", }, Values: map[string]interface{}{ "val1": "1", "val2": "2", }, }, output: map[string]interface{}{ "name": "sub1", "values": map[string]interface{}{ "val1": "1", }, }, }, }, }, } func TestHTTPAction(t *testing.T) { for name, ts := range testset { if ai, ok := actions.Actions[ts.actionType]; ok { t.Log("found action") a := ai() err := a.Init(ts.action, actions.WithLogger(log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags))) if err != nil { t.Errorf("failed to initialize action: %v", err) return } t.Logf("action: %+v", a) mux := http.NewServeMux() mux.Handle("/", echo()) ah, ok := a.(*httpAction) if !ok { t.Errorf("failed to assert action type: %T", a) t.Fail() return } // start http server urlAddr, err := url.Parse(ah.URL) if err != nil { t.Errorf("failed to parse URL: %v", err) t.Fail() return } s := &http.Server{ Addr: urlAddr.Host, Handler: mux, } go func() { if err := s.ListenAndServe(); err != nil { if !errors.Is(err, http.ErrServerClosed) { t.Logf("failed to start http server: %v", err) } } }() // wait for server time.Sleep(time.Second) // for i, item := range ts.tests { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) res, err := a.Run(context.TODO(), &actions.Context{Input: item.input}) if err != nil { t.Errorf("failed at %s item %d, %v", name, i, err) t.Fail() return } t.Logf("Run result: %+v", string(res.([]byte))) var result interface{} err = json.Unmarshal(res.([]byte), &result) if err != nil { t.Errorf("failed at %s item %d, %v", name, i, err) t.Fail() return } if !reflect.DeepEqual(result, item.output) { t.Errorf("failed at %s item %d, expected %+v(%T), got: %+v(%T)", name, i, item.output, item.output, result, result) } }) } ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) s.Shutdown(ctx) cancel() } else { t.Errorf("action %s not found", ts.actionType) } } } func echo() http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { b, err := io.ReadAll(r.Body) defer r.Body.Close() if err != nil { w.WriteHeader(http.StatusInternalServerError) fmt.Fprintf(w, "%v", err) return } fmt.Fprint(w, string(b)) }) } ================================================ FILE: pkg/actions/script_action/options.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package script_action import ( "log" "os" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" ) func (s *scriptAction) WithTargets(map[string]*types.TargetConfig) {} func (s *scriptAction) WithLogger(logger *log.Logger) { if s.Debug && logger != nil { s.logger = log.New(logger.Writer(), loggingPrefix, logger.Flags()) } else if s.Debug { s.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } ================================================ FILE: pkg/actions/script_action/script_action.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package script_action import ( "bytes" "context" "fmt" "io" "log" "os" "os/exec" "strings" "github.com/openconfig/gnmic/pkg/actions" ) const ( loggingPrefix = "[script_action] " actionType = "script" defaultShell = "/bin/bash" ) func init() { actions.Register(actionType, func() actions.Action { return &scriptAction{ logger: log.New(io.Discard, "", 0), } }) } type scriptAction struct { Name string `mapstructure:"name,omitempty"` Shell string `mapstructure:"shell,omitempty"` Command string `mapstructure:"command,omitempty"` File string `mapstructure:"file,omitempty"` Debug bool `mapstructure:"debug,omitempty"` logger *log.Logger } func (s *scriptAction) Init(cfg map[string]interface{}, opts ...actions.Option) error { err := actions.DecodeConfig(cfg, s) if err != nil { return err } for _, opt := range opts { opt(s) } if s.Name == "" { return fmt.Errorf("action type %q missing name field", actionType) } err = s.setDefaults() if err != nil { return err } s.logger.Printf("action name %q of type %q initialized: %v", s.Name, actionType, s) return nil } func (s *scriptAction) Run(_ context.Context, aCtx *actions.Context) (interface{}, error) { if s.Command == "" && s.File == "" { return nil, nil } stdout := new(bytes.Buffer) stderr := new(bytes.Buffer) var cmd *exec.Cmd if s.Command != "" { cmds := strings.Split(s.Command, "\n") args := append([]string{"-c"}, strings.Join(cmds, "; ")) cmd = exec.Command(s.Shell, args...) } if s.File != "" { cmd = exec.Command(s.File) } cmd.Stdout = stdout cmd.Stderr = stderr cmd.Env = os.Environ() for k, v := range aCtx.Env { k = strings.ReplaceAll(k, "-", "_") cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v)) } for k, v := range aCtx.Vars { k = strings.ReplaceAll(k, "-", "_") cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", k, v)) } err := cmd.Run() if err != nil { return nil, fmt.Errorf("%v: %s", err, stderr.String()) } if stderr.String() != "" { return map[string]string{"stderr": stderr.String()}, nil } return map[string]string{"stdout": stdout.String()}, nil } func (s *scriptAction) NName() string { return s.Name } func (s *scriptAction) setDefaults() error { if s.Shell == "" { s.Shell = defaultShell } return nil } ================================================ FILE: pkg/actions/template_action/options.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package template_action import ( "log" "os" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" ) func (t *templateAction) WithTargets(map[string]*types.TargetConfig) {} func (t *templateAction) WithLogger(logger *log.Logger) { if t.Debug && logger != nil { t.logger = log.New(logger.Writer(), loggingPrefix, logger.Flags()) } else if t.Debug { t.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } ================================================ FILE: pkg/actions/template_action/template_action.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package template_action import ( "bytes" "context" "fmt" "io" "log" "os" "text/template" "github.com/openconfig/gnmic/pkg/actions" "github.com/openconfig/gnmic/pkg/gtemplate" ) const ( loggingPrefix = "[template_action] " actionType = "template" defaultTemplate = "{{ . }}" ) func init() { actions.Register(actionType, func() actions.Action { return &templateAction{ logger: log.New(io.Discard, "", 0), } }) } type templateAction struct { Name string `mapstructure:"name,omitempty"` Template string `mapstructure:"template,omitempty"` TemplateFile string `mapstructure:"template-file,omitempty"` Output string `mapstructure:"output,omitempty"` Debug bool `mapstructure:"debug,omitempty"` tpl *template.Template logger *log.Logger } func (t *templateAction) Init(cfg map[string]interface{}, opts ...actions.Option) error { err := actions.DecodeConfig(cfg, t) if err != nil { return err } for _, opt := range opts { opt(t) } if t.Name == "" { return fmt.Errorf("action type %q missing name field", actionType) } err = t.setDefaults() if err != nil { return err } if t.Template != "" { t.tpl, err = gtemplate.CreateTemplate(fmt.Sprintf("%s-template-action", t.Name), t.Template) if err != nil { return err } } else if t.TemplateFile != "" { t.tpl, err = template.ParseGlob(t.TemplateFile) if err != nil { return err } t.tpl = t.tpl.Funcs(gtemplate.NewTemplateEngine().CreateFuncs()). Option("missingkey=zero") } t.logger.Printf("action name %q of type %q initialized: %v", t.Name, actionType, t) return nil } func (t *templateAction) Run(_ context.Context, aCtx *actions.Context) (interface{}, error) { b := new(bytes.Buffer) err := t.tpl.Execute(b, &actions.Context{ Input: aCtx.Input, Env: aCtx.Env, Vars: aCtx.Vars, Targets: aCtx.Targets, }) if err != nil { return nil, err } out := b.String() if t.Debug { t.logger.Printf("template output: %s", out) } switch t.Output { case "stdout": fmt.Fprint(os.Stdout, out) case "": default: fi, err := os.Create(t.Output) if err != nil { return nil, err } _, err = fi.Write(b.Bytes()) if err != nil { return nil, err } } return out, nil } func (t *templateAction) NName() string { return t.Name } func (t *templateAction) setDefaults() error { if t.Template == "" && t.TemplateFile == "" { t.Template = defaultTemplate } return nil } ================================================ FILE: pkg/api/gnmi_msgs.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package api import ( "bytes" "encoding/json" "errors" "fmt" "strconv" "strings" "time" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmi/proto/gnmi_ext" gvalue "github.com/openconfig/gnmi/value" "github.com/openconfig/gnmic/pkg/api/path" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/durationpb" ) const ( DefaultGNMIVersion = "0.7.0" encodingJSON = "json" encodingJSON_IETF = "json_ietf" ) // GNMIOption is a function that acts on the supplied proto.Message. // The message is expected to be one of the protobuf defined gNMI messages // exchanged by the RPCs or any of the nested messages. type GNMIOption func(proto.Message) error // ErrInvalidMsgType is returned by a GNMIOption in case the Option is supplied // an unexpected proto.Message var ErrInvalidMsgType = errors.New("invalid message type") // ErrInvalidValue is returned by a GNMIOption in case the Option is supplied // an unexpected value. var ErrInvalidValue = errors.New("invalid value") // apply is a helper function that simply applies the options to the proto.Message. // It returns an error if any of the options fails. func apply(m proto.Message, opts ...GNMIOption) error { for _, o := range opts { if err := o(m); err != nil { return err } } return nil } // NewCapabilitiesRequest creates a new *gnmi.CapabilityeRequest using the provided GNMIOption list. // returns an error in case one of the options is invalid func NewCapabilitiesRequest(opts ...GNMIOption) (*gnmi.CapabilityRequest, error) { m := new(gnmi.CapabilityRequest) err := apply(m, opts...) if err != nil { return nil, err } return m, nil } // NewCapabilitiesResponse creates a new *gnmi.CapabilityResponse using the provided GNMIOption list. // returns an error in case one of the options is invalid func NewCapabilitiesResponse(opts ...GNMIOption) (*gnmi.CapabilityResponse, error) { m := new(gnmi.CapabilityResponse) err := apply(m, opts...) if err != nil { return nil, err } if m.GNMIVersion == "" { m.GNMIVersion = DefaultGNMIVersion } return m, nil } // NewGetRequest creates a new *gnmi.GetRequest using the provided GNMIOption list. // returns an error in case one of the options is invalid func NewGetRequest(opts ...GNMIOption) (*gnmi.GetRequest, error) { m := new(gnmi.GetRequest) err := apply(m, opts...) if err != nil { return nil, err } return m, nil } // NewGetResponse creates a new *gnmi.GetResponse using the provided GNMIOption list. // returns an error in case one of the options is invalid func NewGetResponse(opts ...GNMIOption) (*gnmi.GetResponse, error) { m := new(gnmi.GetResponse) err := apply(m, opts...) if err != nil { return nil, err } return m, nil } // NewSetRequest creates a new *gnmi.SetRequest using the provided GNMIOption list. // returns an error in case one of the options is invalid func NewSetRequest(opts ...GNMIOption) (*gnmi.SetRequest, error) { m := new(gnmi.SetRequest) err := apply(m, opts...) return m, err } // NewSetResponse creates a new *gnmi.SetResponse using the provided GNMIOption list. // returns an error in case one of the options is invalid func NewSetResponse(opts ...GNMIOption) (*gnmi.SetResponse, error) { m := new(gnmi.SetResponse) err := apply(m, opts...) if err != nil { return nil, err } return m, nil } // NewSubscribeRequest creates a new *gnmi.SubscribeRequest using the provided GNMIOption list. // returns an error in case one of the options is invalid func NewSubscribeRequest(opts ...GNMIOption) (*gnmi.SubscribeRequest, error) { m := &gnmi.SubscribeRequest{ Request: &gnmi.SubscribeRequest_Subscribe{ Subscribe: new(gnmi.SubscriptionList), }, } err := apply(m, opts...) return m, err } // NewSubscribePollRequest creates a new *gnmi.SubscribeRequest with request type Poll // using the provided GNMIOption list. // returns an error in case one of the options is invalid func NewSubscribePollRequest(opts ...GNMIOption) (*gnmi.SubscribeRequest, error) { m := &gnmi.SubscribeRequest{ Request: &gnmi.SubscribeRequest_Poll{ Poll: new(gnmi.Poll), }, } err := apply(m, opts...) return m, err } // NewSubscribeResponse creates a *gnmi.SubscribeResponse with a gnmi.SubscribeResponse_Update as // response type. func NewSubscribeResponse(opts ...GNMIOption) (*gnmi.SubscribeResponse, error) { m := &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: new(gnmi.Notification), }, } err := apply(m, opts...) if err != nil { return nil, err } return m, nil } // NewSubscribeResponse creates a *gnmi.SubscribeResponse with a gnmi.SubscribeResponse_SyncResponse as // response type. func NewSubscribeSyncResponse(opts ...GNMIOption) (*gnmi.SubscribeResponse, error) { m := &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_SyncResponse{ SyncResponse: true, }, } err := apply(m, opts...) if err != nil { return nil, err } return m, nil } // Messages options // Version sets the provided gNMI version string in a gnmi.CapabilityResponse message. func Version(v string) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.CapabilityResponse: msg.GNMIVersion = v default: return fmt.Errorf("option Version: %w: %T", ErrInvalidMsgType, msg) } return nil } } // SupportedEncoding creates an GNMIOption that sets the provided encodings as supported encodings in a gnmi.CapabilitiesResponse func SupportedEncoding(encodings ...string) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.CapabilityResponse: if len(msg.SupportedEncodings) == 0 { msg.SupportedEncodings = make([]gnmi.Encoding, 0) } for _, encoding := range encodings { enc, ok := gnmi.Encoding_value[strings.ToUpper(encoding)] if !ok { return fmt.Errorf("option SupportedEncoding: %w: %s", ErrInvalidValue, encoding) } msg.SupportedEncodings = append(msg.SupportedEncodings, gnmi.Encoding(enc)) } default: return fmt.Errorf("option SupportedEncoding: %w: %T", ErrInvalidMsgType, msg) } return nil } } // SupportedModel creates an GNMIOption that sets the provided name, org and version as a supported model in a gnmi.CapabilitiesResponse. func SupportedModel(name, org, version string) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.CapabilityResponse: if len(msg.SupportedModels) == 0 { msg.SupportedModels = make([]*gnmi.ModelData, 0) } msg.SupportedModels = append(msg.SupportedModels, &gnmi.ModelData{ Name: name, Organization: org, Version: version, }) default: return fmt.Errorf("option SupportedModel: %w: %T", ErrInvalidMsgType, msg) } return nil } } // Extension creates a GNMIOption that applies the supplied gnmi_ext.Extension to the provided // proto.Message. func Extension(ext *gnmi_ext.Extension) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.CapabilityRequest: if len(msg.Extension) == 0 { msg.Extension = make([]*gnmi_ext.Extension, 0) } msg.Extension = append(msg.Extension, ext) case *gnmi.GetRequest: if len(msg.Extension) == 0 { msg.Extension = make([]*gnmi_ext.Extension, 0) } msg.Extension = append(msg.Extension, ext) case *gnmi.GetResponse: if len(msg.Extension) == 0 { msg.Extension = make([]*gnmi_ext.Extension, 0) } msg.Extension = append(msg.Extension, ext) case *gnmi.SetRequest: if len(msg.Extension) == 0 { msg.Extension = make([]*gnmi_ext.Extension, 0) } msg.Extension = append(msg.Extension, ext) case *gnmi.SetResponse: if len(msg.Extension) == 0 { msg.Extension = make([]*gnmi_ext.Extension, 0) } msg.Extension = append(msg.Extension, ext) case *gnmi.SubscribeRequest: if len(msg.Extension) == 0 { msg.Extension = make([]*gnmi_ext.Extension, 0) } msg.Extension = append(msg.Extension, ext) case *gnmi.SubscribeResponse: if len(msg.Extension) == 0 { msg.Extension = make([]*gnmi_ext.Extension, 0) } msg.Extension = append(msg.Extension, ext) } return nil } } func Extension_CommitRequest(id string, dur time.Duration) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.SetRequest: fn := Extension( &gnmi_ext.Extension{ Ext: &gnmi_ext.Extension_Commit{ Commit: &gnmi_ext.Commit{ Id: id, Action: &gnmi_ext.Commit_Commit{ Commit: &gnmi_ext.CommitRequest{ RollbackDuration: durationpb.New(dur), }, }, }, }, }, ) return fn(msg) default: return fmt.Errorf("option Extension_CommitRequest: %w: %T", ErrInvalidMsgType, msg) } } } func Extension_CommitConfirm(id string) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.SetRequest: fn := Extension( &gnmi_ext.Extension{ Ext: &gnmi_ext.Extension_Commit{ Commit: &gnmi_ext.Commit{ Id: id, Action: &gnmi_ext.Commit_Confirm{ Confirm: &gnmi_ext.CommitConfirm{}, }, }, }, }, ) return fn(msg) default: return fmt.Errorf("option Extension_CommitConfirm: %w: %T", ErrInvalidMsgType, msg) } } } func Extension_CommitCancel(id string) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.SetRequest: fn := Extension( &gnmi_ext.Extension{ Ext: &gnmi_ext.Extension_Commit{ Commit: &gnmi_ext.Commit{ Id: id, Action: &gnmi_ext.Commit_Cancel{ Cancel: &gnmi_ext.CommitCancel{}, }, }, }, }, ) return fn(msg) default: return fmt.Errorf("option Extension_CommitCancel: %w: %T", ErrInvalidMsgType, msg) } } } func Extension_CommitSetRollbackDuration(id string, dur time.Duration) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.SetRequest: fn := Extension( &gnmi_ext.Extension{ Ext: &gnmi_ext.Extension_Commit{ Commit: &gnmi_ext.Commit{ Id: id, Action: &gnmi_ext.Commit_SetRollbackDuration{ SetRollbackDuration: &gnmi_ext.CommitSetRollbackDuration{ RollbackDuration: durationpb.New(dur), }, }, }, }, }, ) return fn(msg) default: return fmt.Errorf("option Extension_CommitSetRollbackDuration: %w: %T", ErrInvalidMsgType, msg) } } } func Extension_Depth(lvl uint32) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.GetRequest, *gnmi.SubscribeRequest: fn := Extension( &gnmi_ext.Extension{ Ext: &gnmi_ext.Extension_Depth{ Depth: &gnmi_ext.Depth{ Level: lvl, }, }, }, ) return fn(msg) default: return fmt.Errorf("option Extension_Depth: %w: %T", ErrInvalidMsgType, msg) } } } // Extension_HistorySnapshotTime creates a GNMIOption that adds a gNMI extension of // type History Snapshot with the supplied snapshot time. // the snapshot value can be nanoseconds since Unix epoch or a date in RFC3339 format func Extension_HistorySnapshotTime(tm time.Time) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.SubscribeRequest: fn := Extension( &gnmi_ext.Extension{ Ext: &gnmi_ext.Extension_History{ History: &gnmi_ext.History{ Request: &gnmi_ext.History_SnapshotTime{ SnapshotTime: tm.UnixNano(), }, }, }, }, ) return fn(msg) default: return fmt.Errorf("option Extension_HistorySnapshotTime: %w: %T", ErrInvalidMsgType, msg) } } } // Extension_HistoryRange creates a GNMIOption that adds a gNMI extension of // type History TimeRange with the supplied start and end times. // the start/end values can be nanoseconds since Unix epoch or a date in RFC3339 format func Extension_HistoryRange(start, end time.Time) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.SubscribeRequest: fn := Extension( &gnmi_ext.Extension{ Ext: &gnmi_ext.Extension_History{ History: &gnmi_ext.History{ Request: &gnmi_ext.History_Range{ Range: &gnmi_ext.TimeRange{ Start: start.UnixNano(), End: end.UnixNano(), }, }, }, }, }, ) return fn(msg) default: return fmt.Errorf("option Extension_HistoryRange: %w: %T", ErrInvalidMsgType, msg) } } } // Prefix creates a GNMIOption that creates a *gnmi.Path and adds it to the supplied // proto.Message (as a Path Prefix). // The proto.Message can be a *gnmi.GetRequest, *gnmi.SetRequest or a *gnmi.SubscribeRequest with RequestType Subscribe. func Prefix(prefix string) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } var err error switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.GetRequest: msg.Prefix, err = path.CreatePrefix(prefix, "") case *gnmi.SetRequest: msg.Prefix, err = path.CreatePrefix(prefix, "") case *gnmi.SetResponse: msg.Prefix, err = path.CreatePrefix(prefix, "") case *gnmi.SubscribeRequest: switch msg := msg.Request.(type) { case *gnmi.SubscribeRequest_Subscribe: if msg.Subscribe == nil { msg.Subscribe = new(gnmi.SubscriptionList) } msg.Subscribe.Prefix, err = path.CreatePrefix(prefix, "") default: return fmt.Errorf("option Prefix: %w: %T", ErrInvalidMsgType, msg) } case *gnmi.Notification: msg.Prefix, err = path.CreatePrefix(prefix, "") default: return fmt.Errorf("option Prefix: %w: %T", ErrInvalidMsgType, msg) } if err != nil { return fmt.Errorf("%w: %v", ErrInvalidValue, err) } return nil } } // Target creates a GNMIOption that set the gnmi Prefix target to the supplied string value. // The proto.Message can be a *gnmi.GetRequest, *gnmi.SetRequest or a *gnmi.SubscribeRequest with RequestType Subscribe. func Target(target string) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } if target == "" { return nil } var err error switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.GetRequest: if msg.Prefix == nil { msg.Prefix = new(gnmi.Path) } msg.Prefix.Target = target case *gnmi.SetRequest: if msg.Prefix == nil { msg.Prefix = new(gnmi.Path) } msg.Prefix.Target = target case *gnmi.SubscribeRequest: switch msg := msg.Request.(type) { case *gnmi.SubscribeRequest_Subscribe: if msg.Subscribe == nil { msg.Subscribe = new(gnmi.SubscriptionList) } if msg.Subscribe.Prefix == nil { msg.Subscribe.Prefix = new(gnmi.Path) } msg.Subscribe.Prefix.Target = target default: return fmt.Errorf("option Target: %w: %T", ErrInvalidMsgType, msg) } default: return fmt.Errorf("option Target: %w: %T", ErrInvalidMsgType, msg) } return err } } // Path creates a GNMIOption that creates a *gnmi.Path and adds it to the supplied proto.Message // which can be a *gnmi.GetRequest, *gnmi.SetRequest or a *gnmi.Subscription. func Path(p string) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } var err error switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.GetRequest: gp, err := path.ParsePath(p) if err != nil { return fmt.Errorf("%w: %v", ErrInvalidValue, err) } if len(msg.Path) == 0 { msg.Path = make([]*gnmi.Path, 0) } msg.Path = append(msg.Path, gp) case *gnmi.Update: msg.Path, err = path.ParsePath(p) if err != nil { return fmt.Errorf("%w: %v", ErrInvalidValue, err) } case *gnmi.UpdateResult: msg.Path, err = path.ParsePath(p) if err != nil { return fmt.Errorf("%w: %v", ErrInvalidValue, err) } case *gnmi.Subscription: msg.Path, err = path.ParsePath(p) if err != nil { return fmt.Errorf("%w: %v", ErrInvalidValue, err) } default: return fmt.Errorf("option Path: %w: %T", ErrInvalidMsgType, msg) } return nil } } // Encoding creates a GNMIOption that adds the encoding type to the supplied proto.Message // which can be a *gnmi.GetRequest, *gnmi.SetRequest or a *gnmi.SubscribeRequest with RequestType Subscribe. func Encoding(encoding string) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } enc, ok := gnmi.Encoding_value[strings.ToUpper(encoding)] if !ok { return fmt.Errorf("option Encoding: %w: %s", ErrInvalidValue, encoding) } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.GetRequest: msg.Encoding = gnmi.Encoding(enc) case *gnmi.SubscribeRequest: switch msg := msg.Request.(type) { case *gnmi.SubscribeRequest_Subscribe: if msg.Subscribe == nil { msg.Subscribe = new(gnmi.SubscriptionList) } msg.Subscribe.Encoding = gnmi.Encoding(enc) } default: return fmt.Errorf("option Encoding: %w: %T", ErrInvalidMsgType, msg) } return nil } } // EncodingJSON creates a GNMIOption that sets the encoding type to JSON in a gnmi.GetRequest or // gnmi.SubscribeRequest. func EncodingJSON() func(msg proto.Message) error { return Encoding("JSON") } // EncodingBYTES creates a GNMIOption that sets the encoding type to BYTES in a gnmi.GetRequest or // gnmi.SubscribeRequest. func EncodingBYTES() func(msg proto.Message) error { return Encoding("BYTES") } // EncodingPROTO creates a GNMIOption that sets the encoding type to PROTO in a gnmi.GetRequest or // gnmi.SubscribeRequest. func EncodingPROTO() func(msg proto.Message) error { return Encoding("PROTO") } // EncodingASCII creates a GNMIOption that sets the encoding type to ASCII in a gnmi.GetRequest or // gnmi.SubscribeRequest. func EncodingASCII() func(msg proto.Message) error { return Encoding("ASCII") } // EncodingJSON_IETF creates a GNMIOption that sets the encoding type to JSON_IETF in a gnmi.GetRequest or // gnmi.SubscribeRequest. func EncodingJSON_IETF() func(msg proto.Message) error { return Encoding("JSON_IETF") } // EncodingCustom creates a GNMIOption that adds the encoding type to the supplied proto.Message // which can be a *gnmi.GetRequest, *gnmi.SetRequest or a *gnmi.SubscribeRequest with RequestType Subscribe. // Unlike Encoding, this GNMIOption does not validate if the provided encoding is defined by the gNMI spec. func EncodingCustom(enc int) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.GetRequest: msg.Encoding = gnmi.Encoding(enc) case *gnmi.SubscribeRequest: switch msg := msg.Request.(type) { case *gnmi.SubscribeRequest_Subscribe: if msg.Subscribe == nil { msg.Subscribe = new(gnmi.SubscriptionList) } msg.Subscribe.Encoding = gnmi.Encoding(enc) } default: return fmt.Errorf("option EncodingCustom: %w: %T", ErrInvalidMsgType, msg) } return nil } } // DataType creates a GNMIOption that adds the data type to the supplied proto.Message // which must be a *gnmi.GetRequest. func DataType(datat string) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } if datat == "" { return nil } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.GetRequest: dt, ok := gnmi.GetRequest_DataType_value[strings.ToUpper(datat)] if !ok { return fmt.Errorf("option DataType: %w: %s", ErrInvalidValue, datat) } msg.Type = gnmi.GetRequest_DataType(dt) default: return fmt.Errorf("option DataType: %w: %T", ErrInvalidMsgType, msg) } return nil } } // DataTypeALL creates a GNMIOption that sets the gnmi.GetRequest data type to ALL func DataTypeALL() func(msg proto.Message) error { return DataType("ALL") } // DataTypeCONFIG creates a GNMIOption that sets the gnmi.GetRequest data type to CONFIG func DataTypeCONFIG() func(msg proto.Message) error { return DataType("CONFIG") } // DataTypeSTATE creates a GNMIOption that sets the gnmi.GetRequest data type to STATE func DataTypeSTATE() func(msg proto.Message) error { return DataType("STATE") } // DataTypeOPERATIONAL creates a GNMIOption that sets the gnmi.GetRequest data type to OPERATIONAL func DataTypeOPERATIONAL() func(msg proto.Message) error { return DataType("OPERATIONAL") } // UseModel creates a GNMIOption that add a gnmi.DataModel to a gnmi.GetRequest or gnmi.SubscribeRequest // based on the name, org and version strings provided. func UseModel(name, org, version string) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.GetRequest: if len(msg.UseModels) == 0 { msg.UseModels = make([]*gnmi.ModelData, 0) } msg.UseModels = append(msg.UseModels, &gnmi.ModelData{ Name: name, Organization: org, Version: version, }) case *gnmi.SubscribeRequest: switch msg := msg.Request.(type) { case *gnmi.SubscribeRequest_Subscribe: if msg.Subscribe == nil { msg.Subscribe = new(gnmi.SubscriptionList) } if len(msg.Subscribe.UseModels) == 0 { msg.Subscribe.UseModels = make([]*gnmi.ModelData, 0) } msg.Subscribe.UseModels = append(msg.Subscribe.UseModels, &gnmi.ModelData{ Name: name, Organization: org, Version: version, }) } default: return fmt.Errorf("option UseModel: %w: %T", ErrInvalidMsgType, msg) } return nil } } // Update creates a GNMIOption that creates a *gnmi.Update message and adds it to the supplied proto.Message, // the supplied message must be a *gnmi.SetRequest. func Update(opts ...GNMIOption) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.SetRequest: if len(msg.Update) == 0 { msg.Update = make([]*gnmi.Update, 0) } upd := new(gnmi.Update) err := apply(upd, opts...) if err != nil { return err } msg.Update = append(msg.Update, upd) case *gnmi.Notification: if len(msg.Update) == 0 { msg.Update = make([]*gnmi.Update, 0) } upd := new(gnmi.Update) err := apply(upd, opts...) if err != nil { return err } msg.Update = append(msg.Update, upd) default: return fmt.Errorf("option Update: %w: %T", ErrInvalidMsgType, msg) } return nil } } // Replace creates a GNMIOption that creates a *gnmi.Update message and adds it to the supplied proto.Message. // the supplied message must be a *gnmi.SetRequest. func Replace(opts ...GNMIOption) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.SetRequest: if len(msg.Update) == 0 { msg.Update = make([]*gnmi.Update, 0) } upd := new(gnmi.Update) err := apply(upd, opts...) if err != nil { return err } msg.Replace = append(msg.Replace, upd) default: return fmt.Errorf("option Replace: %w: %T", ErrInvalidMsgType, msg) } return nil } } // Value creates a GNMIOption that creates a *gnmi.TypedValue and adds it to the supplied proto.Message. // the supplied message must be a *gnmi.Update. // If a map is supplied as `data interface{}` it has to be a map[string]interface{}. func Value(data interface{}, encoding string) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } var err error switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.Update: msg.Val, err = value(data, encoding) if err != nil { return err } default: return fmt.Errorf("option Value: %w: %T", ErrInvalidMsgType, msg) } return nil } } func value(data interface{}, encoding string) (*gnmi.TypedValue, error) { switch data := data.(type) { case []interface{}, []string: switch strings.ToLower(encoding) { case "": encoding = encodingJSON fallthrough case encodingJSON: b, err := json.Marshal(data) if err != nil { return nil, err } return &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: bytes.Trim(b, " \r\n\t"), }}, nil case encodingJSON_IETF: b, err := json.Marshal(data) if err != nil { return nil, err } return &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{ JsonIetfVal: bytes.Trim(b, " \r\n\t"), }}, nil default: return gvalue.FromScalar(data) } case map[string]interface{}: switch strings.ToLower(encoding) { case "": encoding = encodingJSON fallthrough case encodingJSON: b := new(bytes.Buffer) enc := json.NewEncoder(b) enc.SetEscapeHTML(false) err := enc.Encode(data) if err != nil { return nil, err } return &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: bytes.Trim(b.Bytes(), " \r\n\t"), }}, nil case encodingJSON_IETF: b := new(bytes.Buffer) enc := json.NewEncoder(b) enc.SetEscapeHTML(false) err := enc.Encode(data) if err != nil { return nil, err } return &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{ JsonIetfVal: bytes.Trim(b.Bytes(), " \r\n\t"), }}, nil } case string: switch strings.ToLower(encoding) { case "": encoding = encodingJSON fallthrough case encodingJSON: b := new(bytes.Buffer) if json.Valid([]byte(data)) { b.WriteString(data) } else { enc := json.NewEncoder(b) enc.SetEscapeHTML(false) err := enc.Encode(data) if err != nil { return nil, err } } return &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: bytes.Trim(b.Bytes(), " \r\n\t"), }}, nil case encodingJSON_IETF: b := new(bytes.Buffer) if json.Valid([]byte(data)) { b.WriteString(data) } else { enc := json.NewEncoder(b) enc.SetEscapeHTML(false) err := enc.Encode(data) if err != nil { return nil, err } } return &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{ JsonIetfVal: bytes.Trim(b.Bytes(), " \r\n\t"), }}, nil case "ascii": return &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{ AsciiVal: data, }}, nil case "bool": bval, err := strconv.ParseBool(data) if err != nil { return nil, err } return &gnmi.TypedValue{ Value: &gnmi.TypedValue_BoolVal{ BoolVal: bval, }}, nil case "bytes": return &gnmi.TypedValue{ Value: &gnmi.TypedValue_BytesVal{ BytesVal: []byte(data), }}, nil case "decimal": return nil, fmt.Errorf("decimal type not implemented") case "float": f, err := strconv.ParseFloat(data, 32) if err != nil { return nil, err } return &gnmi.TypedValue{ Value: &gnmi.TypedValue_FloatVal{ FloatVal: float32(f), }}, nil case "int": k, err := strconv.ParseInt(data, 10, 64) if err != nil { return nil, err } return &gnmi.TypedValue{ Value: &gnmi.TypedValue_IntVal{ IntVal: k, }}, nil case "uint": u, err := strconv.ParseUint(data, 10, 64) if err != nil { return nil, err } return &gnmi.TypedValue{ Value: &gnmi.TypedValue_UintVal{ UintVal: u, }}, nil case "string": return &gnmi.TypedValue{ Value: &gnmi.TypedValue_StringVal{ StringVal: data, }}, nil default: return nil, fmt.Errorf("invalid encoding %s", encoding) } case *gnmi.TypedValue: return data, nil case *gnmi.TypedValue_AnyVal: return &gnmi.TypedValue{Value: data}, nil case *gnmi.TypedValue_AsciiVal: return &gnmi.TypedValue{Value: data}, nil case *gnmi.TypedValue_BoolVal: return &gnmi.TypedValue{Value: data}, nil case *gnmi.TypedValue_BytesVal: return &gnmi.TypedValue{Value: data}, nil case *gnmi.TypedValue_DecimalVal: return &gnmi.TypedValue{Value: data}, nil case *gnmi.TypedValue_FloatVal: return &gnmi.TypedValue{Value: data}, nil case *gnmi.TypedValue_IntVal: return &gnmi.TypedValue{Value: data}, nil case *gnmi.TypedValue_JsonIetfVal: return &gnmi.TypedValue{Value: data}, nil case *gnmi.TypedValue_JsonVal: return &gnmi.TypedValue{Value: data}, nil case *gnmi.TypedValue_LeaflistVal: return &gnmi.TypedValue{Value: data}, nil case *gnmi.TypedValue_ProtoBytes: return &gnmi.TypedValue{Value: data}, nil case *gnmi.TypedValue_StringVal: return &gnmi.TypedValue{Value: data}, nil case *gnmi.TypedValue_UintVal: return &gnmi.TypedValue{Value: data}, nil default: v, err := gvalue.FromScalar(data) if err != nil { return nil, fmt.Errorf("%w: %v", ErrInvalidValue, err) } return v, nil } return nil, fmt.Errorf("unexpected value type and encoding %w: %T and %s", ErrInvalidValue, data, encoding) } // Delete creates a GNMIOption that creates a *gnmi.Path and adds it to the supplied proto.Message. // the supplied message must be a *gnmi.SetRequest. The *gnmi.Path is added the .Delete list. func Delete(p string) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.SetRequest: gp, err := path.ParsePath(p) if err != nil { return fmt.Errorf("%w: %v", ErrInvalidValue, err) } if len(msg.Delete) == 0 { msg.Delete = make([]*gnmi.Path, 0) } msg.Delete = append(msg.Delete, gp) case *gnmi.Notification: gp, err := path.ParsePath(p) if err != nil { return fmt.Errorf("%w: %v", ErrInvalidValue, err) } if len(msg.Delete) == 0 { msg.Delete = make([]*gnmi.Path, 0) } msg.Delete = append(msg.Delete, gp) default: return fmt.Errorf("option Delete: %w: %T", ErrInvalidMsgType, msg) } return nil } } // SubscriptionListMode creates a GNMIOption that sets the SubscribeRequest Mode. // The variable mode must be one of "once", "poll" or "stream". // The supplied proto.Message must be a *gnmi.SubscribeRequest with RequestType Subscribe. func SubscriptionListMode(mode string) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.SubscribeRequest: switch msg := msg.Request.(type) { case *gnmi.SubscribeRequest_Subscribe: if msg.Subscribe == nil { msg.Subscribe = new(gnmi.SubscriptionList) } gmode, ok := gnmi.SubscriptionList_Mode_value[strings.ToUpper(mode)] if !ok { return fmt.Errorf("option SubscriptionListMode: %w: %s", ErrInvalidValue, mode) } msg.Subscribe.Mode = gnmi.SubscriptionList_Mode(gmode) default: return fmt.Errorf("option SubscriptionListMode: %w: %T", ErrInvalidMsgType, msg) } default: return fmt.Errorf("option SubscriptionListMode: %w: %T", ErrInvalidMsgType, msg) } return nil } } // SubscriptionListModeSTREAM creates a GNMIOption that sets the Subscription List Mode to STREAM func SubscriptionListModeSTREAM() func(msg proto.Message) error { return SubscriptionListMode("STREAM") } // SubscriptionListModeONCE creates a GNMIOption that sets the Subscription List Mode to ONCE func SubscriptionListModeONCE() func(msg proto.Message) error { return SubscriptionListMode("ONCE") } // SubscriptionListModePOLL creates a GNMIOption that sets the Subscription List Mode to POLL func SubscriptionListModePOLL() func(msg proto.Message) error { return SubscriptionListMode("POLL") } // Qos creates a GNMIOption that sets the QosMarking field in a *gnmi.SubscribeRequest with RequestType Subscribe. func Qos(qos uint32) func(msg proto.Message) error { return func(msg proto.Message) error { switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.SubscribeRequest: switch msg := msg.Request.(type) { case *gnmi.SubscribeRequest_Subscribe: if msg.Subscribe == nil { msg.Subscribe = new(gnmi.SubscriptionList) } msg.Subscribe.Qos = &gnmi.QOSMarking{Marking: qos} default: return fmt.Errorf("option Qos: %w: %T", ErrInvalidMsgType, msg) } default: return fmt.Errorf("option Qos: %w: %T", ErrInvalidMsgType, msg) } return nil } } // AllowAggregation creates a GNMIOption that sets the AllowAggregation field in a *gnmi.SubscribeRequest with RequestType Subscribe. func AllowAggregation(b bool) func(msg proto.Message) error { return func(msg proto.Message) error { switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.SubscribeRequest: switch msg := msg.Request.(type) { case *gnmi.SubscribeRequest_Subscribe: if msg.Subscribe == nil { msg.Subscribe = new(gnmi.SubscriptionList) } msg.Subscribe.AllowAggregation = b default: return fmt.Errorf("option AllowAggregation: %w: %T", ErrInvalidMsgType, msg) } default: return fmt.Errorf("option AllowAggregation: %w: %T", ErrInvalidMsgType, msg) } return nil } } // UpdatesOnly creates a GNMIOption that sets the UpdatesOnly field in a *gnmi.SubscribeRequest with RequestType Subscribe. func UpdatesOnly(b bool) func(msg proto.Message) error { return func(msg proto.Message) error { switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.SubscribeRequest: switch msg := msg.Request.(type) { case *gnmi.SubscribeRequest_Subscribe: if msg.Subscribe == nil { msg.Subscribe = new(gnmi.SubscriptionList) } msg.Subscribe.UpdatesOnly = b default: return fmt.Errorf("option UpdatesOnly: %w: %T", ErrInvalidMsgType, msg) } default: return fmt.Errorf("option UpdatesOnly: %w: %T", ErrInvalidMsgType, msg) } return nil } } // UpdatesOnly creates a GNMIOption that creates a *gnmi.Subscription based on the supplied GNMIOption(s) and adds it the // supplied proto.Mesage which must be of type *gnmi.SubscribeRequest with RequestType Subscribe. func Subscription(opts ...GNMIOption) func(msg proto.Message) error { return func(msg proto.Message) error { switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.SubscribeRequest: switch msg := msg.Request.(type) { case *gnmi.SubscribeRequest_Subscribe: if msg.Subscribe == nil { msg.Subscribe = new(gnmi.SubscriptionList) } if len(msg.Subscribe.Subscription) == 0 { msg.Subscribe.Subscription = make([]*gnmi.Subscription, 0) } sub := new(gnmi.Subscription) err := apply(sub, opts...) if err != nil { return err } msg.Subscribe.Subscription = append(msg.Subscribe.Subscription, sub) default: return fmt.Errorf("option Subscription: %w: %T", ErrInvalidMsgType, msg) } default: return fmt.Errorf("option Subscription: %w: %T", ErrInvalidMsgType, msg) } return nil } } // SubscriptionMode creates a GNMIOption that sets the Subscription mode in a proto.Message of type *gnmi.Subscription. func SubscriptionMode(mode string) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.Subscription: gmode, ok := gnmi.SubscriptionMode_value[strings.ToUpper(strings.ReplaceAll(mode, "-", "_"))] if !ok { return fmt.Errorf("option SubscriptionMode: %w: %s", ErrInvalidValue, mode) } msg.Mode = gnmi.SubscriptionMode(gmode) default: return fmt.Errorf("option SubscriptionMode: %w: %T", ErrInvalidMsgType, msg) } return nil } } // SubscriptionModeTARGET_DEFINED creates a GNMIOption that sets the subscription mode to TARGET_DEFINED func SubscriptionModeTARGET_DEFINED() func(msg proto.Message) error { return SubscriptionMode("TARGET_DEFINED") } // SubscriptionModeON_CHANGE creates a GNMIOption that sets the subscription mode to ON_CHANGE func SubscriptionModeON_CHANGE() func(msg proto.Message) error { return SubscriptionMode("ON_CHANGE") } // SubscriptionModeSAMPLE creates a GNMIOption that sets the subscription mode to SAMPLE func SubscriptionModeSAMPLE() func(msg proto.Message) error { return SubscriptionMode("SAMPLE") } // SampleInterval creates a GNMIOption that sets the SampleInterval in a proto.Message of type *gnmi.Subscription. func SampleInterval(d time.Duration) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.Subscription: msg.SampleInterval = uint64(d.Nanoseconds()) default: return fmt.Errorf("option SampleInterval: %w: %T", ErrInvalidMsgType, msg) } return nil } } // HeartbeatInterval creates a GNMIOption that sets the HeartbeatInterval in a proto.Message of type *gnmi.Subscription. func HeartbeatInterval(d time.Duration) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.Subscription: msg.HeartbeatInterval = uint64(d.Nanoseconds()) default: return fmt.Errorf("option HeartbeatInterval: %w: %T", ErrInvalidMsgType, msg) } return nil } } // SuppressRedundant creates a GNMIOption that sets the SuppressRedundant in a proto.Message of type *gnmi.Subscription. func SuppressRedundant(s bool) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.Subscription: msg.SuppressRedundant = s default: return fmt.Errorf("option SuppressRedundant: %w: %T", ErrInvalidMsgType, msg) } return nil } } // Notification creates a GNMIOption that builds a gnmi.Notification from the supplied GNMIOptions and adds it // to the supplied proto.Message func Notification(opts ...GNMIOption) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.GetResponse: if len(msg.Notification) == 0 { msg.Notification = make([]*gnmi.Notification, 0) } notif := new(gnmi.Notification) err := apply(notif, opts...) if err != nil { return err } msg.Notification = append(msg.Notification, notif) case *gnmi.SubscribeResponse: switch msg := msg.Response.(type) { case *gnmi.SubscribeResponse_Update: notif := new(gnmi.Notification) err := apply(notif, opts...) if err != nil { return err } msg.Update = notif } default: return fmt.Errorf("option Notification: %w: %T", ErrInvalidMsgType, msg) } return nil } } // Timestamp sets the supplied timestamp in a gnmi.Notification message func Timestamp(t int64) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.Notification: msg.Timestamp = t case *gnmi.SetResponse: msg.Timestamp = t default: return fmt.Errorf("option Timestamp: %w: %T", ErrInvalidMsgType, msg) } return nil } } // TimestampNow is the same as Timestamp(time.Now().UnixNano()) func TimestampNow() func(msg proto.Message) error { return Timestamp(time.Now().UnixNano()) } // Atomic sets the .Atomic field in a gnmi.Notification message func Atomic(b bool) func(msg proto.Message) error { return func(msg proto.Message) error { switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.Notification: msg.Atomic = b default: return fmt.Errorf("option Atomic: %w: %T", ErrInvalidMsgType, msg) } return nil } } // UpdateResult creates a GNMIOption that creates a gnmi.UpdateResult and adds it to // a proto.Message of type gnmi.SetResponse. func UpdateResult(opts ...GNMIOption) func(msg proto.Message) error { return func(msg proto.Message) error { switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.SetResponse: if len(msg.Response) == 0 { msg.Response = make([]*gnmi.UpdateResult, 0) } updRes := new(gnmi.UpdateResult) err := apply(updRes, opts...) if err != nil { return err } msg.Response = append(msg.Response, updRes) default: return fmt.Errorf("option UpdateResult: %w: %T", ErrInvalidMsgType, msg) } return nil } } // Operation creates a GNMIOption that sets the gnmi.UpdateResult_Operation // value in a gnmi.UpdateResult. func Operation(oper string) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.UpdateResult: setOper, ok := gnmi.UpdateResult_Operation_value[strings.ToUpper(oper)] if !ok { return fmt.Errorf("option Operation: %w: %s", ErrInvalidValue, oper) } msg.Op = gnmi.UpdateResult_Operation(setOper) default: return fmt.Errorf("option Operation: %w: %T", ErrInvalidMsgType, msg) } return nil } } // OperationINVALID creates a GNMIOption that sets the gnmi.SetResponse Operation to INVALID func OperationINVALID() func(msg proto.Message) error { return Operation("INVALID") } // OperationDELETE creates a GNMIOption that sets the gnmi.SetResponse Operation to DELETE func OperationDELETE() func(msg proto.Message) error { return Operation("DELETE") } // OperationREPLACE creates a GNMIOption that sets the gnmi.SetResponse Operation to REPLACE func OperationREPLACE() func(msg proto.Message) error { return Operation("REPLACE") } // OperationUPDATE creates a GNMIOption that sets the gnmi.SetResponse Operation to UPDATE func OperationUPDATE() func(msg proto.Message) error { return Operation("UPDATE") } // UnionReplace creates a GNMIOption that creates a *gnmi.Update message and adds it to the supplied proto.Message. // the supplied message must be a *gnmi.SetRequest. func UnionReplace(opts ...GNMIOption) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.SetRequest: if len(msg.UnionReplace) == 0 { msg.UnionReplace = make([]*gnmi.Update, 0) } upd := new(gnmi.Update) err := apply(upd, opts...) if err != nil { return err } msg.UnionReplace = append(msg.UnionReplace, upd) default: return fmt.Errorf("option UnionReplace: %w: %T", ErrInvalidMsgType, msg) } return nil } } ================================================ FILE: pkg/api/gnmi_msgs_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package api import ( "errors" "reflect" "strings" "testing" "time" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmi/proto/gnmi_ext" "github.com/openconfig/gnmic/pkg/api/testutils" ) // Capabilities Request / Response tests func TestNewCapabilitiesRequest(t *testing.T) { name := "single_case" t.Run(name, func(t *testing.T) { nreq, err := NewCapabilitiesRequest() if err != nil { t.Errorf("failed at %q: %v", name, err) t.Fail() } if !reflect.DeepEqual(new(gnmi.CapabilityRequest), nreq) { t.Errorf("failed at %q", name) t.Errorf("expected %+v", &gnmi.CapabilityRequest{}) t.Errorf(" got %+v", nreq) t.Fail() } }) } type capResponseInput struct { opts []GNMIOption req *gnmi.CapabilityResponse err error } var capResponseTestSet = map[string]capResponseInput{ "simple": { opts: []GNMIOption{ SupportedEncoding("json", "json_ietf"), }, req: &gnmi.CapabilityResponse{ SupportedEncodings: []gnmi.Encoding{ gnmi.Encoding_JSON, gnmi.Encoding_JSON_IETF, }, GNMIVersion: DefaultGNMIVersion, }, err: nil, }, "custom_version": { opts: []GNMIOption{ Version("1.0.0"), SupportedEncoding("json", "json_ietf"), }, req: &gnmi.CapabilityResponse{ SupportedEncodings: []gnmi.Encoding{ gnmi.Encoding_JSON, gnmi.Encoding_JSON_IETF, }, GNMIVersion: "1.0.0", }, err: nil, }, "unsupported_encoding": { opts: []GNMIOption{ Version(DefaultGNMIVersion), SupportedEncoding("not_json", "json_ietf"), }, req: &gnmi.CapabilityResponse{ SupportedEncodings: []gnmi.Encoding{ gnmi.Encoding_JSON, gnmi.Encoding_JSON_IETF, }, GNMIVersion: DefaultGNMIVersion, }, err: ErrInvalidValue, }, } func TestNewCapabilitiesResponse(t *testing.T) { for name, item := range capResponseTestSet { t.Run(name, func(t *testing.T) { nreq, err := NewCapabilitiesResponse(item.opts...) if err != nil { uerr := errors.Unwrap(err) if !errors.Is(uerr, item.err) { t.Errorf("%q failed", name) t.Errorf("%q expected err : %v", name, item.err) t.Errorf("%q got err : %v", name, err) t.Fail() } return } if !testutils.CapabilitiesResponsesEqual(nreq, item.req) { t.Errorf("%q failed", name) t.Errorf("%q expected result : %+v", name, item.req) t.Errorf("%q got result : %+v", name, nreq) t.Fail() } }) } } // Get Request / Response tests type getRequestInput struct { opts []GNMIOption req *gnmi.GetRequest } var getRequestTestSet = map[string]getRequestInput{ "path": { opts: []GNMIOption{ Path("system/name"), }, req: &gnmi.GetRequest{ Path: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ { Name: "system", }, { Name: "name", }, }, }, }, }, }, "extension": { opts: []GNMIOption{ Path("system/name"), Extension(&gnmi_ext.Extension{Ext: &gnmi_ext.Extension_History{}}), }, req: &gnmi.GetRequest{ Path: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ { Name: "system", }, { Name: "name", }, }, }, }, Extension: []*gnmi_ext.Extension{ {Ext: &gnmi_ext.Extension_History{}}, }, }, }, "two_paths": { opts: []GNMIOption{ Path("system/name"), Path("system/gnmi-server"), }, req: &gnmi.GetRequest{ Path: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ { Name: "system", }, { Name: "name", }, }, }, { Elem: []*gnmi.PathElem{ { Name: "system", }, { Name: "gnmi-server", }, }, }, }, }, }, "prefix": { opts: []GNMIOption{ Prefix("system/name"), }, req: &gnmi.GetRequest{ Prefix: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "system", }, { Name: "name", }, }, }, }, }, "target": { opts: []GNMIOption{ Target("target1"), }, req: &gnmi.GetRequest{ Prefix: &gnmi.Path{ Target: "target1", }, }, }, "prefix_target_path": { opts: []GNMIOption{ Prefix("system"), Path("name"), Target("target1"), }, req: &gnmi.GetRequest{ Prefix: &gnmi.Path{ Target: "target1", Elem: []*gnmi.PathElem{ { Name: "system", }, }, }, Path: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ { Name: "name", }, }, }, }, }, }, "data_type_ALL": { opts: []GNMIOption{ Path("system/name"), DataTypeALL(), }, req: &gnmi.GetRequest{ Path: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ { Name: "system", }, { Name: "name", }, }, }, }, Type: gnmi.GetRequest_ALL, }, }, "data_type_CONFIG": { opts: []GNMIOption{ Path("system/name"), DataTypeCONFIG(), }, req: &gnmi.GetRequest{ Path: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ { Name: "system", }, { Name: "name", }, }, }, }, Type: gnmi.GetRequest_CONFIG, }, }, "data_type_STATE": { opts: []GNMIOption{ Path("system/name"), DataTypeSTATE(), }, req: &gnmi.GetRequest{ Path: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ { Name: "system", }, { Name: "name", }, }, }, }, Type: gnmi.GetRequest_STATE, }, }, "data_type_OPERATIONAL": { opts: []GNMIOption{ Path("system/name"), DataTypeOPERATIONAL(), }, req: &gnmi.GetRequest{ Path: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ { Name: "system", }, { Name: "name", }, }, }, }, Type: gnmi.GetRequest_OPERATIONAL, }, }, "encoding": { opts: []GNMIOption{ Path("system/name"), DataType("config"), Encoding("json_ietf"), }, req: &gnmi.GetRequest{ Path: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ { Name: "system", }, { Name: "name", }, }, }, }, Type: gnmi.GetRequest_CONFIG, Encoding: gnmi.Encoding_JSON_IETF, }, }, "encoding_custom": { opts: []GNMIOption{ Path("system/name"), DataType("config"), EncodingCustom(42), }, req: &gnmi.GetRequest{ Path: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ { Name: "system", }, { Name: "name", }, }, }, }, Type: gnmi.GetRequest_CONFIG, Encoding: gnmi.Encoding(42), }, }, } func TestNewGetRequest(t *testing.T) { for name, item := range getRequestTestSet { t.Run(name, func(t *testing.T) { nreq, err := NewGetRequest(item.opts...) if err != nil { t.Errorf("failed at %q: %v", name, err) t.Fail() } if !testutils.GetRequestsEqual(nreq, item.req) { t.Errorf("failed at %q", name) t.Errorf("expected %+v", item.req) t.Errorf(" got %+v", nreq) t.Fail() } }) } } type getResponseInput struct { opts []GNMIOption req *gnmi.GetResponse } var getResponseTestSet = map[string]getResponseInput{ "simple": { opts: []GNMIOption{ Notification( Timestamp(42), Update( Path("/system/name"), Value("srl1", "json_ietf"), ), ), }, req: &gnmi.GetResponse{ Notification: []*gnmi.Notification{ { Timestamp: 42, Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{ JsonIetfVal: []byte("\"srl1\""), }, }, }, }, }, }, }, }, "two_updates": { opts: []GNMIOption{ Notification( Timestamp(42), Update( Path("/system/name"), Value("srl1", "json_ietf"), ), Update( Path("/interface"), Value(map[string]interface{}{ "name": "ethernet-1/1", }, "json_ietf"), ), ), }, req: &gnmi.GetResponse{ Notification: []*gnmi.Notification{ { Timestamp: 42, Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{ JsonIetfVal: []byte("\"srl1\""), }, }, }, { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "interface"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{ JsonIetfVal: []byte(`{"name":"ethernet-1/1"}`), }, }, }, }, }, }, }, }, } func TestNewGetResponse(t *testing.T) { for name, item := range getResponseTestSet { t.Run(name, func(t *testing.T) { nreq, err := NewGetResponse(item.opts...) if err != nil { t.Errorf("failed at %q: %v", name, err) t.Fail() } if !testutils.GetResponsesEqual(nreq, item.req) { t.Errorf("failed at %q", name) t.Errorf("expected %+v", item.req) t.Errorf(" got %+v", nreq) t.Fail() } }) } } // Set Request / Response tests type setRequestInput struct { opts []GNMIOption req *gnmi.SetRequest } var setRequestTestSet = map[string]setRequestInput{ "update": { opts: []GNMIOption{ Update(Path("/system/name/host-name"), Value("srl2", "json_ietf")), }, req: &gnmi.SetRequest{ Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, {Name: "host-name"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte("\"srl2\"")}, }, }, }, }, }, "two_updates": { opts: []GNMIOption{ Update( Path("/system/name/host-name"), Value("srl2", "json_ietf"), ), Update( Path("/system/gnmi-server/unix-socket/admin-state"), Value("enable", "json_ietf"), ), }, req: &gnmi.SetRequest{ Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, {Name: "host-name"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte("\"srl2\"")}, }, }, { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "gnmi-server"}, {Name: "unix-socket"}, {Name: "admin-state"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte("\"enable\"")}, }, }, }, }, }, "replace": { opts: []GNMIOption{ Replace(Path("/system/name/host-name"), Value("srl2", "json_ietf")), }, req: &gnmi.SetRequest{ Replace: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, {Name: "host-name"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte("\"srl2\"")}, }, }, }, }, }, "two_replaces": { opts: []GNMIOption{ Replace( Path("/system/name/host-name"), Value("srl2", "json_ietf"), ), Replace( Path("/system/gnmi-server/unix-socket/admin-state"), Value("enable", "json_ietf"), ), }, req: &gnmi.SetRequest{ Replace: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, {Name: "host-name"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte("\"srl2\"")}, }, }, { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "gnmi-server"}, {Name: "unix-socket"}, {Name: "admin-state"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte("\"enable\"")}, }, }, }, }, }, "delete": { opts: []GNMIOption{ Delete("/system/name/host-name"), }, req: &gnmi.SetRequest{ Delete: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, {Name: "host-name"}, }, }, }, }, }, "two_deletes": { opts: []GNMIOption{ Delete("/system/name/host-name"), Delete("interface/description"), }, req: &gnmi.SetRequest{ Delete: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, {Name: "host-name"}, }, }, { Elem: []*gnmi.PathElem{ {Name: "interface"}, {Name: "description"}, }, }, }, }, }, "update_replace": { opts: []GNMIOption{ Update( Path("/system/name/host-name"), Value("srl2", "json_ietf"), ), Replace( Path("/system/gnmi-server/unix-socket/admin-state"), Value("enable", "json_ietf"), ), }, req: &gnmi.SetRequest{ Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, {Name: "host-name"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte("\"srl2\"")}, }, }, }, Replace: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "gnmi-server"}, {Name: "unix-socket"}, {Name: "admin-state"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte("\"enable\"")}, }, }, }, }, }, "update_replace_delete": { opts: []GNMIOption{ Update( Path("/system/name/host-name"), Value("srl2", "json_ietf"), ), Replace( Path("/system/gnmi-server/unix-socket/admin-state"), Value("enable", "json_ietf"), ), Delete("/system/name/host-name"), }, req: &gnmi.SetRequest{ Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, {Name: "host-name"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte("\"srl2\"")}, }, }, }, Replace: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "gnmi-server"}, {Name: "unix-socket"}, {Name: "admin-state"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte("\"enable\"")}, }, }, }, Delete: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, {Name: "host-name"}, }, }, }, }, }, } func TestNewSetRequest(t *testing.T) { for name, item := range setRequestTestSet { t.Run(name, func(t *testing.T) { nreq, err := NewSetRequest(item.opts...) if err != nil { t.Errorf("failed at %q: %v", name, err) t.Fail() } if !testutils.SetRequestsEqual(nreq, item.req) { t.Errorf("failed at %q", name) t.Errorf("expected %+v", item.req) t.Errorf(" got %+v", nreq) t.Fail() } }) } } type setResponseInput struct { opts []GNMIOption req *gnmi.SetResponse } var setResponseTestSet = map[string]setResponseInput{ "simple": { opts: []GNMIOption{ Timestamp(42), UpdateResult( Operation("update"), Path("interface"), ), }, req: &gnmi.SetResponse{ Response: []*gnmi.UpdateResult{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "interface"}, }, }, Op: gnmi.UpdateResult_UPDATE, }, }, Timestamp: 42, }, }, "combined": { opts: []GNMIOption{ Timestamp(42), UpdateResult( Operation("update"), Path("interface"), ), UpdateResult( Operation("replace"), Path("network-instance"), ), }, req: &gnmi.SetResponse{ Response: []*gnmi.UpdateResult{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "interface"}, }, }, Op: gnmi.UpdateResult_UPDATE, }, { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "network-instance"}, }, }, Op: gnmi.UpdateResult_REPLACE, }, }, Timestamp: 42, }, }, } func TestNewSetResponse(t *testing.T) { for name, item := range setResponseTestSet { t.Run(name, func(t *testing.T) { nreq, err := NewSetResponse(item.opts...) if err != nil { t.Errorf("failed at %q: %v", name, err) t.Fail() } if !testutils.SetResponsesEqual(nreq, item.req) { t.Errorf("failed at %q", name) t.Errorf("expected %+v", item.req) t.Errorf(" got %+v", nreq) t.Fail() } }) } } // Subscribe Request / Response tests type subscribeRequestInput struct { opts []GNMIOption req *gnmi.SubscribeRequest } var subscribeRequestTestSet = map[string]subscribeRequestInput{ "subscription": { opts: []GNMIOption{ EncodingJSON_IETF(), Subscription( Path("system/name"), ), }, req: &gnmi.SubscribeRequest{ Request: &gnmi.SubscribeRequest_Subscribe{ Subscribe: &gnmi.SubscriptionList{ Encoding: gnmi.Encoding_JSON_IETF, Subscription: []*gnmi.Subscription{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, }, }, }, }, }, }, }, }, "subscription_list_mode_ONCE": { opts: []GNMIOption{ SubscriptionListModeONCE(), Subscription( Path("system/name"), ), }, req: &gnmi.SubscribeRequest{ Request: &gnmi.SubscribeRequest_Subscribe{ Subscribe: &gnmi.SubscriptionList{ Mode: gnmi.SubscriptionList_ONCE, Subscription: []*gnmi.Subscription{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, }, }, }, }, }, }, }, }, "subscription_list_mode_POLL": { opts: []GNMIOption{ SubscriptionListModePOLL(), Subscription( Path("system/name"), ), }, req: &gnmi.SubscribeRequest{ Request: &gnmi.SubscribeRequest_Subscribe{ Subscribe: &gnmi.SubscriptionList{ Mode: gnmi.SubscriptionList_POLL, Subscription: []*gnmi.Subscription{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, }, }, }, }, }, }, }, }, "subscription_list_mode_STREAM": { opts: []GNMIOption{ SubscriptionListModeSTREAM(), Subscription( Path("system/name"), ), }, req: &gnmi.SubscribeRequest{ Request: &gnmi.SubscribeRequest_Subscribe{ Subscribe: &gnmi.SubscriptionList{ Mode: gnmi.SubscriptionList_STREAM, Subscription: []*gnmi.Subscription{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, }, }, }, }, }, }, }, }, "subscription_mode_SAMPLE": { opts: []GNMIOption{ Subscription( Path("system/name"), SubscriptionModeSAMPLE(), ), }, req: &gnmi.SubscribeRequest{ Request: &gnmi.SubscribeRequest_Subscribe{ Subscribe: &gnmi.SubscriptionList{ Subscription: []*gnmi.Subscription{ { Mode: gnmi.SubscriptionMode_SAMPLE, Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, }, }, }, }, }, }, }, }, "subscription_mode_TARGET_DEFINED": { opts: []GNMIOption{ Subscription( Path("system/name"), SubscriptionModeTARGET_DEFINED(), ), }, req: &gnmi.SubscribeRequest{ Request: &gnmi.SubscribeRequest_Subscribe{ Subscribe: &gnmi.SubscriptionList{ Subscription: []*gnmi.Subscription{ { Mode: gnmi.SubscriptionMode_TARGET_DEFINED, Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, }, }, }, }, }, }, }, }, "subscription_mode_ON_CHANGE": { opts: []GNMIOption{ Subscription( Path("system/name"), SubscriptionModeON_CHANGE(), ), }, req: &gnmi.SubscribeRequest{ Request: &gnmi.SubscribeRequest_Subscribe{ Subscribe: &gnmi.SubscriptionList{ Subscription: []*gnmi.Subscription{ { Mode: gnmi.SubscriptionMode_ON_CHANGE, Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, }, }, }, }, }, }, }, }, "subscription_sample": { opts: []GNMIOption{ Encoding("json_ietf"), Subscription( Path("system/name"), SubscriptionMode("sample"), SampleInterval(10*time.Second), ), }, req: &gnmi.SubscribeRequest{ Request: &gnmi.SubscribeRequest_Subscribe{ Subscribe: &gnmi.SubscriptionList{ Encoding: gnmi.Encoding_JSON_IETF, Subscription: []*gnmi.Subscription{ { Mode: gnmi.SubscriptionMode_SAMPLE, SampleInterval: uint64(10 * time.Second), Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, }, }, }, }, }, }, }, }, "subscription_list_encoding_json": { opts: []GNMIOption{ EncodingJSON(), Subscription( Path("system/name"), SubscriptionMode("sample"), SampleInterval(10*time.Second), ), }, req: &gnmi.SubscribeRequest{ Request: &gnmi.SubscribeRequest_Subscribe{ Subscribe: &gnmi.SubscriptionList{ Subscription: []*gnmi.Subscription{ { Mode: gnmi.SubscriptionMode_SAMPLE, Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, }, }, }, }, }, }, }, }, "subscription_list_encoding_bytes": { opts: []GNMIOption{ EncodingBYTES(), Subscription( Path("system/name"), SubscriptionMode("sample"), SampleInterval(10*time.Second), ), }, req: &gnmi.SubscribeRequest{ Request: &gnmi.SubscribeRequest_Subscribe{ Subscribe: &gnmi.SubscriptionList{ Encoding: gnmi.Encoding_BYTES, Subscription: []*gnmi.Subscription{ { Mode: gnmi.SubscriptionMode_SAMPLE, Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, }, }, }, }, }, }, }, }, "subscription_list_encoding_proto": { opts: []GNMIOption{ EncodingPROTO(), Subscription( Path("system/name"), SubscriptionMode("sample"), SampleInterval(10*time.Second), ), }, req: &gnmi.SubscribeRequest{ Request: &gnmi.SubscribeRequest_Subscribe{ Subscribe: &gnmi.SubscriptionList{ Encoding: gnmi.Encoding_PROTO, Subscription: []*gnmi.Subscription{ { Mode: gnmi.SubscriptionMode_SAMPLE, Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, }, }, }, }, }, }, }, }, "subscription_list_encoding_ascii": { opts: []GNMIOption{ EncodingASCII(), Subscription( Path("system/name"), SubscriptionMode("sample"), SampleInterval(10*time.Second), ), }, req: &gnmi.SubscribeRequest{ Request: &gnmi.SubscribeRequest_Subscribe{ Subscribe: &gnmi.SubscriptionList{ Encoding: gnmi.Encoding_ASCII, Subscription: []*gnmi.Subscription{ { Mode: gnmi.SubscriptionMode_SAMPLE, Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, }, }, }, }, }, }, }, }, "subscription_list_encoding_json_ietf": { opts: []GNMIOption{ EncodingJSON_IETF(), Subscription( Path("system/name"), SubscriptionMode("sample"), SampleInterval(10*time.Second), ), }, req: &gnmi.SubscribeRequest{ Request: &gnmi.SubscribeRequest_Subscribe{ Subscribe: &gnmi.SubscriptionList{ Encoding: gnmi.Encoding_JSON_IETF, Subscription: []*gnmi.Subscription{ { Mode: gnmi.SubscriptionMode_SAMPLE, Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, }, }, }, }, }, }, }, }, } func TestNewSubscribeRequest(t *testing.T) { for name, item := range subscribeRequestTestSet { t.Run(name, func(t *testing.T) { nreq, err := NewSubscribeRequest(item.opts...) if err != nil { t.Errorf("failed at %q: %v", name, err) t.Fail() } if !testutils.SubscribeRequestsEqual(nreq, item.req) { t.Errorf("failed at %q", name) t.Errorf("expected %+v", item.req) t.Errorf(" got %+v", nreq) t.Fail() } }) } } type subscribeResponseInput struct { opts []GNMIOption req *gnmi.SubscribeResponse } var subscribeResponseTestSet = map[string]subscribeResponseInput{ "simple": { opts: []GNMIOption{ Notification( Timestamp(42), // Alias("alias1"), Update( Path("interface"), Value(map[string]interface{}{ "name": "ethernet-1/1", }, "json_ietf"), ), Delete("/interface[name=ethernet-1/2]"), Atomic(true), ), }, req: &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: &gnmi.Notification{ Timestamp: 42, // Alias: "alias1", Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "interface"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{ JsonIetfVal: []byte(`{"name":"ethernet-1/1"}`), }, }, }, }, Delete: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ { Name: "interface", Key: map[string]string{"name": "ethernet-1/2"}, }, }, }, }, Atomic: true, }, }, }, }, } func TestNewSubscribeResponse(t *testing.T) { for name, item := range subscribeResponseTestSet { t.Run(name, func(t *testing.T) { nreq, err := NewSubscribeResponse(item.opts...) if err != nil { t.Errorf("failed at %q: %v", name, err) t.Fail() } if !testutils.SubscribeResponsesEqual(nreq, item.req) { t.Errorf("failed at %q", name) t.Errorf("expected %+v", item.req) t.Errorf(" got %+v", nreq) t.Fail() } }) } } func TestNewSubscribeRequestPoll(t *testing.T) { name := "single_case" t.Run(name, func(t *testing.T) { nreq, err := NewSubscribePollRequest() if err != nil { t.Errorf("failed at %q: %v", name, err) t.Fail() } if !reflect.DeepEqual(&gnmi.SubscribeRequest{ Request: &gnmi.SubscribeRequest_Poll{ Poll: new(gnmi.Poll), }}, nreq) { t.Errorf("failed at %q", name) t.Errorf("expected %+v", &gnmi.SubscribeRequest{Request: &gnmi.SubscribeRequest_Poll{}}) t.Errorf(" got %+v", nreq) t.Fail() } }) } func TestNewSubscribeResponseSync(t *testing.T) { name := "single_case" t.Run(name, func(t *testing.T) { nreq, err := NewSubscribeSyncResponse() if err != nil { t.Errorf("failed at %q: %v", name, err) t.Fail() } if !reflect.DeepEqual(&gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_SyncResponse{ SyncResponse: true, }, }, nreq) { t.Errorf("failed at %q", name) t.Errorf("expected %+v", &gnmi.SubscribeRequest{Request: &gnmi.SubscribeRequest_Poll{}}) t.Errorf(" got %+v", nreq) t.Fail() } }) } // Value tests type valueInput struct { data interface{} encoding string msg *gnmi.Update err error } var valueTestSet = map[string]valueInput{ // json "json_string": { data: "value", encoding: "json", msg: &gnmi.Update{ Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value\""), }, }, }, }, "json_string_special_chars": { data: "<.*>", encoding: "json", msg: &gnmi.Update{ Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"<.*>\""), }, }, }, }, "json_string_array": { data: []string{"foo", "bar"}, encoding: "json", msg: &gnmi.Update{ Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("[\"foo\",\"bar\"]"), }, }, }, }, "json_interface{}_array": { data: []interface{}{"foo", 42}, encoding: "json", msg: &gnmi.Update{ Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("[\"foo\",42]"), }, }, }, }, "json_map": { data: map[string]interface{}{"k": "v"}, encoding: "json", msg: &gnmi.Update{ Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("{\"k\":\"v\"}"), }, }, }, }, // json_ietf "json_ietf_string": { data: "value", encoding: "json_ietf", msg: &gnmi.Update{ Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{ JsonIetfVal: []byte("\"value\""), }, }, }, }, "json_ietf_string_array": { data: []string{"foo", "bar"}, encoding: "json_ietf", msg: &gnmi.Update{ Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{ JsonIetfVal: []byte("[\"foo\",\"bar\"]"), }, }, }, }, "json_ietf_interface{}_array": { data: []interface{}{"foo", int(42)}, encoding: "json_ietf", msg: &gnmi.Update{ Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{ JsonIetfVal: []byte("[\"foo\",42]"), }, }, }, }, "json_ietf_map": { data: map[string]interface{}{"k": "v"}, encoding: "json_ietf", msg: &gnmi.Update{ Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{ JsonIetfVal: []byte("{\"k\":\"v\"}"), }, }, }, }, // ascii "ascii_string": { data: "foo", encoding: "ascii", msg: &gnmi.Update{ Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{ AsciiVal: "foo", }, }, }, }, "ascii_string_array": { data: []string{"foo", "bar"}, encoding: "ascii", msg: &gnmi.Update{ Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_LeaflistVal{ LeaflistVal: &gnmi.ScalarArray{ Element: []*gnmi.TypedValue{ { Value: &gnmi.TypedValue_StringVal{StringVal: "foo"}, }, { Value: &gnmi.TypedValue_StringVal{StringVal: "bar"}, }, }, }, }, }, }, }, "ascii_interface{}_array": { data: []interface{}{"foo", 42}, encoding: "ascii", msg: &gnmi.Update{ Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_LeaflistVal{ LeaflistVal: &gnmi.ScalarArray{ Element: []*gnmi.TypedValue{ { Value: &gnmi.TypedValue_StringVal{StringVal: "foo"}, }, { Value: &gnmi.TypedValue_IntVal{IntVal: 42}, }, }, }, }, }, }, }, // typed values "typed_value": { data: &gnmi.TypedValue{Value: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte("\"srl1\"")}}, msg: &gnmi.Update{ Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte("\"srl1\"")}}, }, err: nil, }, "typed_value_json": { data: &gnmi.TypedValue_JsonVal{JsonVal: []byte("\"srl1\"")}, msg: &gnmi.Update{ Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_JsonVal{JsonVal: []byte("\"srl1\"")}}, }, err: nil, }, "typed_value_json_ietf": { data: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte("\"srl1\"")}, msg: &gnmi.Update{ Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_JsonIetfVal{JsonIetfVal: []byte("\"srl1\"")}}, }, err: nil, }, "typed_value_ascii": { data: &gnmi.TypedValue_AsciiVal{AsciiVal: "srl1"}, msg: &gnmi.Update{ Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_AsciiVal{AsciiVal: "srl1"}}, }, err: nil, }, "typed_value_bool": { data: &gnmi.TypedValue_BoolVal{BoolVal: true}, msg: &gnmi.Update{ Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_BoolVal{BoolVal: true}}, }, err: nil, }, "typed_value_bytes": { data: &gnmi.TypedValue_BytesVal{BytesVal: []byte{0, 42}}, msg: &gnmi.Update{ Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_BytesVal{BytesVal: []byte{0, 42}}}, }, err: nil, }, "typed_value_float": { data: &gnmi.TypedValue_FloatVal{FloatVal: 42.1}, msg: &gnmi.Update{ Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_FloatVal{FloatVal: 42.1}}, }, err: nil, }, "typed_value_int": { data: &gnmi.TypedValue_IntVal{IntVal: 42}, msg: &gnmi.Update{ Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_IntVal{IntVal: 42}}, }, err: nil, }, "typed_value_uint": { data: &gnmi.TypedValue_UintVal{UintVal: 42}, msg: &gnmi.Update{ Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_UintVal{UintVal: 42}}, }, err: nil, }, "typed_value_string": { data: &gnmi.TypedValue_StringVal{StringVal: "foo"}, msg: &gnmi.Update{ Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_StringVal{StringVal: "foo"}}, }, err: nil, }, "typed_value_leaf_list": { data: &gnmi.TypedValue_LeaflistVal{ LeaflistVal: &gnmi.ScalarArray{ Element: []*gnmi.TypedValue{ {Value: &gnmi.TypedValue_StringVal{StringVal: "foo"}}, {Value: &gnmi.TypedValue_UintVal{UintVal: 42}}, }, }, }, msg: &gnmi.Update{ Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_LeaflistVal{ LeaflistVal: &gnmi.ScalarArray{ Element: []*gnmi.TypedValue{ {Value: &gnmi.TypedValue_StringVal{StringVal: "foo"}}, {Value: &gnmi.TypedValue_UintVal{UintVal: 42}}, }, }, }, }, }, err: nil, }, // scalar "from_scalar": { data: 42, encoding: "json", msg: &gnmi.Update{ Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_IntVal{ IntVal: 42, }, }, }, }, "invalid_value": { data: nil, err: ErrInvalidValue, }, } func TestValue(t *testing.T) { for name, item := range valueTestSet { t.Run(name, func(t *testing.T) { upd := new(gnmi.Update) err := Value(item.data, item.encoding)(upd) if err != nil { uerr := errors.Unwrap(err) if !errors.Is(uerr, item.err) { t.Errorf("failed at %q with error: %v", name, err) t.Errorf("expected err: %+v", item.err) t.Errorf(" got err: %+v", err) t.Fail() } return } if !testutils.GnmiValuesEqual(item.msg.GetVal(), upd.GetVal()) { t.Errorf("failed at %q", name) t.Errorf("expected %+v", item.msg.GetVal()) t.Errorf(" got %+v", upd.GetVal()) t.Fail() } }) } } // Version tests func TestVersion(t *testing.T) { name := "nil_msg" t.Run(name, func(t *testing.T) { err := Version(DefaultGNMIVersion)(nil) if err != nil { if !strings.Contains(err.Error(), ErrInvalidMsgType.Error()) { t.Errorf("failed at %q with error: %v", name, err) t.Fail() } } }) name = "invalid_msg" t.Run(name, func(t *testing.T) { err := Version(DefaultGNMIVersion)(new(gnmi.GetRequest)) if err != nil { if !strings.Contains(err.Error(), ErrInvalidMsgType.Error()) { t.Errorf("failed at %q with error: %v", name, err) t.Fail() } } }) } func TestSupportedEncoding(t *testing.T) { name := "nil_msg" t.Run(name, func(t *testing.T) { err := SupportedEncoding("json")(nil) if err != nil { if !strings.Contains(err.Error(), ErrInvalidMsgType.Error()) { t.Errorf("failed at %q with error: %v", name, err) t.Fail() } } }) name = "invalid_msg" t.Run(name, func(t *testing.T) { err := SupportedEncoding("json")(new(gnmi.GetRequest)) if err != nil { if !strings.Contains(err.Error(), ErrInvalidMsgType.Error()) { t.Errorf("failed at %q with error: %v", name, err) t.Fail() } } }) name = "invalid_value" t.Run(name, func(t *testing.T) { err := SupportedEncoding("not_valid")(new(gnmi.GetRequest)) if err != nil { if !strings.Contains(err.Error(), ErrInvalidMsgType.Error()) { t.Errorf("failed at %q with error: %v", name, err) t.Fail() } } }) } func TestSupportedModel(t *testing.T) { name := "nil_msg" t.Run(name, func(t *testing.T) { err := SupportedModel("", "", "")(nil) if err != nil { if !strings.Contains(err.Error(), ErrInvalidMsgType.Error()) { t.Errorf("failed at %q with error: %v", name, err) t.Fail() } } }) name = "invalid_msg" t.Run(name, func(t *testing.T) { err := SupportedModel("", "", "")(new(gnmi.GetRequest)) if err != nil { if !strings.Contains(err.Error(), ErrInvalidMsgType.Error()) { t.Errorf("failed at %q with error: %v", name, err) t.Fail() } } }) name = "ok" t.Run(name, func(t *testing.T) { capRsp := new(gnmi.CapabilityResponse) err := SupportedModel("foo", "bar", "v2")(capRsp) if err != nil { if !strings.Contains(err.Error(), ErrInvalidMsgType.Error()) { t.Errorf("failed at %q with error: %v", name, err) t.Fail() } } if len(capRsp.SupportedModels) != 1 { t.Fail() } if capRsp.SupportedModels[0].Name != "foo" { t.Fail() } if capRsp.SupportedModels[0].Organization != "bar" { t.Fail() } if capRsp.SupportedModels[0].Version != "v2" { t.Fail() } }) } ================================================ FILE: pkg/api/go.mod ================================================ module github.com/openconfig/gnmic/pkg/api go 1.24.12 require ( github.com/AlekSi/pointer v1.2.0 github.com/google/go-cmp v0.7.0 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/jhump/protoreflect v1.17.0 github.com/juju/ratelimit v1.0.2 github.com/openconfig/gnmi v0.14.1 github.com/openconfig/grpctunnel v0.1.0 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.20.5 golang.org/x/net v0.48.0 golang.org/x/oauth2 v0.34.0 golang.org/x/sync v0.19.0 google.golang.org/grpc v1.78.0 google.golang.org/protobuf v1.36.11 ) require ( cloud.google.com/go/compute/metadata v0.9.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bufbuild/protocompile v0.14.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/kr/text v0.2.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect golang.org/x/sys v0.39.0 // indirect golang.org/x/text v0.32.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect ) ================================================ FILE: pkg/api/go.sum ================================================ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= github.com/AlekSi/pointer v1.2.0 h1:glcy/gc4h8HnG2Z3ZECSzZ1IX1x2JxRVuDzaJwQE0+w= github.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94= github.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8= github.com/juju/ratelimit v1.0.2 h1:sRxmtRiajbvrcLQT7S+JbqU0ntsb9W2yhSdNN8tWfaI= github.com/juju/ratelimit v1.0.2/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/openconfig/gnmi v0.14.1 h1:qKMuFvhIRR2/xxCOsStPQ25aKpbMDdWr3kI+nP9bhMs= github.com/openconfig/gnmi v0.14.1/go.mod h1:whr6zVq9PCU8mV1D0K9v7Ajd3+swoN6Yam9n8OH3eT0= github.com/openconfig/grpctunnel v0.1.0 h1:EN99qtlExZczgQgp5ANnHRC/Rs62cAG+Tz2BQ5m/maM= github.com/openconfig/grpctunnel v0.1.0/go.mod h1:G04Pdu0pml98tdvXrvLaU+EBo3PxYfI9MYqpvdaEHLo= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda h1:i/Q+bfisr7gq6feoJnS/DlpdwEL4ihp41fvRiM3Ork0= google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= ================================================ FILE: pkg/api/path/path.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package path import ( "errors" "sort" "strings" "sync" "github.com/openconfig/gnmi/proto/gnmi" ) var errMalformedXPath = errors.New("malformed xpath") var errMalformedXPathKey = errors.New("malformed xpath key") var errEmptyPathElemName = errors.New("empty path element name") var escapedBracketsReplacer = strings.NewReplacer(`\]`, `]`, `\[`, `[`) var stringBuilderPool = sync.Pool{ New: func() any { return new(strings.Builder) }, } // CreatePrefix // func CreatePrefix(prefix, target string) (*gnmi.Path, error) { if len(prefix)+len(target) == 0 { return nil, nil } p, err := ParsePath(prefix) if err != nil { return nil, err } if target != "" { p.Target = target } return p, nil } // ParsePath creates a gnmi.Path out of a p string, check if the first element is prefixed by an origin, // removes it from the xpath and adds it to the returned gnmiPath func ParsePath(p string) (*gnmi.Path, error) { lp := len(p) if lp == 0 { return &gnmi.Path{}, nil } var origin string idx := strings.Index(p, ":") if idx >= 0 && p[0] != '/' && !strings.Contains(p[:idx], "/") && // path == origin:/ || path == origin: ((idx+1 < lp && p[idx+1] == '/') || (lp == idx+1)) { origin = p[:idx] p = p[idx+1:] } pes, err := toPathElems(p) if err != nil { return nil, err } return &gnmi.Path{ Origin: origin, Elem: pes, }, nil } // toPathElems parses a xpath and returns a list of path elements func toPathElems(p string) ([]*gnmi.PathElem, error) { if !strings.HasSuffix(p, "/") { p += "/" } buffer := make([]rune, 0) null := rune(0) prevC := rune(0) // track if the loop is traversing a key inKey := false for _, r := range p { switch r { case '[': if inKey && prevC != '\\' { return nil, errMalformedXPath } if prevC != '\\' { inKey = true } case ']': if !inKey && prevC != '\\' { return nil, errMalformedXPath } if prevC != '\\' { inKey = false } case '/': if !inKey { buffer = append(buffer, null) prevC = r continue } } buffer = append(buffer, r) prevC = r } if inKey { return nil, errMalformedXPath } stringElems := strings.Split(string(buffer), string(null)) pElems := make([]*gnmi.PathElem, 0, len(stringElems)) for _, s := range stringElems { if s == "" { continue } pe, err := toPathElem(s) if err != nil { return nil, err } pElems = append(pElems, pe) } return pElems, nil } // toPathElem take a xpath formatted path element such as "elem1[k=v]" and returns the corresponding gnmi.PathElem func toPathElem(s string) (*gnmi.PathElem, error) { idx := -1 prevC := rune(0) for i, r := range s { if r == '[' && prevC != '\\' { idx = i break } prevC = r } var kvs map[string]string if idx > 0 { var err error kvs, err = parseXPathKeys(s[idx:]) if err != nil { return nil, err } s = s[:idx] } else if idx == 0 { return nil, errEmptyPathElemName } if s == "" { return nil, errEmptyPathElemName } return &gnmi.PathElem{Name: s, Key: kvs}, nil } // parseXPathKeys takes keys definition from an xpath, e.g [k1=v1][k2=v2] and return the keys and values as a map[string]string func parseXPathKeys(s string) (map[string]string, error) { if len(s) == 0 { return nil, nil } kvs := make(map[string]string) inKey := false start := 0 prevRune := rune(0) for i, r := range s { switch r { case '[': if prevRune == '\\' { prevRune = r continue } if inKey { return nil, errMalformedXPathKey } inKey = true start = i + 1 case ']': if prevRune == '\\' { prevRune = r continue } if !inKey { return nil, errMalformedXPathKey } eq := strings.Index(s[start:i], "=") if eq < 0 { return nil, errMalformedXPathKey } k, v := s[start:i][:eq], s[start:i][eq+1:] if len(k) == 0 || len(v) == 0 { return nil, errMalformedXPathKey } kvs[escapedBracketsReplacer.Replace(k)] = escapedBracketsReplacer.Replace(v) inKey = false default: if !inKey { return nil, errMalformedXPathKey } } prevRune = r } if inKey { return nil, errMalformedXPathKey } return kvs, nil } func PathElems(pf, p *gnmi.Path) []*gnmi.PathElem { r := make([]*gnmi.PathElem, 0, len(pf.GetElem())+len(p.GetElem())) r = append(r, pf.GetElem()...) return append(r, p.GetElem()...) } func GnmiPathToXPath(p *gnmi.Path, noKeys bool) string { if p == nil { return "" } sb := stringBuilderPool.Get().(*strings.Builder) defer func() { sb.Reset() stringBuilderPool.Put(sb) }() if p.Origin != "" { sb.WriteString(p.Origin) sb.WriteString(":/") } elems := p.GetElem() numElems := len(elems) for i, pe := range elems { sb.WriteString(pe.GetName()) if !noKeys { numKeys := len(pe.GetKey()) switch numKeys { case 0: case 1: for k := range pe.GetKey() { writeKey(sb, k, pe.GetKey()[k]) } default: keys := make([]string, 0, numKeys) for k := range pe.GetKey() { keys = append(keys, k) } sort.Strings(keys) for _, k := range keys { writeKey(sb, k, pe.GetKey()[k]) } } } if i+1 != numElems { sb.WriteString("/") } } return sb.String() } func writeKey(sb *strings.Builder, k, v string) { sb.WriteString("[") sb.WriteString(k) sb.WriteString("=") sb.WriteString(v) sb.WriteString("]") } ================================================ FILE: pkg/api/path/path_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package path import ( "reflect" "strings" "testing" "github.com/google/go-cmp/cmp" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/api/testutils" ) var prefixSet = map[string]*gnmi.Path{ "": nil, "target%%%origin:/e1/e2": { Origin: "origin", Target: "target", Elem: []*gnmi.PathElem{ {Name: "e1"}, {Name: "e2"}, }, }, "/e1": { Elem: []*gnmi.PathElem{ {Name: "e1"}, }, }, "/e1/e2[k=v]": { Elem: []*gnmi.PathElem{ {Name: "e1"}, {Name: "e2", Key: map[string]string{ "k": "v", }}, }, }, } var pathsTable = map[string]struct { strPath string gnmiPath *gnmi.Path isOK bool expectedErr error }{ "empty_path": { strPath: "", gnmiPath: &gnmi.Path{}, isOK: true, expectedErr: nil, }, "path_with_slash_only": { strPath: "/", gnmiPath: &gnmi.Path{}, isOK: true, }, "path_with_one_path_element": { strPath: "e", gnmiPath: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "e"}, }, }, isOK: true, expectedErr: nil, }, "path_with_one_path_element_with_slash": { strPath: "/e", gnmiPath: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "e"}, }, }, isOK: true, expectedErr: nil, }, "path_with_two_path_elements": { strPath: "/e1/e2", gnmiPath: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "e1"}, {Name: "e2"}, }, }, isOK: true, expectedErr: nil, }, "path_with_two_path_elements_with_key": { strPath: "/e1/e2[k=v]", gnmiPath: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "e1"}, {Name: "e2", Key: map[string]string{ "k": "v", }}, }, }, isOK: true, expectedErr: nil, }, "path_with_multiple_path_elements_and_multiple_keys": { strPath: "/e1/e2[k1=v1][k2=v2]", gnmiPath: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "e1"}, {Name: "e2", Key: map[string]string{ "k1": "v1", "k2": "v2", }}, }, }, isOK: true, expectedErr: nil, }, "path_with_origin": { strPath: "origin:/e1/e2", gnmiPath: &gnmi.Path{ Origin: "origin", Elem: []*gnmi.PathElem{ {Name: "e1"}, {Name: "e2"}, }, }, isOK: true, expectedErr: nil, }, "path_with_origin_only": { strPath: "origin:", gnmiPath: &gnmi.Path{ Origin: "origin", }, isOK: true, }, "path_with_origin_and_slash_only": { strPath: "origin:/", gnmiPath: &gnmi.Path{ Origin: "origin", }, isOK: true, }, "path_with_empty_origin": { strPath: ":", gnmiPath: &gnmi.Path{}, isOK: true, }, "path_with_empty_origin_and_slash_only": { strPath: ":/", gnmiPath: &gnmi.Path{}, isOK: true, }, "path_with_origin_and_key": { strPath: "origin:/e1/e2[k=v]", gnmiPath: &gnmi.Path{ Origin: "origin", Elem: []*gnmi.PathElem{ {Name: "e1"}, {Name: "e2", Key: map[string]string{ "k": "v", }}, }, }, isOK: true, expectedErr: nil, }, "path_with_origin_and_multiple_keys": { strPath: "origin:/e1[name=object]/e2[addr=1.1.1.1/32]", gnmiPath: &gnmi.Path{ Origin: "origin", Elem: []*gnmi.PathElem{ {Name: "e1", Key: map[string]string{ "name": "object", }}, {Name: "e2", Key: map[string]string{ "addr": "1.1.1.1/32", }}, }, }, isOK: true, expectedErr: nil, }, "path_with_colon_in_path_elem": { strPath: "origin:/e1:e1[k=1.1.1.1/32]/e2[k1=v2]", gnmiPath: &gnmi.Path{ Origin: "origin", Elem: []*gnmi.PathElem{ {Name: "e1:e1", Key: map[string]string{ "k": "1.1.1.1/32", }, }, {Name: "e2", Key: map[string]string{ "k1": "v2", }, }, }, }, isOK: true, expectedErr: nil, }, "path_with_colon_in_2_path_elems": { strPath: "origin:/e1:e1[k=1.1.1.1/32]/e2:e3[k1=v2]", gnmiPath: &gnmi.Path{ Origin: "origin", Elem: []*gnmi.PathElem{ {Name: "e1:e1", Key: map[string]string{ "k": "1.1.1.1/32", }, }, {Name: "e2:e3", Key: map[string]string{ "k1": "v2", }, }, }, }, isOK: true, expectedErr: nil, }, "path_with_escaped_open_bracket": { strPath: `/e1\[/e2[k=v]`, gnmiPath: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: `e1\[`}, {Name: "e2", Key: map[string]string{ "k": "v", }}, }, }, isOK: true, expectedErr: nil, }, "path_with_escaped_close_bracket": { strPath: `/e1\]/e2[k=v]`, gnmiPath: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: `e1\]`}, {Name: "e2", Key: map[string]string{ "k": "v", }}, }, }, isOK: true, expectedErr: nil, }, "path_with_colon_in_first_path_elem": { strPath: `e1:e2/e3[k=v]`, gnmiPath: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "e1:e2"}, {Name: "e3", Key: map[string]string{ "k": "v", }}, }, }, isOK: true, expectedErr: nil, }, "path_with_colon_in_key_value": { strPath: `/e1/e2[k=v:1]`, gnmiPath: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "e1"}, {Name: "e2", Key: map[string]string{ "k": "v:1", }}, }, }, isOK: true, expectedErr: nil, }, "path_without_origin_with_colon_in_path_elem": { strPath: `e1/e2:e3[k=v:1]`, gnmiPath: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "e1"}, {Name: "e2:e3", Key: map[string]string{ "k": "v:1", }}, }, }, isOK: true, expectedErr: nil, }, "path_with_origin_and_colon_in_key_value": { strPath: `origin:/e1/e2[k=v:1]`, gnmiPath: &gnmi.Path{ Origin: "origin", Elem: []*gnmi.PathElem{ {Name: "e1"}, {Name: "e2", Key: map[string]string{ "k": "v:1", }}, }, }, isOK: true, expectedErr: nil, }, "path_with_origin_and_colon_space_in_key_value": { strPath: `origin:/e1/e2[k=v a:1]`, gnmiPath: &gnmi.Path{ Origin: "origin", Elem: []*gnmi.PathElem{ {Name: "e1"}, {Name: "e2", Key: map[string]string{ "k": `v a:1`, }}, }, }, isOK: true, expectedErr: nil, }, "path_with_origin_and_colon_space_in_key_value_double_quoted_value": { strPath: `origin:/e1/e2[k="v a:1"]`, gnmiPath: &gnmi.Path{ Origin: "origin", Elem: []*gnmi.PathElem{ {Name: "e1"}, {Name: "e2", Key: map[string]string{ "k": `"v a:1"`, }}, }, }, isOK: true, expectedErr: nil, }, "path_with_missing_closing_bracket": { strPath: `/e1/e2[k=v`, gnmiPath: nil, isOK: false, expectedErr: errMalformedXPath, }, "path_with_missing_open_bracket": { strPath: `/e1/e2k=v]`, gnmiPath: nil, isOK: false, expectedErr: errMalformedXPath, }, "path_with_key_missing_equal_sign": { strPath: `/e1/e2[k]`, gnmiPath: nil, isOK: false, expectedErr: errMalformedXPathKey, }, } type outKeysSet struct { out map[string]string err error } type outPathElemSet struct { out *gnmi.PathElem err error } var keysSet = map[string]struct { in string exp outKeysSet }{ "no_key": { in: "", exp: outKeysSet{ out: nil, err: nil, }, }, "one_key": { in: "[k=v]", exp: outKeysSet{ out: map[string]string{"k": "v"}, err: nil, }, }, "two_key": { in: "[k1=v1][k2=1.1.1.1/30]", exp: outKeysSet{ out: map[string]string{"k1": "v1", "k2": "1.1.1.1/30"}, err: nil, }, }, "noval_key": { in: "[k1=]", exp: outKeysSet{ out: nil, err: errMalformedXPathKey, }, }, "nokey_with_val": { in: "[=v]", exp: outKeysSet{ out: nil, err: errMalformedXPathKey, }, }, "inKey_brackets": { in: "[k=[v]", exp: outKeysSet{ out: nil, err: errMalformedXPathKey, }, }, "inKey_escaped_open_bracket": { in: `[k=\[v]`, exp: outKeysSet{ out: map[string]string{"k": "[v"}, err: nil, }, }, "inKey_escaped_close_bracket": { in: `[k=\]v]`, exp: outKeysSet{ out: map[string]string{"k": "]v"}, err: nil, }, }, "inKey_escaped_brackets": { in: `[\[k=\]v]`, exp: outKeysSet{ out: map[string]string{"[k": "]v"}, err: nil, }, }, } var pathElemSet = map[string]struct { in string out outPathElemSet }{ "no_key": { in: "elem1", out: outPathElemSet{ out: &gnmi.PathElem{Name: "elem1"}, err: nil, }, }, "with_1_key": { in: "elem1[k=v]", out: outPathElemSet{ out: &gnmi.PathElem{Name: "elem1", Key: map[string]string{"k": "v"}}, err: nil, }, }, "with_2_keys": { in: "elem1[k1=v1][k2=v2]", out: outPathElemSet{ out: &gnmi.PathElem{Name: "elem1", Key: map[string]string{"k1": "v1", "k2": "v2"}}, err: nil, }, }, "with_1_key_malformed": { in: "elem1[k1=v1", out: outPathElemSet{ out: nil, err: errMalformedXPathKey, }, }, "elem_with_escaped_bracket": { in: `elem1\[k1=v1`, out: outPathElemSet{ out: &gnmi.PathElem{Name: `elem1\[k1=v1`}, err: nil, }, }, } func TestCreatePrefix(t *testing.T) { var target, prefix string for e, p := range prefixSet { val := strings.Split(e, "%%%") //fmt.Printf("%d: %v\n", len(val), val) if len(val) == 2 { target, prefix = val[0], val[1] } else if len(val) == 1 { target, prefix = "", val[0] } //fmt.Println(target, prefix) gp, err := CreatePrefix(prefix, target) if err != nil { t.Error(err) } if !reflect.DeepEqual(p, gp) { t.Errorf("failed at elem: %s: expecting %v, got %v", e, p, gp) } } } func TestParsePath(t *testing.T) { for name, tc := range pathsTable { t.Run(name, func(t *testing.T) { p, err := ParsePath(tc.strPath) if err != nil && tc.isOK { t.Fatal(err) } if !tc.isOK { if err != tc.expectedErr { t.Errorf("failed at '%s', expected error %+v, got %+v", name, tc.expectedErr, err) } return } if !testutils.GnmiPathsEqual(p, tc.gnmiPath) { t.Errorf("failed at '%s', expected %v, got %+v", name, tc.gnmiPath, p) } }) } } func TestParseXPathKeys(t *testing.T) { for name, input := range keysSet { t.Run(name, func(t *testing.T) { keys, err := parseXPathKeys(input.in) if !cmp.Equal(keys, input.exp.out) { t.Errorf("failed at '%s', expected %v, got %+v", name, input.exp.out, keys) } if err != input.exp.err { t.Errorf("failed at '%s', expected error %+v, got %+v", name, input.exp.err, err) } }) } } func TestStringToPathElem(t *testing.T) { for name, input := range pathElemSet { t.Run(name, func(t *testing.T) { gnmiPathElem, err := toPathElem(input.in) if gnmiPathElem == nil || input.out.out == nil { if gnmiPathElem != input.out.out { t.Errorf("failed at '%s', expected %v, got %+v", name, input.out.out, gnmiPathElem) } } else if !cmp.Equal(gnmiPathElem.Key, input.out.out.Key) || gnmiPathElem.Name != input.out.out.Name { t.Errorf("failed at '%s', expected %v, got %+v", name, input.out.out, gnmiPathElem) } if err != input.out.err { t.Errorf("failed at '%s', expected error %+v, got %+v", name, input.out.err, err) } }) } } func BenchmarkParsePath(b *testing.B) { for name, tc := range pathsTable { b.Run(name, func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { ParsePath(tc.strPath) } }) } } func TestGnmiPathToXPath(t *testing.T) { tests := []struct { name string // description of this test case // Named input parameters for target function. p *gnmi.Path noKeys bool want string }{ { name: "nil", p: nil, noKeys: false, want: "", }, { name: "empty_path", p: &gnmi.Path{}, noKeys: false, want: "", }, { name: "path_with_one_path_element", p: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "e1"}}}, noKeys: false, want: "e1", }, { name: "path_with_one_path_element_with_key", p: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "e1", Key: map[string]string{"k": "v"}}}}, noKeys: false, want: "e1[k=v]", }, { name: "path_with_two_path_elements", p: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "e1"}, {Name: "e2"}}}, noKeys: false, want: "e1/e2", }, { name: "path_with_two_path_elements_with_key", p: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "e1", Key: map[string]string{"k": "v"}}, {Name: "e2", Key: map[string]string{"k1": "v1"}}}}, noKeys: false, want: "e1[k=v]/e2[k1=v1]", }, { name: "path_with_origin", p: &gnmi.Path{Origin: "origin", Elem: []*gnmi.PathElem{{Name: "e1"}, {Name: "e2"}}}, noKeys: false, want: "origin:/e1/e2", }, { name: "path_with_origin_and_key", p: &gnmi.Path{Origin: "origin", Elem: []*gnmi.PathElem{{Name: "e1", Key: map[string]string{"k": "v"}}}}, noKeys: false, want: "origin:/e1[k=v]", }, { name: "path_with_origin_and_multiple_keys", p: &gnmi.Path{Origin: "origin", Elem: []*gnmi.PathElem{{Name: "e1", Key: map[string]string{"k": "v"}}, {Name: "e2", Key: map[string]string{"k1": "v1"}}}}, noKeys: false, want: "origin:/e1[k=v]/e2[k1=v1]", }, { name: "path_with_multiple_keys_in_one_path_element", p: &gnmi.Path{Elem: []*gnmi.PathElem{{Name: "e1", Key: map[string]string{"k": "v", "k1": "v1"}}}}, noKeys: false, want: "e1[k=v][k1=v1]", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := GnmiPathToXPath(tt.p, tt.noKeys) // TODO: update the condition below to compare got with tt.want. if got != tt.want { t.Errorf("GnmiPathToXPath() = %v, want %v", got, tt.want) } }) } } ================================================ FILE: pkg/api/server/options.go ================================================ // © 2024 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package server import ( "crypto/tls" "fmt" "os" "time" grpc_ratelimit "github.com/grpc-ecosystem/go-grpc-middleware/ratelimit" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/juju/ratelimit" "github.com/openconfig/gnmic/pkg/api/utils" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" ) func (s *gNMIServer) serverOpts() ([]grpc.ServerOption, error) { opts := make([]grpc.ServerOption, 0, 1) credsOpts, err := s.tlsServerOpts() if err != nil { return nil, err } opts = append(opts, credsOpts) if s.config.Keepalive != nil { opts = append(opts, grpc.KeepaliveParams(*s.config.Keepalive)) } if s.config.MaxRecvMsgSize > 0 { opts = append(opts, grpc.MaxRecvMsgSize(s.config.MaxRecvMsgSize)) } if s.config.MaxSendMsgSize > 0 { opts = append(opts, grpc.MaxSendMsgSize(s.config.MaxSendMsgSize)) } if s.config.MaxConcurrentStreams > 0 { opts = append(opts, grpc.MaxConcurrentStreams(s.config.MaxConcurrentStreams)) } opts = append(opts, s.interceptorsOpts()...) return opts, nil } func (s *gNMIServer) interceptorsOpts() []grpc.ServerOption { ui := []grpc.UnaryServerInterceptor{} si := []grpc.StreamServerInterceptor{} if s.reg != nil { grpcMetrics := grpc_prometheus.NewServerMetrics() ui = append(ui, grpcMetrics.UnaryServerInterceptor()) si = append(si, grpcMetrics.StreamServerInterceptor()) s.reg.MustRegister(grpcMetrics) } if s.config.RateLimit > 0 { limiter := &rateLimiterInterceptor{ bucket: ratelimit.NewBucket(time.Second, s.config.RateLimit), } ui = append(ui, grpc_ratelimit.UnaryServerInterceptor(limiter)) si = append(si, grpc_ratelimit.StreamServerInterceptor(limiter)) } return []grpc.ServerOption{ grpc.ChainUnaryInterceptor(ui...), grpc.ChainStreamInterceptor(si...), } } type rateLimiterInterceptor struct { bucket *ratelimit.Bucket } func (r *rateLimiterInterceptor) Limit() bool { return r.bucket.TakeAvailable(1) == 0 } func (s *gNMIServer) tlsServerOpts() (grpc.ServerOption, error) { if s.config.TLS == nil { return grpc.Creds(insecure.NewCredentials()), nil } err := s.config.TLS.Validate() if err != nil { return nil, err } tlsConfig, err := s.createTLSConfig() if err != nil { return nil, err } return grpc.Creds(credentials.NewTLS(tlsConfig)), nil } func (s *gNMIServer) createTLSConfig() (*tls.Config, error) { tlsConfig := &tls.Config{} if s.config.TLS.CertFile == "" && s.config.TLS.KeyFile == "" { cert, _ := utils.SelfSignedCerts() tlsConfig.Certificates = []tls.Certificate{cert} } else { tlsConfig.GetCertificate = s.readCerts } switch s.config.TLS.ClientAuth { default: tlsConfig.ClientAuth = tls.NoClientCert case "request": tlsConfig.ClientAuth = tls.RequestClientCert case "require": tlsConfig.ClientAuth = tls.RequireAnyClientCert case "verify-if-given": tlsConfig.ClientAuth = tls.VerifyClientCertIfGiven case "require-verify": tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert } if len(s.config.TLS.CaFile) != 0 { caCertPool, err := utils.LoadCACertificates(s.config.TLS.CaFile) if err != nil { return nil, err } tlsConfig.ClientCAs = caCertPool } return tlsConfig, nil } func (s *gNMIServer) readCerts(chi *tls.ClientHelloInfo) (*tls.Certificate, error) { now := time.Now() s.cm.Lock() defer s.cm.Unlock() if !now.After(s.lastRead.Add(time.Minute)) && s.cert != nil { return s.cert, nil } cert, err := os.ReadFile(s.config.TLS.CertFile) if err != nil { return nil, fmt.Errorf("failed to read defined cert file: %w", err) } key, err := os.ReadFile(s.config.TLS.KeyFile) if err != nil { return nil, fmt.Errorf("failed to read defined key file: %w", err) } serverCert, err := tls.X509KeyPair(cert, key) if err != nil { return nil, err } s.cert = &serverCert s.lastRead = time.Now() return &serverCert, nil } ================================================ FILE: pkg/api/server/server.go ================================================ // © 2024 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package server import ( "context" "crypto/tls" "io" "log" "net" "strings" "sync" "time" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/api/types" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sync/semaphore" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/health" healthpb "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/peer" "google.golang.org/grpc/reflection" "google.golang.org/grpc/status" ) type Config struct { // gRPC server address Address string // MaxUnaryRPC defines the max number of inflight // Unary RPCs (Cap, Get, Set,...). // if negative or unset, there is not limit. MaxUnaryRPC int64 // MaxStreamingRPC defines the max number of inflight // streaming RPCs (Subscribe,...). // if negative or unset, there is not limit. MaxStreamingRPC int64 // MaxRecvMsgSize defines the max message // size in bytes the server can receive. // If this is not set, it defaults to 4MB. MaxRecvMsgSize int // MaxSendMsgSize defines the the max message // size in bytes the server can send. // If this is not set, the default is `math.MaxInt32`. MaxSendMsgSize int // MaxConcurrentStreams defines the max number of // concurrent streams to each ServerTransport. MaxConcurrentStreams uint32 // TCPKeepalive set the TCP keepalive time and // interval, if unset it is enabled based on // the protocol used and the OS. // If negative it is disabled. TCPKeepalive time.Duration // Keepalive params Keepalive *keepalive.ServerParameters // enable gRPC Health RPCs HealthEnabled bool // unary RPC request timeout Timeout time.Duration // RPCs rate limit RateLimit int64 // TLS config TLS *types.TLSConfig } type gNMIServer struct { gnmi.UnimplementedGNMIServer config Config logger *log.Logger reg *prometheus.Registry // unarySem *semaphore.Weighted streamSem *semaphore.Weighted // gnmi handlers capabilitiesHandler CapabilitiesHandler getHandler GetHandler setHandler SetHandler subscribeHandler SubscribeHandler // cached certificate cm *sync.Mutex cert *tls.Certificate // certificate last read time lastRead time.Time } // gNMI Handlers type CapabilitiesHandler func(ctx context.Context, req *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) type GetHandler func(ctx context.Context, req *gnmi.GetRequest) (*gnmi.GetResponse, error) type SetHandler func(ctx context.Context, req *gnmi.SetRequest) (*gnmi.SetResponse, error) type SubscribeHandler func(req *gnmi.SubscribeRequest, stream gnmi.GNMI_SubscribeServer) error type Option func(*gNMIServer) func defaultCapabilitiesHandlerFunc(ctx context.Context, req *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) { return &gnmi.CapabilityResponse{ GNMIVersion: "0.10.0", }, nil } func (c *Config) setDefaults() error { if c.Address == "" { return errors.New("missing address") } if c.Timeout <= 0 { c.Timeout = 2 * time.Minute } return nil } func New(c Config, opts ...Option) (*gNMIServer, error) { err := c.setDefaults() if err != nil { return nil, err } s := &gNMIServer{ config: c, cm: new(sync.Mutex), } if c.MaxUnaryRPC > 0 { s.unarySem = semaphore.NewWeighted(c.MaxUnaryRPC) } if c.MaxStreamingRPC > 0 { s.streamSem = semaphore.NewWeighted(c.MaxStreamingRPC) } for _, o := range opts { o(s) } if s.capabilitiesHandler == nil { s.capabilitiesHandler = defaultCapabilitiesHandlerFunc } return s, nil } func (s *gNMIServer) Start(ctx context.Context) error { var networkType = "tcp" var addr = s.config.Address if indx := strings.Index(addr, "://"); indx > 0 { networkType = addr[:indx] addr = addr[indx+3:] } lc := &net.ListenConfig{ KeepAlive: s.config.TCPKeepalive, } var l net.Listener var err error for { l, err = lc.Listen(ctx, networkType, addr) if err != nil { err = errors.Wrap(err, "cannot listen") s.logger.Print(err) time.Sleep(time.Second) continue } break } opts, err := s.serverOpts() if err != nil { return err } // create a gRPC server object gs := grpc.NewServer(opts...) // register reflection reflection.Register(gs) // register gnmi service to the grpc server gnmi.RegisterGNMIServer(gs, s) if s.config.HealthEnabled { hs := health.NewServer() healthpb.RegisterHealthServer(gs, hs) hs.SetServingStatus("gNMI", healthpb.HealthCheckResponse_SERVING) } s.logger.Printf("starting gRPC server...") err = gs.Serve(l) if err != nil { s.logger.Printf("gRPC serve failed: %v", err) return err } return nil } func (s *gNMIServer) Capabilities(ctx context.Context, req *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) { if s.capabilitiesHandler == nil { return nil, status.Errorf(codes.Unimplemented, "method Capabilities not implemented") } ctx, cancel := context.WithTimeout(ctx, s.config.Timeout) defer cancel() err := s.acquireUnarySem(ctx) if err != nil { return nil, err } defer s.releaseUnarySem() return s.capabilitiesHandler(ctx, req) } func (s *gNMIServer) Get(ctx context.Context, req *gnmi.GetRequest) (*gnmi.GetResponse, error) { if s.getHandler == nil { return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") } ctx, cancel := context.WithTimeout(ctx, s.config.Timeout) defer cancel() err := s.acquireUnarySem(ctx) if err != nil { return nil, err } defer s.releaseUnarySem() return s.getHandler(ctx, req) } func (s *gNMIServer) Set(ctx context.Context, req *gnmi.SetRequest) (*gnmi.SetResponse, error) { if s.setHandler == nil { return nil, status.Errorf(codes.Unimplemented, "method Set not implemented") } ctx, cancel := context.WithTimeout(ctx, s.config.Timeout) defer cancel() err := s.acquireUnarySem(ctx) if err != nil { return nil, err } defer s.releaseUnarySem() return s.setHandler(ctx, req) } func (s *gNMIServer) Subscribe(stream gnmi.GNMI_SubscribeServer) error { if s.subscribeHandler == nil { return status.Errorf(codes.Unimplemented, "method Subscribe not implemented") } ctx := stream.Context() err := s.acquireStreamSem(ctx) if err != nil { return err } defer s.releaseStreamSem() // pr, _ := peer.FromContext(ctx) s.logger.Printf("received subscribe request from peer %s", pr.Addr) req, err := stream.Recv() switch { case err == io.EOF: return nil case err != nil: return err case req.GetSubscribe() == nil: return status.Errorf(codes.InvalidArgument, "the subscribe request must contain a subscription definition") } return s.subscribeHandler(req, stream) } func (s *gNMIServer) acquireUnarySem(ctx context.Context) error { select { case <-ctx.Done(): return ctx.Err() default: if s.config.MaxUnaryRPC <= 0 { return nil } return s.unarySem.Acquire(ctx, 1) } } func (s *gNMIServer) releaseUnarySem() { if s.config.MaxUnaryRPC <= 0 { return } s.unarySem.Release(1) } func (s *gNMIServer) acquireStreamSem(ctx context.Context) error { select { case <-ctx.Done(): return ctx.Err() default: if s.config.MaxStreamingRPC <= 0 { return nil } return s.streamSem.Acquire(ctx, 1) } } func (s *gNMIServer) releaseStreamSem() { if s.config.MaxStreamingRPC <= 0 { return } s.streamSem.Release(1) } // opts func WithLogger(l *log.Logger) func(*gNMIServer) { return func(s *gNMIServer) { s.logger = l } } func WithRegistry(reg *prometheus.Registry) func(*gNMIServer) { return func(s *gNMIServer) { s.reg = reg } } func WithCapabilitiesHandler(h CapabilitiesHandler) func(*gNMIServer) { return func(s *gNMIServer) { s.capabilitiesHandler = h } } func WithGetHandler(h GetHandler) func(*gNMIServer) { return func(s *gNMIServer) { s.getHandler = h } } func WithSetHandler(h SetHandler) func(*gNMIServer) { return func(s *gNMIServer) { s.setHandler = h } } func WithSubscribeHandler(h SubscribeHandler) func(*gNMIServer) { return func(s *gNMIServer) { s.subscribeHandler = h } } ================================================ FILE: pkg/api/target/subscribe.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package target import ( "context" "errors" "fmt" "io" "strings" "time" "github.com/jhump/protoreflect/dynamic" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/api/types" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) // Subscribe sends a gnmi.SubscribeRequest to the target *t, responses and error are sent to the target channels func (t *Target) Subscribe(ctx context.Context, req *gnmi.SubscribeRequest, subscriptionName string) { var subscribeClient gnmi.GNMI_SubscribeClient var nctx context.Context var cancel context.CancelFunc var err error goto SUBSC_NODELAY SUBSC: { retry := time.NewTimer(t.Config.RetryTimer) select { case <-ctx.Done(): retry.Stop() return case <-retry.C: } } SUBSC_NODELAY: select { case <-ctx.Done(): return default: nctx, cancel = context.WithCancel(ctx) nctx = t.appendRequestMetadata(nctx) subscribeClient, err = t.Client.Subscribe(nctx, t.callOpts()...) if err != nil { t.errors <- &TargetError{ SubscriptionName: subscriptionName, Err: fmt.Errorf("failed to create a subscribe client, target='%s', retry in %s. err=%v", t.Config.Name, t.Config.RetryTimer, err), } cancel() goto SUBSC } } t.m.Lock() if cfn, ok := t.subscribeCancelFn[subscriptionName]; ok { cfn() } t.SubscribeClients[subscriptionName] = subscribeClient t.subscribeCancelFn[subscriptionName] = cancel subConfig := t.Subscriptions[subscriptionName] t.m.Unlock() err = subscribeClient.Send(req) if err != nil { select { case t.errors <- &TargetError{ SubscriptionName: subscriptionName, Err: fmt.Errorf("target '%s' send error, retry in %s. err=%v", t.Config.Name, t.Config.RetryTimer, err), }: case <-ctx.Done(): cancel() return } cancel() goto SUBSC } switch req.GetSubscribe().GetMode() { case gnmi.SubscriptionList_STREAM: err = t.handleStreamSubscriptionRcv(nctx, subscribeClient, subscriptionName, subConfig, t.subscribeResponses) if err != nil { select { case t.errors <- &TargetError{ SubscriptionName: subscriptionName, Err: err, }: case <-ctx.Done(): cancel() return } select { case t.errors <- &TargetError{ SubscriptionName: subscriptionName, Err: fmt.Errorf("retrying in %s", t.Config.RetryTimer), }: case <-ctx.Done(): cancel() return } cancel() goto SUBSC } case gnmi.SubscriptionList_ONCE: err = t.handleONCESubscriptionRcv(nctx, subscribeClient, subscriptionName, subConfig, t.subscribeResponses) if err != nil { select { case t.errors <- &TargetError{ SubscriptionName: subscriptionName, Err: err, }: case <-ctx.Done(): cancel() return } if errors.Is(err, io.EOF) { cancel() return } select { case t.errors <- &TargetError{ SubscriptionName: subscriptionName, Err: fmt.Errorf("retrying in %s", t.Config.RetryTimer), }: case <-ctx.Done(): cancel() return } cancel() goto SUBSC } cancel() return case gnmi.SubscriptionList_POLL: go t.listenPolls(nctx) err = t.handlePollSubscriptionRcv(nctx, subscribeClient, subscriptionName, subConfig, t.subscribeResponses) if err != nil { select { case t.errors <- &TargetError{ SubscriptionName: subscriptionName, Err: err, }: case <-ctx.Done(): cancel() return } cancel() goto SUBSC } } cancel() } func (t *Target) SubscribeChan(ctx context.Context, req *gnmi.SubscribeRequest, subscriptionName string) (chan *SubscribeResponse, chan *TargetError) { responseCh := make(chan *SubscribeResponse, 1) errCh := make(chan *TargetError, 1) go func() { defer close(responseCh) defer close(errCh) firstAttempt := true for { // retry delay, skipped the first attempt if !firstAttempt { timer := time.NewTimer(t.Config.RetryTimer) select { case <-ctx.Done(): timer.Stop() return case <-timer.C: } } firstAttempt = false // check if parent context is done if ctx.Err() != nil { return } // attempt subscription // return true if retry is needed shouldRetry := t.attemptSubscription(ctx, req, subscriptionName, responseCh, errCh) if !shouldRetry { return } } }() return responseCh, errCh } func (t *Target) attemptSubscription(ctx context.Context, req *gnmi.SubscribeRequest, subscriptionName string, responseCh chan *SubscribeResponse, errCh chan *TargetError) bool { // create child context for this attempt nctx, cancel := context.WithCancel(ctx) defer cancel() nctx = t.appendRequestMetadata(nctx) // create subscribe client subscribeClient, err := t.Client.Subscribe(nctx, t.callOpts()...) if err != nil { // check if cancellation was intentional if isCancellationError(err) { return false } sendError(errCh, ctx, subscriptionName, err) return true } // store subscription state and register cleanup t.m.Lock() if oldCancel, ok := t.subscribeCancelFn[subscriptionName]; ok { oldCancel() // cancel previous attempt } t.SubscribeClients[subscriptionName] = subscribeClient t.subscribeCancelFn[subscriptionName] = cancel subConfig := t.Subscriptions[subscriptionName] t.m.Unlock() // cleanup on exit (registered after state is stored) defer t.StopSubscription(subscriptionName) // send initial subscribe request err = subscribeClient.Send(req) if err != nil { sendError(errCh, ctx, subscriptionName, fmt.Errorf("target '%s' send error, retry in %s: %w", t.Config.Name, t.Config.RetryTimer, err)) return true } // handle subscription based on mode switch req.GetSubscribe().GetMode() { case gnmi.SubscriptionList_STREAM: return t.handleSTREAMMode(nctx, ctx, subscribeClient, subscriptionName, subConfig, responseCh, errCh) case gnmi.SubscriptionList_ONCE: return t.handleONCEMode(nctx, ctx, subscribeClient, subscriptionName, subConfig, responseCh, errCh) case gnmi.SubscriptionList_POLL: return t.handlePOLLMode(nctx, ctx, subscribeClient, subscriptionName, subConfig, responseCh, errCh) } return false } func (t *Target) handleSTREAMMode(nctx, ctx context.Context, client gnmi.GNMI_SubscribeClient, subscriptionName string, subConfig *types.SubscriptionConfig, responseCh chan *SubscribeResponse, errCh chan *TargetError) bool { err := t.handleStreamSubscriptionRcv(nctx, client, subscriptionName, subConfig, responseCh) if err != nil { if isCancellationError(err) { return false } sendError(errCh, ctx, subscriptionName, err) return true } return false } func (t *Target) handleONCEMode(nctx, ctx context.Context, client gnmi.GNMI_SubscribeClient, subscriptionName string, subConfig *types.SubscriptionConfig, responseCh chan *SubscribeResponse, errCh chan *TargetError) bool { err := t.handleONCESubscriptionRcv(nctx, client, subscriptionName, subConfig, responseCh) if err != nil { if isCancellationError(err) { return false } sendError(errCh, ctx, subscriptionName, err) // ONCE mode doesn't retry on EOF if errors.Is(err, io.EOF) { return false } return true } return false } func (t *Target) handlePOLLMode(nctx, ctx context.Context, client gnmi.GNMI_SubscribeClient, subscriptionName string, subConfig *types.SubscriptionConfig, responseCh chan *SubscribeResponse, errCh chan *TargetError) bool { // Start poll listener once per target (not per subscription attempt) // This prevents goroutine leaks on retry t.m.Lock() if t.pollChan == nil { t.pollChan = make(chan string, 10) go t.listenPolls(ctx) // Use parent context, not nctx } t.m.Unlock() err := t.handlePollSubscriptionRcv(nctx, client, subscriptionName, subConfig, responseCh) if err != nil { if isCancellationError(err) { return false } sendError(errCh, ctx, subscriptionName, err) // sendError(errCh, ctx, subscriptionName, // fmt.Errorf("retrying in %s", t.Config.RetryTimer)) return true } return false } // check if error is due to intentional cancellation func isCancellationError(err error) bool { if errors.Is(err, context.Canceled) { return true } st, ok := status.FromError(err) return ok && st.Code() == codes.Canceled } // send error to channel with context awareness func sendError(errCh chan *TargetError, ctx context.Context, subscriptionName string, err error) bool { select { case errCh <- &TargetError{ SubscriptionName: subscriptionName, Err: err, }: return true case <-ctx.Done(): return false } } func (t *Target) SubscribeStreamChan(ctx context.Context, req *gnmi.SubscribeRequest, subscriptionName string) (chan *gnmi.SubscribeResponse, chan error) { responseCh := make(chan *gnmi.SubscribeResponse) errCh := make(chan error) go func() { if req.GetSubscribe().GetMode() != gnmi.SubscriptionList_STREAM { errCh <- fmt.Errorf("subscribe request does not define a STREAM subscription: %v", req.GetSubscribe().GetMode()) close(errCh) close(responseCh) return } var subscribeClient gnmi.GNMI_SubscribeClient var nctx context.Context var cancel context.CancelFunc var err error goto SUBSC_NODELAY SUBSC: { retry := time.NewTimer(t.Config.RetryTimer) select { case <-ctx.Done(): retry.Stop() return case <-retry.C: } } SUBSC_NODELAY: select { case <-ctx.Done(): return default: nctx, cancel = context.WithCancel(ctx) defer cancel() nctx = t.appendRequestMetadata(nctx) subscribeClient, err = t.Client.Subscribe(nctx, t.callOpts()...) if err != nil { errCh <- fmt.Errorf("failed to create a subscribe client, target='%s', retry in %s. err=%v", t.Config.Name, t.Config.RetryTimer, err) cancel() goto SUBSC } } t.m.Lock() if cfn, ok := t.subscribeCancelFn[subscriptionName]; ok { cfn() } t.SubscribeClients[subscriptionName] = subscribeClient t.subscribeCancelFn[subscriptionName] = cancel t.m.Unlock() err = subscribeClient.Send(req) if err != nil { errCh <- fmt.Errorf("target '%s' send error, retry in %s. err=%v", t.Config.Name, t.Config.RetryTimer, err) cancel() goto SUBSC } for { if ctx.Err() != nil { errCh <- ctx.Err() cancel() goto SUBSC } response, err := subscribeClient.Recv() if err != nil { errCh <- err cancel() goto SUBSC } responseCh <- response } }() return responseCh, errCh } func (t *Target) SubscribeOnceChan(ctx context.Context, req *gnmi.SubscribeRequest) (chan *gnmi.SubscribeResponse, chan error) { responseCh := make(chan *gnmi.SubscribeResponse) errCh := make(chan error) go func() { nctx, cancel := context.WithCancel(ctx) defer cancel() nctx = t.appendRequestMetadata(nctx) subscribeClient, err := t.Client.Subscribe(nctx, t.callOpts()...) if err != nil { errCh <- err return } err = subscribeClient.Send(req) if err != nil { errCh <- err return } for { response, err := subscribeClient.Recv() if err != nil { errCh <- err return } responseCh <- response } }() return responseCh, errCh } func (t *Target) SubscribeOnce(ctx context.Context, req *gnmi.SubscribeRequest) ([]*gnmi.SubscribeResponse, error) { responses := make([]*gnmi.SubscribeResponse, 0) rspChan, errChan := t.SubscribeOnceChan(ctx, req) LOOP: for { select { case r := <-rspChan: switch r.Response.(type) { case *gnmi.SubscribeResponse_Update: responses = append(responses, r) case *gnmi.SubscribeResponse_SyncResponse: break LOOP } case err := <-errChan: // only non nil errors if err == io.EOF { break LOOP } return nil, err } } return responses, nil } func (t *Target) SubscribePoll(ctx context.Context, subName string) error { t.m.Lock() stream, ok := t.SubscribeClients[subName] t.m.Unlock() if !ok { return fmt.Errorf("unknown subscription name %q", subName) } return stream.Send(&gnmi.SubscribeRequest{ Request: &gnmi.SubscribeRequest_Poll{ Poll: new(gnmi.Poll), }, }) } func (t *Target) ReadSubscriptions() (chan *SubscribeResponse, chan *TargetError) { return t.subscribeResponses, t.errors } func (t *Target) NumberOfOnceSubscriptions() int { num := 0 t.m.Lock() defer t.m.Unlock() for _, sub := range t.Subscriptions { if strings.ToUpper(sub.Mode) == "ONCE" { num++ } } return num } func (t *Target) DecodeProtoBytes(resp *gnmi.SubscribeResponse) error { if t.RootDesc == nil { return nil } switch resp := resp.Response.(type) { case *gnmi.SubscribeResponse_Update: for _, update := range resp.Update.Update { switch update.Val.Value.(type) { case *gnmi.TypedValue_ProtoBytes: m := dynamic.NewMessage(t.RootDesc.GetFile().FindMessage("Nokia.SROS.root")) err := m.Unmarshal(update.Val.GetProtoBytes()) if err != nil { return err } jsondata, err := m.MarshalJSON() if err != nil { return err } update.Val.Value = &gnmi.TypedValue_JsonVal{JsonVal: jsondata} } } } return nil } func (t *Target) DeleteSubscription(name string) { t.m.Lock() defer t.m.Unlock() if _, ok := t.subscribeCancelFn[name]; ok { t.subscribeCancelFn[name]() } delete(t.subscribeCancelFn, name) delete(t.SubscribeClients, name) delete(t.Subscriptions, name) } func (t *Target) StopSubscription(name string) { t.m.Lock() defer t.m.Unlock() cfn, ok := t.subscribeCancelFn[name] if ok { cfn() } delete(t.subscribeCancelFn, name) delete(t.SubscribeClients, name) } func (t *Target) listenPolls(ctx context.Context) { for { select { case subName := <-t.pollChan: err := t.SubscribePoll(ctx, subName) if err != nil { t.errors <- &TargetError{ SubscriptionName: subName, Err: fmt.Errorf("failed to send PollRequest to subscription %s: %v", subName, err), } } case <-ctx.Done(): return } } } func (t *Target) handleStreamSubscriptionRcv(ctx context.Context, stream gnmi.GNMI_SubscribeClient, subscriptionName string, subConfig *types.SubscriptionConfig, ch chan *SubscribeResponse) error { for { if ctx.Err() != nil { return nil } response, err := stream.Recv() if err != nil { return err } select { case ch <- &SubscribeResponse{ SubscriptionName: subscriptionName, SubscriptionConfig: subConfig, Response: response, }: case <-ctx.Done(): return nil } } } func (t *Target) handleONCESubscriptionRcv(ctx context.Context, stream gnmi.GNMI_SubscribeClient, subscriptionName string, subConfig *types.SubscriptionConfig, ch chan *SubscribeResponse) error { for { if ctx.Err() != nil { return nil } response, err := stream.Recv() if err != nil { return err } select { case <-ctx.Done(): case ch <- &SubscribeResponse{ SubscriptionName: subscriptionName, SubscriptionConfig: subConfig, Response: response, }: } switch response.Response.(type) { case *gnmi.SubscribeResponse_SyncResponse: return nil } } } func (t *Target) handlePollSubscriptionRcv(ctx context.Context, stream gnmi.GNMI_SubscribeClient, subscriptionName string, subConfig *types.SubscriptionConfig, ch chan *SubscribeResponse) error { for { select { case <-ctx.Done(): return nil default: response, err := stream.Recv() if err != nil { return err } select { case <-ctx.Done(): return nil case ch <- &SubscribeResponse{ SubscriptionName: subscriptionName, SubscriptionConfig: subConfig, Response: response, }: } } } } ================================================ FILE: pkg/api/target/target.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package target import ( "context" "encoding/base64" "fmt" "net" "strings" "sync" "github.com/jhump/protoreflect/desc" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmi/proto/gnmi_ext" "github.com/openconfig/gnmic/pkg/api/types" "golang.org/x/net/proxy" "golang.org/x/oauth2" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/oauth" "google.golang.org/grpc/metadata" ) type TargetError struct { SubscriptionName string Err error } // SubscribeResponse // type SubscribeResponse struct { SubscriptionName string SubscriptionConfig *types.SubscriptionConfig Response *gnmi.SubscribeResponse } // Target represents a gNMI enabled box type Target struct { Config *types.TargetConfig `json:"config,omitempty"` Subscriptions map[string]*types.SubscriptionConfig `json:"subscriptions,omitempty"` m *sync.Mutex conn *grpc.ClientConn Client gnmi.GNMIClient `json:"-"` SubscribeClients map[string]gnmi.GNMI_SubscribeClient `json:"-"` // subscription name to subscribeClient subscribeCancelFn map[string]context.CancelFunc pollChan chan string // subscription name to be polled subscribeResponses chan *SubscribeResponse errors chan *TargetError stopped bool StopChan chan struct{} `json:"-"` Cfn context.CancelFunc `json:"-"` RootDesc desc.Descriptor `json:"-"` } // NewTarget // func NewTarget(c *types.TargetConfig) *Target { t := &Target{ Config: c, Subscriptions: make(map[string]*types.SubscriptionConfig), m: new(sync.Mutex), SubscribeClients: make(map[string]gnmi.GNMI_SubscribeClient), subscribeCancelFn: make(map[string]context.CancelFunc), pollChan: make(chan string), subscribeResponses: make(chan *SubscribeResponse, c.BufferSize), errors: make(chan *TargetError, c.BufferSize), StopChan: make(chan struct{}), } return t } // CreateGNMIClient // func (t *Target) CreateGNMIClient(ctx context.Context, opts ...grpc.DialOption) error { tOpts, err := t.Config.GrpcDialOptions() if err != nil { return err } opts = append(opts, tOpts...) // create a gRPC connection addrs := strings.Split(t.Config.Address, ",") numAddrs := len(addrs) errC := make(chan error, numAddrs) connC := make(chan *grpc.ClientConn) done := make(chan struct{}) ctx, cancel := context.WithCancel(ctx) defer cancel() for _, addr := range addrs { go func(addr string) { // copy opts optsCopy := make([]grpc.DialOption, len(opts)) copy(optsCopy, opts) timeoutCtx, cancel := context.WithTimeout(ctx, t.Config.Timeout) defer cancel() // add the local custom dialer only if the target is a not tunneled. if t.Config.TunnelTargetType == "" { optsCopy = append(optsCopy, grpc.WithContextDialer(t.createDialer(addr))) } conn, err := grpc.DialContext(timeoutCtx, addr, optsCopy...) if err != nil { errC <- fmt.Errorf("%s: %v", addr, err) return } select { case connC <- conn: case <-done: if conn != nil { conn.Close() } } }(addr) } errs := make([]string, 0, numAddrs) for { select { case conn := <-connC: close(done) t.conn = conn t.Client = gnmi.NewGNMIClient(conn) return nil case err := <-errC: errs = append(errs, err.Error()) if len(errs) == numAddrs { return fmt.Errorf("%s", strings.Join(errs, ", ")) } } } } func (t *Target) createDialer(addr string) func(context.Context, string) (net.Conn, error) { // socks5 proxy dialer if t.Config.Proxy != "" { if idx := strings.Index(t.Config.Proxy, "://"); idx >= 0 { proxyType := t.Config.Proxy[:idx] proxyAddress := t.Config.Proxy[idx+3:] if proxyType == "socks5" { return t.createProxyDialer(proxyAddress) } } } // non socks5 proxy or non-proxied dialer return t.createCustomDialer(addr) } func (t *Target) createProxyDialer(addr string) func(context.Context, string) (net.Conn, error) { return func(_ context.Context, targetAddr string) (net.Conn, error) { dialer, err := proxy.SOCKS5("tcp", addr, nil, &net.Dialer{ Timeout: t.Config.Timeout, KeepAlive: t.Config.TCPKeepalive, }, ) if err != nil { return nil, err } return dialer.Dial("tcp", targetAddr) } } func (t *Target) createCustomDialer(addr string) func(context.Context, string) (net.Conn, error) { return func(ctx context.Context, _ string) (net.Conn, error) { dialer := net.Dialer{ Timeout: t.Config.Timeout, KeepAlive: t.Config.TCPKeepalive, } ctx, cancel := context.WithTimeout(ctx, t.Config.Timeout) defer cancel() var networkType = "tcp" if indx := strings.Index(addr, "://"); indx > 0 { if addr[:indx] == "unix" { networkType = "unix" addr = addr[indx+3:] } } return dialer.DialContext(ctx, networkType, addr) } } func (t *Target) callOpts() []grpc.CallOption { if t.Config.AuthScheme == "" { return nil } callOpts := make([]grpc.CallOption, 0, 1) var auth string if t.Config.Username != nil { auth = *t.Config.Username } auth += ":" if t.Config.Password != nil { auth += *t.Config.Password } callOpts = append(callOpts, grpc.PerRPCCredentials( oauth.TokenSource{ TokenSource: oauth2.StaticTokenSource( &oauth2.Token{ AccessToken: base64.StdEncoding.EncodeToString([]byte(auth)), TokenType: t.Config.AuthScheme, }, ), }, )) return callOpts } func (t *Target) appendRequestMetadata(ctx context.Context) context.Context { ctx = t.appendCredentials(ctx) ctx = t.appendMetadata(ctx) return ctx } func (t *Target) appendCredentials(ctx context.Context) context.Context { if t.Config.AuthScheme != "" { return ctx } if t.Config.Username != nil && *t.Config.Username != "" { ctx = metadata.AppendToOutgoingContext(ctx, "username", *t.Config.Username) } if t.Config.Password != nil && *t.Config.Password != "" { ctx = metadata.AppendToOutgoingContext(ctx, "password", *t.Config.Password) } return ctx } func (t *Target) appendMetadata(ctx context.Context) context.Context { var pairs []string for k, v := range t.Config.Metadata { pairs = append(pairs, k, v) } return metadata.AppendToOutgoingContext(ctx, pairs...) } // Capabilities sends a gnmi.CapabilitiesRequest to the target *t and returns a gnmi.CapabilitiesResponse and an error func (t *Target) Capabilities(ctx context.Context, ext ...*gnmi_ext.Extension) (*gnmi.CapabilityResponse, error) { return t.Client.Capabilities(t.appendRequestMetadata(ctx), &gnmi.CapabilityRequest{Extension: ext}, t.callOpts()...) } // Get sends a gnmi.GetRequest to the target *t and returns a gnmi.GetResponse and an error func (t *Target) Get(ctx context.Context, req *gnmi.GetRequest) (*gnmi.GetResponse, error) { return t.Client.Get(t.appendRequestMetadata(ctx), req, t.callOpts()...) } // Set sends a gnmi.SetRequest to the target *t and returns a gnmi.SetResponse and an error func (t *Target) Set(ctx context.Context, req *gnmi.SetRequest) (*gnmi.SetResponse, error) { return t.Client.Set(t.appendRequestMetadata(ctx), req, t.callOpts()...) } func (t *Target) StopSubscriptions() { t.m.Lock() defer t.m.Unlock() for _, cfn := range t.subscribeCancelFn { cfn() } if t.Cfn != nil { t.Cfn() } if !t.stopped { close(t.StopChan) } t.stopped = true } func (t *Target) Close() error { t.StopSubscriptions() if t.conn != nil { return t.conn.Close() } return nil } // SubscribeClientStates returns current subscription states. // based on the SubscribeClients map. func (t *Target) SubscribeClientStates() map[string]bool { t.m.Lock() defer t.m.Unlock() if len(t.Subscriptions) == 0 { return nil } states := make(map[string]bool, len(t.Subscriptions)) for name := range t.Subscriptions { _, ok := t.SubscribeClients[name] states[name] = ok } return states } func (t *Target) ConnState() string { if t.conn == nil { return "" } return t.conn.GetState().String() } // WaitForConnStateChange blocks until the gRPC connection state changes from // sourceState or ctx is done. Returns true if the state changed, false if // ctx expired. Returns false immediately if conn is nil. func (t *Target) WaitForConnStateChange(ctx context.Context, sourceState connectivity.State) bool { if t.conn == nil { return false } return t.conn.WaitForStateChange(ctx, sourceState) } // ConnectivityState returns the current gRPC connectivity state. // Returns connectivity.Shutdown if the connection is nil. func (t *Target) ConnectivityState() connectivity.State { if t.conn == nil { return connectivity.Shutdown } return t.conn.GetState() } ================================================ FILE: pkg/api/target.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package api import ( "crypto/tls" "errors" "strings" "time" "github.com/AlekSi/pointer" "github.com/openconfig/gnmic/pkg/api/target" "github.com/openconfig/gnmic/pkg/api/types" ) var DefaultTargetTimeout = 10 * time.Second type TargetOption func(*target.Target) error func NewTarget(opts ...TargetOption) (*target.Target, error) { t := target.NewTarget(&types.TargetConfig{}) var err error for _, o := range opts { err = o(t) if err != nil { return nil, err } } if t.Config.Address == "" { return nil, errors.New("missing address") } if t.Config.Name == "" { t.Config.Name = strings.Split(t.Config.Address, ",")[0] } if t.Config.Timeout == 0 { t.Config.Timeout = DefaultTargetTimeout } if t.Config.Insecure == nil && t.Config.SkipVerify == nil { t.Config.Insecure = pointer.ToBool(false) t.Config.SkipVerify = pointer.ToBool(false) } if t.Config.SkipVerify == nil { t.Config.SkipVerify = pointer.ToBool(false) } if t.Config.Insecure == nil { t.Config.Insecure = pointer.ToBool(false) } return t, nil } // Name sets the target name. func Name(name string) TargetOption { return func(t *target.Target) error { t.Config.Name = name return nil } } // Address sets the target address. // This Option can be set multiple times. func Address(addr string) TargetOption { return func(t *target.Target) error { if t.Config.Address != "" { t.Config.Address = strings.Join([]string{t.Config.Address, addr}, ",") return nil } t.Config.Address = addr return nil } } // Username sets the target Username. func Username(username string) TargetOption { return func(t *target.Target) error { t.Config.Username = pointer.ToString(username) return nil } } // Password sets the target Password. func Password(password string) TargetOption { return func(t *target.Target) error { t.Config.Password = pointer.ToString(password) return nil } } // Timeout sets the gNMI client creation timeout. func Timeout(timeout time.Duration) TargetOption { return func(t *target.Target) error { t.Config.Timeout = timeout return nil } } // Insecure sets the option to create a gNMI client with an // insecure gRPC connection func Insecure(i bool) TargetOption { return func(t *target.Target) error { t.Config.Insecure = pointer.ToBool(i) return nil } } // SkipVerify sets the option to create a gNMI client with a // secure gRPC connection without verifying the target's certificates. func SkipVerify(i bool) TargetOption { return func(t *target.Target) error { t.Config.SkipVerify = pointer.ToBool(i) return nil } } // TLSCA sets that path towards the TLS certificate authority file. func TLSCA(tlsca string) TargetOption { return func(t *target.Target) error { t.Config.TLSCA = pointer.ToString(tlsca) return nil } } // TLSCert sets that path towards the TLS certificate file. func TLSCert(cert string) TargetOption { return func(t *target.Target) error { t.Config.TLSCert = pointer.ToString(cert) return nil } } // TLSKey sets that path towards the TLS key file. func TLSKey(key string) TargetOption { return func(t *target.Target) error { t.Config.TLSKey = pointer.ToString(key) return nil } } // TLSMinVersion sets the TLS minimum version used during the TLS handshake. func TLSMinVersion(v string) TargetOption { return func(t *target.Target) error { t.Config.TLSMinVersion = v return nil } } // TLSMaxVersion sets the TLS maximum version used during the TLS handshake. func TLSMaxVersion(v string) TargetOption { return func(t *target.Target) error { t.Config.TLSMaxVersion = v return nil } } // TLSVersion sets the desired TLS version used during the TLS handshake. func TLSVersion(v string) TargetOption { return func(t *target.Target) error { t.Config.TLSVersion = v return nil } } // TLSConfig func TLSConfig(tlsconfig *tls.Config) TargetOption { return func(t *target.Target) error { t.Config.SetTLSConfig(tlsconfig) return nil } } // LogTLSSecret, if set to true, // enables logging of the TLS master key. func LogTLSSecret(b bool) TargetOption { return func(t *target.Target) error { t.Config.LogTLSSecret = pointer.ToBool(b) return nil } } // Gzip, if set to true, // adds gzip compression to the gRPC connection. func Gzip(b bool) TargetOption { return func(t *target.Target) error { t.Config.Gzip = pointer.ToBool(b) return nil } } // Token sets the per RPC credentials for all RPC calls. func Token(token string) TargetOption { return func(t *target.Target) error { t.Config.Token = pointer.ToString(token) return nil } } ================================================ FILE: pkg/api/target_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package api import ( "testing" "github.com/AlekSi/pointer" "github.com/openconfig/gnmic/pkg/api/types" ) type input struct { opts []TargetOption config *types.TargetConfig } var targetTestSet = map[string]input{ "address": { opts: []TargetOption{ Address("10.0.0.1:57400"), Insecure(true), }, config: &types.TargetConfig{ Name: "10.0.0.1:57400", Address: "10.0.0.1:57400", Insecure: pointer.ToBool(true), SkipVerify: pointer.ToBool(false), Timeout: DefaultTargetTimeout, }, }, "username": { opts: []TargetOption{ Address("10.0.0.1:57400"), Username("admin"), }, config: &types.TargetConfig{ Name: "10.0.0.1:57400", Address: "10.0.0.1:57400", Username: pointer.ToString("admin"), Insecure: pointer.ToBool(false), SkipVerify: pointer.ToBool(false), Timeout: DefaultTargetTimeout, }, }, "two_addresses": { opts: []TargetOption{ Address("10.0.0.1:57400"), Address("10.0.0.2:57400"), Insecure(true), }, config: &types.TargetConfig{ Name: "10.0.0.1:57400", Address: "10.0.0.1:57400,10.0.0.2:57400", Insecure: pointer.ToBool(true), SkipVerify: pointer.ToBool(false), Timeout: DefaultTargetTimeout, }, }, "skip_verify": { opts: []TargetOption{ Address("10.0.0.1:57400"), SkipVerify(true), }, config: &types.TargetConfig{ Name: "10.0.0.1:57400", Address: "10.0.0.1:57400", Insecure: pointer.ToBool(false), SkipVerify: pointer.ToBool(true), Timeout: DefaultTargetTimeout, }, }, "tlsca": { opts: []TargetOption{ Address("10.0.0.1:57400"), TLSCA("tlsca_path"), }, config: &types.TargetConfig{ Name: "10.0.0.1:57400", Address: "10.0.0.1:57400", Insecure: pointer.ToBool(false), SkipVerify: pointer.ToBool(false), Timeout: DefaultTargetTimeout, TLSCA: pointer.ToString("tlsca_path"), }, }, "tls_key_cert": { opts: []TargetOption{ Address("10.0.0.1:57400"), TLSKey("tlskey_path"), TLSCert("tlscert_path"), }, config: &types.TargetConfig{ Name: "10.0.0.1:57400", Address: "10.0.0.1:57400", Insecure: pointer.ToBool(false), SkipVerify: pointer.ToBool(false), Timeout: DefaultTargetTimeout, TLSKey: pointer.ToString("tlskey_path"), TLSCert: pointer.ToString("tlscert_path"), }, }, "token": { opts: []TargetOption{ Address("10.0.0.1:57400"), Token("token_value"), }, config: &types.TargetConfig{ Name: "10.0.0.1:57400", Address: "10.0.0.1:57400", Insecure: pointer.ToBool(false), SkipVerify: pointer.ToBool(false), Timeout: DefaultTargetTimeout, Token: pointer.ToString("token_value"), }, }, "gzip": { opts: []TargetOption{ Address("10.0.0.1:57400"), Gzip(true), }, config: &types.TargetConfig{ Name: "10.0.0.1:57400", Address: "10.0.0.1:57400", Insecure: pointer.ToBool(false), SkipVerify: pointer.ToBool(false), Timeout: DefaultTargetTimeout, Gzip: pointer.ToBool(true), }, }, } func TestNewTarget(t *testing.T) { for name, item := range targetTestSet { t.Run(name, func(t *testing.T) { tg, err := NewTarget(item.opts...) if err != nil { t.Errorf("failed at %q: %v", name, err) t.Fail() } if tg.Config.String() != item.config.String() { t.Errorf("failed at %q", name) t.Errorf("expected %+v", item.config) t.Errorf(" got %+v", tg.Config) t.Fail() } }) } } ================================================ FILE: pkg/api/testutils/utils.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package testutils import ( "bytes" "github.com/google/go-cmp/cmp" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmi/proto/gnmi_ext" tpb "github.com/openconfig/grpctunnel/proto/tunnel" ) func CapabilitiesResponsesEqual(rsp1, rsp2 *gnmi.CapabilityResponse) bool { if rsp1 == nil && rsp2 == nil { return true } if rsp1 == nil || rsp2 == nil { return false } if rsp1.GNMIVersion != rsp2.GNMIVersion { return false } if len(rsp1.SupportedEncodings) != len(rsp2.SupportedEncodings) { return false } if len(rsp1.SupportedModels) != len(rsp2.SupportedModels) { return false } for i := range rsp1.SupportedEncodings { if rsp1.SupportedEncodings[i] != rsp2.SupportedEncodings[i] { return false } } for i := range rsp1.SupportedModels { if !cmp.Equal(rsp1.SupportedModels[i], rsp2.SupportedModels[i]) { return false } } return true } func GetRequestsEqual(req1, req2 *gnmi.GetRequest) bool { if req1 == nil && req2 == nil { return true } if req1 == nil || req2 == nil { return false } if req1.Encoding != req2.Encoding || req1.Type != req2.Type { return false } if !GnmiPathsEqual(req1.Prefix, req2.Prefix) { return false } if len(req1.Path) != len(req2.Path) { return false } for i := range req1.Path { if !GnmiPathsEqual(req1.Path[i], req2.Path[i]) { return false } } if len(req1.Extension) != len(req2.Extension) { return false } if len(req1.UseModels) != len(req2.UseModels) { return false } for i := range req1.UseModels { if req1.UseModels[i].Name != req2.UseModels[i].Name { return false } } return true } func SetRequestsEqual(req1, req2 *gnmi.SetRequest) bool { if req1 == nil && req2 == nil { return true } if req1 == nil || req2 == nil { return false } if len(req1.GetDelete()) != len(req2.GetDelete()) || len(req1.GetReplace()) != len(req2.GetReplace()) || len(req1.GetUpdate()) != len(req2.GetUpdate()) { return false } if !GnmiPathsEqual(req1.GetPrefix(), req2.GetPrefix()) { return false } for i := range req1.GetDelete() { if !GnmiPathsEqual(req1.GetDelete()[i], req2.GetDelete()[i]) { return false } } for i := range req1.GetUpdate() { if !GnmiPathsEqual(req1.GetUpdate()[i].GetPath(), req2.GetUpdate()[i].GetPath()) { return false } if !cmp.Equal(req1.GetUpdate()[i].GetVal().GetValue(), req2.GetUpdate()[i].GetVal().GetValue()) { return false } } for i := range req1.GetReplace() { if !GnmiPathsEqual(req1.GetReplace()[i].GetPath(), req2.GetReplace()[i].GetPath()) { return false } if !cmp.Equal(req1.GetReplace()[i].GetVal().GetValue(), req2.GetReplace()[i].GetVal().GetValue()) { return false } } return true } func SubscribeRequestsEqual(req1, req2 *gnmi.SubscribeRequest) bool { if req1 == nil && req2 == nil { return true } if req1 == nil || req2 == nil { return false } if len(req1.GetExtension()) != len(req2.GetExtension()) { return false } // only checks if extensions are of the same type for i, ext := range req1.GetExtension() { switch ext.Ext.(type) { case *gnmi_ext.Extension_RegisteredExt: switch req2.GetExtension()[i].Ext.(type) { case *gnmi_ext.Extension_RegisteredExt: default: return false } case *gnmi_ext.Extension_History: switch req2.GetExtension()[i].Ext.(type) { case *gnmi_ext.Extension_History: default: return false } case *gnmi_ext.Extension_MasterArbitration: switch req2.GetExtension()[i].Ext.(type) { case *gnmi_ext.Extension_MasterArbitration: default: return false } } } switch req1.Request.(type) { case *gnmi.SubscribeRequest_Subscribe: switch req2.Request.(type) { case *gnmi.SubscribeRequest_Subscribe: default: return false } case *gnmi.SubscribeRequest_Poll: switch req2.Request.(type) { case *gnmi.SubscribeRequest_Poll: default: return false } } // compare subscribe request subscribe switch req1 := req1.Request.(type) { case *gnmi.SubscribeRequest_Subscribe: switch req2 := req2.Request.(type) { case *gnmi.SubscribeRequest_Subscribe: if req1.Subscribe.GetEncoding() != req2.Subscribe.GetEncoding() { return false } if req1.Subscribe.GetMode() != req2.Subscribe.GetMode() { return false } if req1.Subscribe.GetQos().GetMarking() != req2.Subscribe.GetQos().GetMarking() { return false } if len(req1.Subscribe.GetSubscription()) != len(req2.Subscribe.GetSubscription()) { return false } if req1.Subscribe.GetUpdatesOnly() != req2.Subscribe.GetUpdatesOnly() { return false } if req1.Subscribe.GetAllowAggregation() != req2.Subscribe.GetAllowAggregation() { return false } if !GnmiPathsEqual(req1.Subscribe.Prefix, req2.Subscribe.Prefix) { return false } if len(req1.Subscribe.GetUseModels()) != len(req2.Subscribe.GetUseModels()) { return false } for i := range req1.Subscribe.GetUseModels() { if req1.Subscribe.GetUseModels()[i].Name != req2.Subscribe.GetUseModels()[i].Name { return false } } for i, sub := range req1.Subscribe.GetSubscription() { if !GnmiSubscriptionEqual(sub, req1.Subscribe.GetSubscription()[i]) { return false } } } } return true } func GetResponsesEqual(rsp1, rsp2 *gnmi.GetResponse) bool { if rsp1 == nil && rsp2 == nil { return true } if rsp1 == nil || rsp2 == nil { return false } if len(rsp1.GetNotification()) != len(rsp2.GetNotification()) { return false } for i := range rsp1.GetNotification() { if !GnmiNotificationsEqual(rsp1.GetNotification()[i], rsp2.GetNotification()[i]) { return false } } return true } func SetResponsesEqual(rsp1, rsp2 *gnmi.SetResponse) bool { if rsp1 == nil && rsp2 == nil { return true } if rsp1 == nil || rsp2 == nil { return false } if len(rsp1.GetResponse()) != len(rsp2.GetResponse()) { return false } for i := range rsp1.GetResponse() { if !GnmiUpdateResultEqual(rsp1.GetResponse()[i], rsp2.GetResponse()[i]) { return false } } return true } func SubscribeResponsesEqual(rsp1, rsp2 *gnmi.SubscribeResponse) bool { if rsp1 == nil && rsp2 == nil { return true } if rsp1 == nil || rsp2 == nil { return false } switch rsp1.GetResponse().(type) { case *gnmi.SubscribeResponse_Update: switch rsp2.GetResponse().(type) { case *gnmi.SubscribeResponse_Update: default: return false } case *gnmi.SubscribeResponse_SyncResponse: switch rsp2.GetResponse().(type) { case *gnmi.SubscribeResponse_SyncResponse: default: return false } } switch rsp1 := rsp1.GetResponse().(type) { case *gnmi.SubscribeResponse_Update: switch rsp2 := rsp2.GetResponse().(type) { case *gnmi.SubscribeResponse_Update: return GnmiNotificationsEqual(rsp1.Update, rsp2.Update) } case *gnmi.SubscribeResponse_SyncResponse: switch rsp2 := rsp2.GetResponse().(type) { case *gnmi.SubscribeResponse_SyncResponse: if rsp1.SyncResponse != rsp2.SyncResponse { return false } } } return true } func GnmiPathsEqual(p1, p2 *gnmi.Path) bool { if p1 == nil && p2 == nil { return true } if p1 == nil || p2 == nil { return false } if p1.Origin != p2.Origin { return false } if p1.Target != p2.Target { return false } if len(p1.Elem) != len(p2.Elem) { return false } for i, e := range p1.Elem { if e.Name != p2.Elem[i].Name { return false } if !cmp.Equal(e.Key, p2.Elem[i].Key) { return false } } return true } func GnmiSubscriptionEqual(s1, s2 *gnmi.Subscription) bool { if s1 == nil && s2 != nil { return false } if s1 != nil && s2 == nil { return false } if s1.Mode != s2.Mode { return false } if s1.SampleInterval != s2.SampleInterval { return false } if s1.SuppressRedundant != s2.SuppressRedundant { return false } if !GnmiPathsEqual(s1.Path, s2.Path) { return false } return true } func GnmiUpdatesEqual(u1, u2 *gnmi.Update) bool { if u1 == nil && u2 == nil { return true } if u1 == nil || u2 == nil { return false } if u1.GetDuplicates() != u2.GetDuplicates() { return false } if !GnmiPathsEqual(u1.GetPath(), u2.GetPath()) { return false } return cmp.Equal(u1.GetVal().GetValue(), u2.GetVal().GetValue()) } func GnmiNotificationsEqual(n1, n2 *gnmi.Notification) bool { if n1.GetAtomic() != n2.GetAtomic() { return false } // compare timestamps if n1.GetTimestamp() != n2.GetTimestamp() { return false } // compare prefixes if !GnmiPathsEqual(n1.GetPrefix(), n2.GetPrefix()) { return false } // compare updates for j := range n1.GetUpdate() { if !GnmiUpdatesEqual(n1.GetUpdate()[j], n2.GetUpdate()[j]) { return false } } // compare deletes for j := range n1.GetDelete() { if !GnmiPathsEqual(n1.GetDelete()[j], n2.GetDelete()[j]) { return false } } return true } func GnmiUpdateResultEqual(u1, u2 *gnmi.UpdateResult) bool { if u1 == nil && u2 == nil { return true } if u1 == nil || u2 == nil { return false } if u1.GetOp() != u2.GetOp() { return false } if !GnmiPathsEqual(u1.GetPath(), u2.GetPath()) { return false } return true } func GnmiValuesEqual(v1, v2 *gnmi.TypedValue) bool { if v1 == nil && v2 == nil { return true } if v1 == nil || v2 == nil { return false } switch v1 := v1.GetValue().(type) { case *gnmi.TypedValue_AnyVal: switch v2 := v2.GetValue().(type) { case *gnmi.TypedValue_AnyVal: if v1 == nil && v2 == nil { return true } if v1 == nil || v2 == nil { return false } if v1.AnyVal == nil && v2.AnyVal == nil { return true } if v1.AnyVal == nil || v2.AnyVal == nil { return false } if v1.AnyVal.GetTypeUrl() != v2.AnyVal.GetTypeUrl() { return false } return bytes.Equal(v1.AnyVal.GetValue(), v2.AnyVal.GetValue()) default: return false } case *gnmi.TypedValue_AsciiVal: switch v2 := v2.GetValue().(type) { case *gnmi.TypedValue_AsciiVal: if v1 == nil && v2 == nil { return true } if v1 == nil || v2 == nil { return false } return v1.AsciiVal == v2.AsciiVal default: return false } case *gnmi.TypedValue_BoolVal: switch v2 := v2.GetValue().(type) { case *gnmi.TypedValue_BoolVal: if v1 == nil && v2 == nil { return true } if v1 == nil || v2 == nil { return false } return v1.BoolVal == v2.BoolVal default: return false } case *gnmi.TypedValue_BytesVal: switch v2 := v2.GetValue().(type) { case *gnmi.TypedValue_BytesVal: if v1 == nil && v2 == nil { return true } if v1 == nil || v2 == nil { return false } return bytes.Equal(v1.BytesVal, v2.BytesVal) default: return false } case *gnmi.TypedValue_DecimalVal: switch v2 := v2.GetValue().(type) { case *gnmi.TypedValue_DecimalVal: if v1 == nil && v2 == nil { return true } if v1 == nil || v2 == nil { return false } //lint:ignore SA1019 still need DecimalVal for backward compatibility if v1.DecimalVal.GetDigits() != v2.DecimalVal.GetDigits() { return false } //lint:ignore SA1019 still need DecimalVal for backward compatibility return v1.DecimalVal.GetPrecision() == v2.DecimalVal.GetPrecision() default: return false } case *gnmi.TypedValue_FloatVal: switch v2 := v2.GetValue().(type) { case *gnmi.TypedValue_FloatVal: if v1 == nil && v2 == nil { return true } if v1 == nil || v2 == nil { return false } //lint:ignore SA1019 still need FloatVal for backward compatibility return v1.FloatVal == v2.FloatVal default: return false } case *gnmi.TypedValue_IntVal: switch v2 := v2.GetValue().(type) { case *gnmi.TypedValue_IntVal: if v1 == nil && v2 == nil { return true } if v1 == nil || v2 == nil { return false } return v1.IntVal == v2.IntVal default: return false } case *gnmi.TypedValue_JsonIetfVal: switch v2 := v2.GetValue().(type) { case *gnmi.TypedValue_JsonIetfVal: if v1 == nil && v2 == nil { return true } if v1 == nil || v2 == nil { return false } return bytes.Equal(v1.JsonIetfVal, v2.JsonIetfVal) default: return false } case *gnmi.TypedValue_JsonVal: switch v2 := v2.GetValue().(type) { case *gnmi.TypedValue_JsonVal: if v1 == nil && v2 == nil { return true } if v1 == nil || v2 == nil { return false } return bytes.Equal(v1.JsonVal, v2.JsonVal) default: return false } case *gnmi.TypedValue_LeaflistVal: switch v2 := v2.GetValue().(type) { case *gnmi.TypedValue_LeaflistVal: if v1 == nil && v2 == nil { return true } if v1 == nil || v2 == nil { return false } if len(v1.LeaflistVal.GetElement()) != len(v2.LeaflistVal.GetElement()) { return false } for i := range v1.LeaflistVal.GetElement() { if !GnmiValuesEqual(v1.LeaflistVal.Element[i], v2.LeaflistVal.Element[i]) { return false } } default: return false } case *gnmi.TypedValue_ProtoBytes: switch v2 := v2.GetValue().(type) { case *gnmi.TypedValue_ProtoBytes: if v1 == nil && v2 == nil { return true } if v1 == nil || v2 == nil { return false } return bytes.Equal(v1.ProtoBytes, v2.ProtoBytes) default: return false } case *gnmi.TypedValue_StringVal: switch v2 := v2.GetValue().(type) { case *gnmi.TypedValue_StringVal: if v1 == nil && v2 == nil { return true } if v1 == nil || v2 == nil { return false } return v1.StringVal == v2.StringVal default: return false } case *gnmi.TypedValue_UintVal: switch v2 := v2.GetValue().(type) { case *gnmi.TypedValue_UintVal: if v1 == nil && v2 == nil { return true } if v1 == nil || v2 == nil { return false } return v1.UintVal == v2.UintVal default: return false } } return true } func RegisterOpEqual(r1, r2 *tpb.RegisterOp) bool { if r1 == nil && r2 == nil { return true } if r1 == nil || r2 == nil { return false } switch r1 := r1.GetRegistration().(type) { case *tpb.RegisterOp_Target: switch r2 := r2.GetRegistration().(type) { case *tpb.RegisterOp_Target: if r1.Target.GetAccept() != r2.Target.GetAccept() { return false } if r1.Target.GetOp() != r2.Target.GetOp() { return false } if r1.Target.GetTarget() != r2.Target.GetTarget() { return false } if r1.Target.GetError() != r2.Target.GetError() { return false } if r1.Target.GetTargetType() != r2.Target.GetTargetType() { return false } default: return false } case *tpb.RegisterOp_Session: switch r2 := r2.GetRegistration().(type) { case *tpb.RegisterOp_Session: if r1.Session.GetAccept() != r2.Session.GetAccept() { return false } if r1.Session.GetTarget() != r2.Session.GetTarget() { return false } if r1.Session.GetError() != r2.Session.GetError() { return false } if r1.Session.GetTargetType() != r2.Session.GetTargetType() { return false } if r1.Session.GetTag() != r2.Session.GetTag() { return false } default: return false } case *tpb.RegisterOp_Subscription: switch r2 := r2.GetRegistration().(type) { case *tpb.RegisterOp_Subscription: if r1.Subscription.GetAccept() != r2.Subscription.GetAccept() { return false } if r1.Subscription.GetOp() != r2.Subscription.GetOp() { return false } if r1.Subscription.GetError() != r2.Subscription.GetError() { return false } if r1.Subscription.GetTargetType() != r2.Subscription.GetTargetType() { return false } default: return false } } return true } func TunnelDataEqual(r1, r2 *tpb.Data) bool { if r1 == nil && r2 == nil { return true } if r1 == nil || r2 == nil { return false } if r1.GetClose() != r2.GetClose() { return false } if !bytes.Equal(r1.GetData(), r2.GetData()) { return false } if r1.GetTag() != r2.GetTag() { return false } return true } ================================================ FILE: pkg/api/tunnel.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package api import ( "fmt" tpb "github.com/openconfig/grpctunnel/proto/tunnel" "google.golang.org/protobuf/proto" ) // TunnelOption is a function that acts on the supplied proto.Message. // The message is expected to be one of the protobuf defined gRPC tunnel messages // exchanged by the RPCs or any of the nested messages. type TunnelOption func(proto.Message) error // apply is a helper function that simply applies the options to the proto.Message. // It returns an error if any of the options fails. func applyTunnelOpts(m proto.Message, opts ...TunnelOption) error { for _, o := range opts { if err := o(m); err != nil { return err } } return nil } func NewRegisterOpTarget(opts ...TunnelOption) (*tpb.RegisterOp, error) { m := &tpb.RegisterOp{ Registration: new(tpb.RegisterOp_Target), } err := applyTunnelOpts(m, opts...) if err != nil { return nil, err } return m, nil } func NewRegisterOpSession(opts ...TunnelOption) (*tpb.RegisterOp, error) { m := &tpb.RegisterOp{ Registration: new(tpb.RegisterOp_Session), } err := applyTunnelOpts(m, opts...) if err != nil { return nil, err } return m, nil } func NewRegisterOpSubscription(opts ...TunnelOption) (*tpb.RegisterOp, error) { m := &tpb.RegisterOp{ Registration: new(tpb.RegisterOp_Subscription), } err := applyTunnelOpts(m, opts...) if err != nil { return nil, err } return m, nil } func NewData(opts ...TunnelOption) (*tpb.Data, error) { m := new(tpb.Data) err := applyTunnelOpts(m, opts...) if err != nil { return nil, err } return m, nil } // Messages options func TunnelTarget(opts ...TunnelOption) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *tpb.RegisterOp: switch msg := msg.Registration.(type) { case *tpb.RegisterOp_Target: target := new(tpb.Target) err := applyTunnelOpts(target, opts...) if err != nil { return err } msg.Target = target } default: return fmt.Errorf("option TunnelTarget: %w: %T", ErrInvalidMsgType, msg) } return nil } } func TunnelSession(opts ...TunnelOption) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *tpb.RegisterOp: switch msg := msg.Registration.(type) { case *tpb.RegisterOp_Session: session := new(tpb.Session) err := applyTunnelOpts(session, opts...) if err != nil { return err } msg.Session = session } default: return fmt.Errorf("option TunnelSession: %w: %T", ErrInvalidMsgType, msg) } return nil } } func TunnelSubscription(opts ...TunnelOption) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *tpb.RegisterOp: switch msg := msg.Registration.(type) { case *tpb.RegisterOp_Subscription: subscription := new(tpb.Subscription) err := applyTunnelOpts(subscription, opts...) if err != nil { return err } msg.Subscription = subscription } default: return fmt.Errorf("option TunnelSubscription: %w: %T", ErrInvalidMsgType, msg) } return nil } } // Common Options func TargetOpRemove() func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *tpb.Target: msg.Op = tpb.Target_REMOVE default: return fmt.Errorf("option TargetOpRemove: %w: %T", ErrInvalidMsgType, msg) } return nil } } func Accept(b bool) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *tpb.Target: msg.Accept = b case *tpb.Session: msg.Accept = b case *tpb.Subscription: msg.Accept = b default: return fmt.Errorf("option Accept: %w: %T", ErrInvalidMsgType, msg) } return nil } } func TargetName(n string) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *tpb.Target: msg.Target = n case *tpb.Session: msg.Target = n default: return fmt.Errorf("option TargetName: %w: %T", ErrInvalidMsgType, msg) } return nil } } func TargetType(typ string) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *tpb.Target: msg.TargetType = typ case *tpb.Session: msg.TargetType = typ case *tpb.Subscription: msg.TargetType = typ default: return fmt.Errorf("option TargetType: %w: %T", ErrInvalidMsgType, msg) } return nil } } func Error(e string) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *tpb.Target: msg.Error = e case *tpb.Session: msg.Error = e case *tpb.Subscription: msg.Error = e default: return fmt.Errorf("option Error: %w: %T", ErrInvalidMsgType, msg) } return nil } } // Target Options func TargetOpAdd() func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *tpb.Target: msg.Op = tpb.Target_ADD default: return fmt.Errorf("option TargetOpAdd: %w: %T", ErrInvalidMsgType, msg) } return nil } } func Tag(t int32) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *tpb.Session: msg.Tag = t case *tpb.Data: msg.Tag = t default: return fmt.Errorf("option Tag: %w: %T", ErrInvalidMsgType, msg) } return nil } } // Subscription Options func SubscriptionOpSubscribe() func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *tpb.Subscription: msg.Op = tpb.Subscription_SUBCRIBE // default: return fmt.Errorf("option SubscriptionOpSubscribe: %w: %T", ErrInvalidMsgType, msg) } return nil } } func SubscriptionOpUnsubscribe() func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *tpb.Subscription: msg.Op = tpb.Subscription_UNSUBCRIBE // default: return fmt.Errorf("option SubscriptionOpUnsubscribe: %w: %T", ErrInvalidMsgType, msg) } return nil } } // Data Options func Data(d []byte) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *tpb.Data: msg.Data = d default: return fmt.Errorf("option Data: %w: %T", ErrInvalidMsgType, msg) } return nil } } func Close(b bool) func(msg proto.Message) error { return func(msg proto.Message) error { if msg == nil { return ErrInvalidMsgType } switch msg := msg.ProtoReflect().Interface().(type) { case *tpb.Data: msg.Close = b default: return fmt.Errorf("option Close: %w: %T", ErrInvalidMsgType, msg) } return nil } } ================================================ FILE: pkg/api/tunnel_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package api import ( "errors" "testing" tpb "github.com/openconfig/grpctunnel/proto/tunnel" "github.com/openconfig/gnmic/pkg/api/testutils" ) type registerOpInput struct { opts []TunnelOption msg *tpb.RegisterOp err error } var registerOpTargetTestSet = map[string]registerOpInput{ "target_add": { opts: []TunnelOption{ TunnelTarget( TargetOpAdd(), Accept(true), TargetName("target1"), TargetType("target_type1"), ), }, msg: &tpb.RegisterOp{ Registration: &tpb.RegisterOp_Target{ Target: &tpb.Target{ Op: tpb.Target_ADD, Accept: true, Target: "target1", TargetType: "target_type1", }, }}, err: nil, }, "target_remove": { opts: []TunnelOption{ TunnelTarget( TargetOpRemove(), Accept(true), TargetName("target1"), TargetType("target_type1"), ), }, msg: &tpb.RegisterOp{ Registration: &tpb.RegisterOp_Target{ Target: &tpb.Target{ Op: tpb.Target_REMOVE, Accept: true, Target: "target1", TargetType: "target_type1", }, }}, err: nil, }, "target_error": { opts: []TunnelOption{ TunnelTarget( TargetOpRemove(), Accept(true), TargetName("target1"), TargetType("target_type1"), Error("err1"), ), }, msg: &tpb.RegisterOp{ Registration: &tpb.RegisterOp_Target{ Target: &tpb.Target{ Op: tpb.Target_REMOVE, Accept: true, Target: "target1", TargetType: "target_type1", Error: "err1", }, }}, err: nil, }, "target_nok": { opts: []TunnelOption{ TunnelTarget( Tag(42), Accept(true), TargetName("target1"), TargetType("target_type1"), ), }, msg: nil, err: ErrInvalidMsgType, }, } var registerOpSessionTestSet = map[string]registerOpInput{ "session_ok": { opts: []TunnelOption{ TunnelSession( Tag(42), Accept(true), TargetName("target1"), TargetType("target_type1"), ), }, msg: &tpb.RegisterOp{ Registration: &tpb.RegisterOp_Session{ Session: &tpb.Session{ Tag: 42, Accept: true, Target: "target1", TargetType: "target_type1", }, }}, err: nil, }, "session_nok": { opts: []TunnelOption{ TunnelSession( TargetOpAdd(), Accept(true), TargetName("target1"), TargetType("target_type1"), ), }, msg: nil, err: ErrInvalidMsgType, }, "session_err": { opts: []TunnelOption{ TunnelSession( Tag(42), Accept(true), TargetName("target1"), TargetType("target_type1"), Error("err1"), ), }, msg: &tpb.RegisterOp{ Registration: &tpb.RegisterOp_Session{ Session: &tpb.Session{ Tag: 42, Accept: true, Target: "target1", TargetType: "target_type1", Error: "err1", }, }}, err: nil, }, } var registerOpSubscriptionTestSet = map[string]registerOpInput{ "subscription_op_subscribe": { opts: []TunnelOption{ TunnelSubscription( SubscriptionOpSubscribe(), Accept(true), TargetType("target_type1"), ), }, msg: &tpb.RegisterOp{ Registration: &tpb.RegisterOp_Subscription{ Subscription: &tpb.Subscription{ Op: tpb.Subscription_SUBCRIBE, Accept: true, TargetType: "target_type1", }, }}, err: nil, }, "subscription_op_unsubscribe": { opts: []TunnelOption{ TunnelSubscription( SubscriptionOpUnsubscribe(), Accept(true), TargetType("target_type1"), ), }, msg: &tpb.RegisterOp{ Registration: &tpb.RegisterOp_Subscription{ Subscription: &tpb.Subscription{ Op: tpb.Subscription_UNSUBCRIBE, Accept: true, TargetType: "target_type1", }, }}, err: nil, }, "subscription_nok": { opts: []TunnelOption{ TunnelSubscription( SubscriptionOpSubscribe(), Accept(true), TargetName("target1"), TargetType("target_type1"), ), }, msg: nil, err: ErrInvalidMsgType, }, "subscription_err": { opts: []TunnelOption{ TunnelSubscription( SubscriptionOpUnsubscribe(), Accept(true), TargetType("target_type1"), Error("err1"), ), }, msg: &tpb.RegisterOp{ Registration: &tpb.RegisterOp_Subscription{ Subscription: &tpb.Subscription{ Op: tpb.Subscription_UNSUBCRIBE, Accept: true, TargetType: "target_type1", Error: "err1", }, }}, err: nil, }, } func TestNewRegister(t *testing.T) { for name, item := range registerOpTargetTestSet { t.Run(name, func(t *testing.T) { nreq, err := NewRegisterOpTarget(item.opts...) if err != nil { uerr := errors.Unwrap(err) if !errors.Is(uerr, item.err) { t.Errorf("%q failed", name) t.Errorf("%q expected err : %v", name, item.err) t.Errorf("%q got err : %v", name, err) t.Fail() } return } if !testutils.RegisterOpEqual(nreq, item.msg) { t.Errorf("%q failed", name) t.Errorf("%q expected result : %+v", name, item.msg) t.Errorf("%q got result : %+v", name, nreq) t.Fail() } }) } for name, item := range registerOpSessionTestSet { t.Run(name, func(t *testing.T) { nreq, err := NewRegisterOpSession(item.opts...) if err != nil { uerr := errors.Unwrap(err) if !errors.Is(uerr, item.err) { t.Errorf("%q failed", name) t.Errorf("%q expected err : %v", name, item.err) t.Errorf("%q got err : %v", name, err) t.Fail() } return } if !testutils.RegisterOpEqual(nreq, item.msg) { t.Errorf("%q failed", name) t.Errorf("%q expected result : %+v", name, item.msg) t.Errorf("%q got result : %+v", name, nreq) t.Fail() } }) } for name, item := range registerOpSubscriptionTestSet { t.Run(name, func(t *testing.T) { nreq, err := NewRegisterOpSubscription(item.opts...) if err != nil { uerr := errors.Unwrap(err) if !errors.Is(uerr, item.err) { t.Errorf("%q failed", name) t.Errorf("%q expected err : %v", name, item.err) t.Errorf("%q got err : %v", name, err) t.Fail() } return } if !testutils.RegisterOpEqual(nreq, item.msg) { t.Errorf("%q failed", name) t.Errorf("%q expected result : %+v", name, item.msg) t.Errorf("%q got result : %+v", name, nreq) t.Fail() } }) } } type dataInput struct { opts []TunnelOption msg *tpb.Data err error } var dataTestSet = map[string]dataInput{ "data_ok": { opts: []TunnelOption{ Tag(42), Data([]byte("foo")), Close(true), }, msg: &tpb.Data{ Tag: 42, Data: []byte("foo"), Close: true, }, err: nil, }, "data_nok": { opts: []TunnelOption{ TargetName("bar"), Tag(42), Data([]byte("foo")), Close(true), }, msg: nil, err: ErrInvalidMsgType, }, } func TestNewData(t *testing.T) { for name, item := range dataTestSet { t.Run(name, func(t *testing.T) { nreq, err := NewData(item.opts...) if err != nil { uerr := errors.Unwrap(err) if !errors.Is(uerr, item.err) { t.Errorf("%q failed", name) t.Errorf("%q expected err : %v", name, item.err) t.Errorf("%q got err : %v", name, err) t.Fail() } return } if !testutils.TunnelDataEqual(nreq, item.msg) { t.Errorf("%q failed", name) t.Errorf("%q expected result : %+v", name, item.msg) t.Errorf("%q got result : %+v", name, nreq) t.Fail() } }) } } ================================================ FILE: pkg/api/types/sasl.go ================================================ package types type SASL struct { User string `mapstructure:"user,omitempty"` Password string `mapstructure:"password,omitempty"` Mechanism string `mapstructure:"mechanism,omitempty"` TokenURL string `mapstructure:"token-url,omitempty"` } ================================================ FILE: pkg/api/types/subscription.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package types import ( "encoding/json" "fmt" "strings" "time" ) const ( notApplicable = "NA" ) // SubscriptionConfig // type SubscriptionConfig struct { Name string `mapstructure:"name,omitempty" json:"name,omitempty"` Models []string `mapstructure:"models,omitempty" json:"models,omitempty"` Prefix string `mapstructure:"prefix,omitempty" json:"prefix,omitempty"` Target string `mapstructure:"target,omitempty" json:"target,omitempty"` SetTarget bool `mapstructure:"set-target,omitempty" json:"set-target,omitempty"` Paths []string `mapstructure:"paths,omitempty" json:"paths,omitempty"` Mode string `mapstructure:"mode,omitempty" json:"mode,omitempty"` StreamMode string `mapstructure:"stream-mode,omitempty" json:"stream-mode,omitempty"` Encoding *string `mapstructure:"encoding,omitempty" json:"encoding,omitempty"` Qos *uint32 `mapstructure:"qos,omitempty" json:"qos,omitempty"` SampleInterval *time.Duration `mapstructure:"sample-interval,omitempty" json:"sample-interval,omitempty"` HeartbeatInterval *time.Duration `mapstructure:"heartbeat-interval,omitempty" json:"heartbeat-interval,omitempty"` SuppressRedundant bool `mapstructure:"suppress-redundant,omitempty" json:"suppress-redundant,omitempty"` UpdatesOnly bool `mapstructure:"updates-only,omitempty" json:"updates-only,omitempty"` History *HistoryConfig `mapstructure:"history,omitempty" json:"history,omitempty"` StreamSubscriptions []*SubscriptionConfig `mapstructure:"stream-subscriptions,omitempty" json:"stream-subscriptions,omitempty"` Outputs []string `mapstructure:"outputs,omitempty" json:"outputs,omitempty"` Depth uint32 `mapstructure:"depth,omitempty" json:"depth,omitempty"` } type HistoryConfig struct { Snapshot time.Time `mapstructure:"snapshot,omitempty" json:"snapshot,omitempty"` Start time.Time `mapstructure:"start,omitempty" json:"start,omitempty"` End time.Time `mapstructure:"end,omitempty" json:"end,omitempty"` } // String // func (sc *SubscriptionConfig) String() string { b, err := json.Marshal(sc) if err != nil { return "" } return string(b) } func (sc *SubscriptionConfig) PathsString() string { return fmt.Sprintf("- %s", strings.Join(sc.Paths, "\n- ")) } func (sc *SubscriptionConfig) PrefixString() string { if sc.Prefix == "" { return notApplicable } return sc.Prefix } func (sc *SubscriptionConfig) ModeString() string { if strings.ToLower(sc.Mode) == "stream" { return fmt.Sprintf("%s/%s", strings.ToLower(sc.Mode), strings.ToLower(sc.StreamMode)) } return strings.ToLower(sc.Mode) } func (sc *SubscriptionConfig) SampleIntervalString() string { if strings.ToLower(sc.Mode) == "stream" && strings.ToLower(sc.StreamMode) == "sample" { if sc.SampleInterval == nil { return "0s" } return sc.SampleInterval.String() } return notApplicable } func (sc *SubscriptionConfig) ModelsString() string { return fmt.Sprintf("- %s", strings.Join(sc.Models, "\n- ")) } func (sc *SubscriptionConfig) QosString() string { if sc.Qos == nil { return notApplicable } return fmt.Sprintf("%d", *sc.Qos) } func (sc *SubscriptionConfig) HeartbeatIntervalString() string { if sc.HeartbeatInterval == nil { return "0s" } return sc.HeartbeatInterval.String() } func (sc *SubscriptionConfig) SuppressRedundantString() string { return fmt.Sprintf("%t", sc.SuppressRedundant) } func (sc *SubscriptionConfig) UpdatesOnlyString() string { return fmt.Sprintf("%t", sc.UpdatesOnly) } ================================================ FILE: pkg/api/types/target.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package types import ( "crypto/tls" "encoding/json" "fmt" "maps" "os" "reflect" "slices" "strings" "time" "golang.org/x/oauth2" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/oauth" "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/keepalive" "github.com/openconfig/gnmic/pkg/api/utils" ) // map of supported cipher suites func ciphersMap() map[string]uint16 { return map[string]uint16{ // secure, up to tls1.2 "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, // secure, only tls1.2 "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, // secure, tls1.3 "TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256, "TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384, "TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256, // secure, ECDHE "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, // insecure "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, } } var cipherSuitesPreferenceOrder = []uint16{ // AEADs w/ ECDHE tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, // CBC w/ ECDHE tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, // AEADs w/o ECDHE tls.TLS_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_RSA_WITH_AES_256_GCM_SHA384, // CBC w/o ECDHE tls.TLS_RSA_WITH_AES_128_CBC_SHA, tls.TLS_RSA_WITH_AES_256_CBC_SHA, // 3DES tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, // disabled cipher suites // CBC_SHA256 tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, tls.TLS_RSA_WITH_AES_128_CBC_SHA256, // RC4 tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, tls.TLS_RSA_WITH_RC4_128_SHA, } var disabledCipherSuites = []uint16{ // CBC_SHA256 tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, tls.TLS_RSA_WITH_AES_128_CBC_SHA256, // RC4 tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, tls.TLS_RSA_WITH_RC4_128_SHA, } var ( defaultCipherSuitesLen = len(cipherSuitesPreferenceOrder) - len(disabledCipherSuites) defaultCipherSuites = cipherSuitesPreferenceOrder[:defaultCipherSuitesLen] ) var defaultCipherSuitesTLS13 = []uint16{ tls.TLS_AES_128_GCM_SHA256, tls.TLS_AES_256_GCM_SHA384, tls.TLS_CHACHA20_POLY1305_SHA256, } // TargetConfig // type TargetConfig struct { Name string `mapstructure:"name,omitempty" yaml:"name,omitempty" json:"name,omitempty"` Address string `mapstructure:"address,omitempty" yaml:"address,omitempty" json:"address,omitempty"` Username *string `mapstructure:"username,omitempty" yaml:"username,omitempty" json:"username,omitempty"` Password *string `mapstructure:"password,omitempty" yaml:"password,omitempty" json:"password,omitempty"` AuthScheme string `mapstructure:"auth-scheme,omitempty" yaml:"auth-scheme,omitempty" json:"auth-scheme,omitempty"` Timeout time.Duration `mapstructure:"timeout,omitempty" yaml:"timeout,omitempty" json:"timeout,omitempty"` Insecure *bool `mapstructure:"insecure,omitempty" yaml:"insecure,omitempty" json:"insecure,omitempty"` TLSCA *string `mapstructure:"tls-ca,omitempty" yaml:"tls-ca,omitempty" json:"tls-ca,omitempty"` TLSCert *string `mapstructure:"tls-cert,omitempty" yaml:"tls-cert,omitempty" json:"tls-cert,omitempty"` TLSKey *string `mapstructure:"tls-key,omitempty" yaml:"tls-key,omitempty" json:"tls-key,omitempty"` SkipVerify *bool `mapstructure:"skip-verify,omitempty" yaml:"skip-verify,omitempty" json:"skip-verify,omitempty"` TLSServerName string `mapstructure:"tls-server-name,omitempty" yaml:"tls-server-name,omitempty" json:"tls-server-name,omitempty"` Subscriptions []string `mapstructure:"subscriptions,omitempty" yaml:"subscriptions,omitempty" json:"subscriptions,omitempty"` Outputs []string `mapstructure:"outputs,omitempty" yaml:"outputs,omitempty" json:"outputs,omitempty"` BufferSize uint `mapstructure:"buffer-size,omitempty" yaml:"buffer-size,omitempty" json:"buffer-size,omitempty"` GRPCReadBufferSize *int `mapstructure:"grpc-read-buffer-size,omitempty" yaml:"grpc-read-buffer-size,omitempty" json:"grpc-read-buffer-size,omitempty"` GRPCWriteBufferSize *int `mapstructure:"grpc-write-buffer-size,omitempty" yaml:"grpc-write-buffer-size,omitempty" json:"grpc-write-buffer-size,omitempty"` GRPCConnWindowSize *int `mapstructure:"grpc-conn-window-size,omitempty" yaml:"grpc-conn-window-size,omitempty" json:"grpc-conn-window-size,omitempty"` GRPCWindowSize *int `mapstructure:"grpc-window-size,omitempty" yaml:"grpc-window-size,omitempty" json:"grpc-window-size,omitempty"` GRPCStaticConnWindowSize *int `mapstructure:"grpc-static-conn-window-size,omitempty" yaml:"grpc-static-conn-window-size,omitempty" json:"grpc-static-conn-window-size,omitempty"` GRPCStaticStreamWindowSize *int `mapstructure:"grpc-static-stream-window-size,omitempty" yaml:"grpc-static-stream-window-size,omitempty" json:"grpc-static-stream-window-size,omitempty"` RetryTimer time.Duration `mapstructure:"retry-timer,omitempty" yaml:"retry-timer,omitempty" json:"retry-timer,omitempty"` TLSMinVersion string `mapstructure:"tls-min-version,omitempty" yaml:"tls-min-version,omitempty" json:"tls-min-version,omitempty"` TLSMaxVersion string `mapstructure:"tls-max-version,omitempty" yaml:"tls-max-version,omitempty" json:"tls-max-version,omitempty"` TLSVersion string `mapstructure:"tls-version,omitempty" yaml:"tls-version,omitempty" json:"tls-version,omitempty"` LogTLSSecret *bool `mapstructure:"log-tls-secret,omitempty" yaml:"log-tls-secret,omitempty" json:"log-tls-secret,omitempty"` ProtoFiles []string `mapstructure:"proto-files,omitempty" yaml:"proto-files,omitempty" json:"proto-files,omitempty"` ProtoDirs []string `mapstructure:"proto-dirs,omitempty" yaml:"proto-dirs,omitempty" json:"proto-dirs,omitempty"` Tags []string `mapstructure:"tags,omitempty" yaml:"tags,omitempty" json:"tags,omitempty"` EventTags map[string]string `mapstructure:"event-tags,omitempty" yaml:"event-tags,omitempty" json:"event-tags,omitempty"` Gzip *bool `mapstructure:"gzip,omitempty" yaml:"gzip,omitempty" json:"gzip,omitempty"` Token *string `mapstructure:"token,omitempty" yaml:"token,omitempty" json:"token,omitempty"` Proxy string `mapstructure:"proxy,omitempty" yaml:"proxy,omitempty" json:"proxy,omitempty"` // TunnelTargetType string `mapstructure:"-" yaml:"tunnel-target-type,omitempty" json:"tunnel-target-type,omitempty"` Encoding *string `mapstructure:"encoding,omitempty" yaml:"encoding,omitempty" json:"encoding,omitempty"` Metadata map[string]string `mapstructure:"metadata,omitempty" yaml:"metadata,omitempty" json:"metadata,omitempty"` CipherSuites []string `mapstructure:"cipher-suites,omitempty" yaml:"cipher-suites,omitempty" json:"cipher-suites,omitempty"` TCPKeepalive time.Duration `mapstructure:"tcp-keepalive,omitempty" yaml:"tcp-keepalive,omitempty" json:"tcp-keepalive,omitempty"` GRPCKeepalive *ClientKeepalive `mapstructure:"grpc-keepalive,omitempty" yaml:"grpc-keepalive,omitempty" json:"grpc-keepalive,omitempty"` tlsConfig *tls.Config } type ClientKeepalive struct { Time time.Duration `mapstructure:"time,omitempty"` Timeout time.Duration `mapstructure:"timeout,omitempty"` PermitWithoutStream bool `mapstructure:"permit-without-stream,omitempty"` } func (tc TargetConfig) String() string { if tc.Password != nil { pwd := "****" tc.Password = &pwd } b, err := json.Marshal(tc) if err != nil { return "" } return string(b) } func clonePtr[T any](p *T) *T { if p == nil { return nil } v := *p return &v } func (tc *TargetConfig) DeepCopy() *TargetConfig { if tc == nil { return nil } ntc := &TargetConfig{ Name: tc.Name, Address: tc.Address, Username: clonePtr(tc.Username), Password: clonePtr(tc.Password), AuthScheme: tc.AuthScheme, Timeout: tc.Timeout, Insecure: clonePtr(tc.Insecure), TLSCA: clonePtr(tc.TLSCA), TLSCert: clonePtr(tc.TLSCert), TLSKey: clonePtr(tc.TLSKey), SkipVerify: clonePtr(tc.SkipVerify), TLSServerName: tc.TLSServerName, Subscriptions: make([]string, 0, len(tc.Subscriptions)), Outputs: make([]string, 0, len(tc.Outputs)), BufferSize: tc.BufferSize, GRPCReadBufferSize: clonePtr(tc.GRPCReadBufferSize), GRPCWriteBufferSize: clonePtr(tc.GRPCWriteBufferSize), GRPCConnWindowSize: clonePtr(tc.GRPCConnWindowSize), GRPCWindowSize: clonePtr(tc.GRPCWindowSize), GRPCStaticConnWindowSize: clonePtr(tc.GRPCStaticConnWindowSize), GRPCStaticStreamWindowSize: clonePtr(tc.GRPCStaticStreamWindowSize), RetryTimer: tc.RetryTimer, TLSMinVersion: tc.TLSMinVersion, TLSMaxVersion: tc.TLSMaxVersion, TLSVersion: tc.TLSVersion, LogTLSSecret: clonePtr(tc.LogTLSSecret), ProtoFiles: make([]string, 0, len(tc.ProtoFiles)), ProtoDirs: make([]string, 0, len(tc.ProtoDirs)), Tags: make([]string, 0, len(tc.Tags)), EventTags: make(map[string]string, len(tc.EventTags)), Gzip: clonePtr(tc.Gzip), Token: clonePtr(tc.Token), Proxy: tc.Proxy, TunnelTargetType: tc.TunnelTargetType, Encoding: clonePtr(tc.Encoding), Metadata: make(map[string]string, len(tc.Metadata)), CipherSuites: make([]string, 0, len(tc.CipherSuites)), TCPKeepalive: tc.TCPKeepalive, } ntc.Subscriptions = append(ntc.Subscriptions, tc.Subscriptions...) ntc.Outputs = append(ntc.Outputs, tc.Outputs...) ntc.ProtoFiles = append(ntc.ProtoFiles, tc.ProtoFiles...) ntc.ProtoDirs = append(ntc.ProtoDirs, tc.ProtoDirs...) ntc.Tags = append(ntc.Tags, tc.Tags...) ntc.CipherSuites = append(ntc.CipherSuites, tc.CipherSuites...) maps.Copy(ntc.EventTags, tc.EventTags) maps.Copy(ntc.Metadata, tc.Metadata) if tc.GRPCKeepalive != nil { ntc.GRPCKeepalive = &ClientKeepalive{ Time: tc.GRPCKeepalive.Time, Timeout: tc.GRPCKeepalive.Timeout, PermitWithoutStream: tc.GRPCKeepalive.PermitWithoutStream, } } return ntc } func (tc *TargetConfig) SetTLSConfig(tlsConfig *tls.Config) { tc.tlsConfig = tlsConfig } // NewTLSConfig // func (tc *TargetConfig) NewTLSConfig() (*tls.Config, error) { if tc.tlsConfig != nil { return tc.tlsConfig, nil } var ca, cert, key string if tc.TLSCA != nil { ca = *tc.TLSCA } if tc.TLSCert != nil { cert = *tc.TLSCert } if tc.TLSKey != nil { key = *tc.TLSKey } var skipVerify bool if tc.SkipVerify != nil { skipVerify = *tc.SkipVerify } tlsConfig, err := utils.NewTLSConfig(ca, cert, key, "", skipVerify, false) if err != nil { return nil, err } if tlsConfig == nil { return nil, nil } if tc.LogTLSSecret != nil && *tc.LogTLSSecret { logPath := tc.Name + ".tlssecret.log" w, err := os.Create(logPath) if err != nil { return nil, err } tlsConfig.KeyLogWriter = w } tlsConfig.MaxVersion = tc.getTLSMaxVersion() tlsConfig.MinVersion = tc.getTLSMinVersion() tlsConfig.ServerName = tc.TLSServerName // tc.cipher-suites is not set if len(tlsConfig.CipherSuites) == 0 && len(tc.CipherSuites) == 0 { cs := make([]uint16, len(defaultCipherSuites), len(defaultCipherSuites)+len(defaultCipherSuitesTLS13)) copy(cs, defaultCipherSuites) if tlsConfig.MaxVersion == tls.VersionTLS13 || tlsConfig.MaxVersion == 0 { cs = append(cs, defaultCipherSuitesTLS13...) } tlsConfig.CipherSuites = cs } // tc.cipher-suites is set if len(tlsConfig.CipherSuites) == 0 && len(tc.CipherSuites) != 0 { tlsConfig.CipherSuites = make([]uint16, 0, len(tc.CipherSuites)) cmap := ciphersMap() for _, cs := range tc.CipherSuites { if _, ok := cmap[cs]; !ok { return nil, fmt.Errorf("unknown cipher suite %q", cs) } tlsConfig.CipherSuites = append(tlsConfig.CipherSuites, cmap[cs]) } } return tlsConfig, nil } // GrpcDialOptions creates the grpc.dialOption list from the target's configuration func (tc *TargetConfig) GrpcDialOptions() ([]grpc.DialOption, error) { tOpts := make([]grpc.DialOption, 0, 1) // gzip if tc.Gzip != nil && *tc.Gzip { tOpts = append(tOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name))) } // gRPC keepalive if tc.GRPCKeepalive != nil { tOpts = append(tOpts, grpc.WithKeepaliveParams(keepalive.ClientParameters{ Time: tc.GRPCKeepalive.Time, Timeout: tc.GRPCKeepalive.Timeout, PermitWithoutStream: tc.GRPCKeepalive.PermitWithoutStream, })) } if tc.GRPCReadBufferSize != nil { tOpts = append(tOpts, grpc.WithReadBufferSize(*tc.GRPCReadBufferSize)) } if tc.GRPCWriteBufferSize != nil { tOpts = append(tOpts, grpc.WithWriteBufferSize(*tc.GRPCWriteBufferSize)) } if tc.GRPCConnWindowSize != nil { tOpts = append(tOpts, grpc.WithInitialConnWindowSize(int32(*tc.GRPCConnWindowSize))) } if tc.GRPCWindowSize != nil { tOpts = append(tOpts, grpc.WithInitialWindowSize(int32(*tc.GRPCWindowSize))) } if tc.GRPCStaticConnWindowSize != nil { tOpts = append(tOpts, grpc.WithStaticConnWindowSize(int32(*tc.GRPCStaticConnWindowSize))) } if tc.GRPCStaticStreamWindowSize != nil { tOpts = append(tOpts, grpc.WithStaticStreamWindowSize(int32(*tc.GRPCStaticStreamWindowSize))) } // insecure if tc.Insecure != nil && *tc.Insecure { tOpts = append(tOpts, grpc.WithTransportCredentials( insecure.NewCredentials(), ), ) return tOpts, nil } // secure tlsConfig, err := tc.NewTLSConfig() if err != nil { return nil, err } tOpts = append(tOpts, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) // token credentials if tc.Token != nil && *tc.Token != "" { tOpts = append(tOpts, grpc.WithPerRPCCredentials( oauth.TokenSource{ TokenSource: oauth2.StaticTokenSource( &oauth2.Token{ AccessToken: *tc.Token, }, ), }, )) } return tOpts, nil } func (tc *TargetConfig) UsernameString() string { if tc.Username == nil { return notApplicable } return *tc.Username } func (tc *TargetConfig) PasswordString() string { if tc.Password == nil { return notApplicable } return *tc.Password } func (tc *TargetConfig) InsecureString() string { if tc.Insecure == nil { return notApplicable } return fmt.Sprintf("%t", *tc.Insecure) } func (tc *TargetConfig) TLSCAString() string { if tc.TLSCA == nil || *tc.TLSCA == "" { return notApplicable } return *tc.TLSCA } func (tc *TargetConfig) TLSKeyString() string { if tc.TLSKey == nil || *tc.TLSKey == "" { return notApplicable } return *tc.TLSKey } func (tc *TargetConfig) TLSCertString() string { if tc.TLSCert == nil || *tc.TLSCert == "" { return notApplicable } return *tc.TLSCert } func (tc *TargetConfig) SkipVerifyString() string { if tc.SkipVerify == nil { return notApplicable } return fmt.Sprintf("%t", *tc.SkipVerify) } func (tc *TargetConfig) SubscriptionString() string { return fmt.Sprintf("- %s", strings.Join(tc.Subscriptions, "\n")) } func (tc *TargetConfig) OutputsString() string { return strings.Join(tc.Outputs, "\n") } func (tc *TargetConfig) BufferSizeString() string { return fmt.Sprintf("%d", tc.BufferSize) } func (tc *TargetConfig) getTLSMinVersion() uint16 { v := tlsVersionStringToUint(tc.TLSVersion) if v > 0 { return v } return tlsVersionStringToUint(tc.TLSMinVersion) } func (tc *TargetConfig) getTLSMaxVersion() uint16 { v := tlsVersionStringToUint(tc.TLSVersion) if v > 0 { return v } return tlsVersionStringToUint(tc.TLSMaxVersion) } func tlsVersionStringToUint(v string) uint16 { switch v { default: return 0 case "1.3": return tls.VersionTLS13 case "1.2": return tls.VersionTLS12 case "1.1": return tls.VersionTLS11 case "1.0", "1": return tls.VersionTLS10 } } func (tc *TargetConfig) Equal(other *TargetConfig) bool { if tc == other { return true } if tc == nil || other == nil { return false } ptrEq := func(a, b any) bool { if a == nil && b == nil { return true } if a == nil || b == nil { return false } return reflect.DeepEqual(a, b) } return tc.Name == other.Name && tc.Address == other.Address && ptrEq(tc.Username, other.Username) && ptrEq(tc.Password, other.Password) && tc.AuthScheme == other.AuthScheme && tc.Timeout == other.Timeout && ptrEq(tc.Insecure, other.Insecure) && ptrEq(tc.TLSCA, other.TLSCA) && ptrEq(tc.TLSCert, other.TLSCert) && ptrEq(tc.TLSKey, other.TLSKey) && ptrEq(tc.SkipVerify, other.SkipVerify) && tc.TLSServerName == other.TLSServerName && slices.Equal(tc.Subscriptions, other.Subscriptions) && slices.Equal(tc.Outputs, other.Outputs) && tc.BufferSize == other.BufferSize && tc.RetryTimer == other.RetryTimer && tc.TLSMinVersion == other.TLSMinVersion && tc.TLSMaxVersion == other.TLSMaxVersion && tc.TLSVersion == other.TLSVersion && ptrEq(tc.LogTLSSecret, other.LogTLSSecret) && slices.Equal(tc.ProtoFiles, other.ProtoFiles) && slices.Equal(tc.ProtoDirs, other.ProtoDirs) && slices.Equal(tc.Tags, other.Tags) && maps.Equal(tc.EventTags, other.EventTags) && ptrEq(tc.Gzip, other.Gzip) && ptrEq(tc.Token, other.Token) && tc.Proxy == other.Proxy && tc.TunnelTargetType == other.TunnelTargetType && ptrEq(tc.Encoding, other.Encoding) && maps.Equal(tc.Metadata, other.Metadata) && slices.Equal(tc.CipherSuites, other.CipherSuites) && tc.TCPKeepalive == other.TCPKeepalive && reflect.DeepEqual(tc.GRPCKeepalive, other.GRPCKeepalive) && tc.GRPCReadBufferSize == other.GRPCReadBufferSize && tc.GRPCWriteBufferSize == other.GRPCWriteBufferSize && tc.GRPCConnWindowSize == other.GRPCConnWindowSize && tc.GRPCWindowSize == other.GRPCWindowSize && tc.GRPCStaticConnWindowSize == other.GRPCStaticConnWindowSize && tc.GRPCStaticStreamWindowSize == other.GRPCStaticStreamWindowSize } ================================================ FILE: pkg/api/types/tls.go ================================================ package types import "fmt" type TLSConfig struct { CaFile string `mapstructure:"ca-file,omitempty"` KeyFile string `mapstructure:"key-file,omitempty"` CertFile string `mapstructure:"cert-file,omitempty"` SkipVerify bool `mapstructure:"skip-verify,omitempty"` ClientAuth string `mapstructure:"client-auth,omitempty"` } func (t *TLSConfig) Validate() error { if t == nil { return nil } switch t.ClientAuth { case "", "request": case "require", "verify-if-given", "require-verify": if t.CaFile == "" { return fmt.Errorf("ca-file is required when `client-auth` is %q", t.ClientAuth) } default: return fmt.Errorf("unknown `client-auth` mode: %s", t.ClientAuth) } return nil } func (t *TLSConfig) Equal(other *TLSConfig) bool { if t == nil && other == nil { return true } if t == nil || other == nil { return false } return t.CaFile == other.CaFile && t.CertFile == other.CertFile && t.KeyFile == other.KeyFile && t.SkipVerify == other.SkipVerify && t.ClientAuth == other.ClientAuth } ================================================ FILE: pkg/api/utils/tls.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package utils import ( "bufio" "bytes" "context" "crypto/rand" "crypto/rsa" "crypto/tls" "crypto/x509" "crypto/x509/pkix" "encoding/pem" "fmt" "io" "math/big" "os" "path/filepath" "sync" "time" ) // NewTLSConfig generates a *tls.Config based on given CA, certificate, key files and skipVerify flag // if certificate and key are missing a self signed key pair is generated. // The certificates paths can be local or remote, http(s) and (s)ftp are supported for remote files. func NewTLSConfig(ca, cert, key, clientAuth string, skipVerify, genSelfSigned bool) (*tls.Config, error) { if !(skipVerify || ca != "" || (cert != "" && key != "")) { return nil, nil } tlsConfig := &tls.Config{ InsecureSkipVerify: skipVerify, } // set clientAuth switch clientAuth { case "": if ca != "" { tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert } case "request": tlsConfig.ClientAuth = tls.RequestClientCert case "require": tlsConfig.ClientAuth = tls.RequireAnyClientCert case "verify-if-given": tlsConfig.ClientAuth = tls.VerifyClientCertIfGiven case "require-verify": tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert default: return nil, fmt.Errorf("unknown client-auth mode: %s", clientAuth) } if cert != "" && key != "" { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() var certBytes, keyBytes []byte errCh := make(chan error, 2) wg := new(sync.WaitGroup) wg.Add(2) go func() { defer wg.Done() var err error certBytes, err = ReadLocalFile(ctx, cert) if err != nil { errCh <- err return } }() go func() { defer wg.Done() var err error keyBytes, err = ReadLocalFile(ctx, key) if err != nil { errCh <- err return } }() wg.Wait() close(errCh) for err := range errCh { return nil, err } certificate, err := tls.X509KeyPair(certBytes, keyBytes) if err != nil { return nil, err } tlsConfig.Certificates = []tls.Certificate{certificate} } else if genSelfSigned { cert, err := SelfSignedCerts() if err != nil { return nil, err } tlsConfig.Certificates = []tls.Certificate{cert} } if ca != "" { certPool, err := LoadCACertificates(ca) if err != nil { return nil, err } tlsConfig.RootCAs = certPool tlsConfig.ClientCAs = certPool } return tlsConfig, nil } func SelfSignedCerts() (tls.Certificate, error) { notBefore := time.Now() notAfter := notBefore.Add(365 * 24 * time.Hour) serialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128)) if err != nil { return tls.Certificate{}, nil } certTemplate := &x509.Certificate{ SerialNumber: serialNumber, Subject: pkix.Name{ Organization: []string{"openconfig.net"}, }, DNSNames: []string{"openconfig.net"}, NotBefore: notBefore, NotAfter: notAfter, KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, BasicConstraintsValid: true, } priv, err := rsa.GenerateKey(rand.Reader, 4096) if err != nil { return tls.Certificate{}, nil } derBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, certTemplate, &priv.PublicKey, priv) if err != nil { return tls.Certificate{}, nil } certBuff := new(bytes.Buffer) keyBuff := new(bytes.Buffer) pem.Encode(certBuff, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) pem.Encode(keyBuff, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}) return tls.X509KeyPair(certBuff.Bytes(), keyBuff.Bytes()) } // readLocalFile reads a file from the local file system, // unmarshals the content into a map[string]*types.TargetConfig // and returns func ReadLocalFile(ctx context.Context, path string) ([]byte, error) { // read from stdin if path == "-" { return readFromStdin(ctx) } // local file f, err := os.Open(path) if err != nil { return nil, err } defer f.Close() st, err := f.Stat() if err != nil { return nil, err } if st.IsDir() { return nil, fmt.Errorf("%q is a directory", path) } data := make([]byte, st.Size()) rd := bufio.NewReader(f) _, err = rd.Read(data) if err != nil && err != io.EOF { return nil, err } return data, nil } // read bytes from stdin func readFromStdin(ctx context.Context) ([]byte, error) { // read from stdin data := make([]byte, 0, 128) rd := bufio.NewReader(os.Stdin) buf := make([]byte, 128) for { select { case <-ctx.Done(): return nil, ctx.Err() default: n, err := rd.Read(buf) if err == io.EOF { data = append(data, buf[:n]...) return data, nil } if err != nil { return nil, err } data = append(data, buf[:n]...) } } } // LoadCACertificates reads PEM-encoded CA certificates from a file and adds them to a CertPool. // It returns the CertPool and any error encountered. func LoadCACertificates(caPath string) (*x509.CertPool, error) { st, err := os.Stat(caPath) if err != nil { return nil, fmt.Errorf("failed to stat the cert file: %s: %w", caPath, err) } if st.IsDir() { files, err := os.ReadDir(caPath) if err != nil { return nil, fmt.Errorf("failed to read the cert directory: %s: %w", caPath, err) } certPool := x509.NewCertPool() for _, file := range files { if file.IsDir() { continue } err = loadCACertificatesToPool(filepath.Join(caPath, file.Name()), certPool) if err != nil { return nil, fmt.Errorf("failed to load the cert file: %s: %w", filepath.Join(caPath, file.Name()), err) } } return certPool, nil } // caPath is a single cert file certPool := x509.NewCertPool() err = loadCACertificatesToPool(caPath, certPool) if err != nil { return nil, fmt.Errorf("failed to load the cert file: %s: %w", caPath, err) } return certPool, nil } func loadCACertificatesToPool(filePath string, certPool *x509.CertPool) error { certPEMBlock, err := os.ReadFile(filePath) if err != nil { return fmt.Errorf("failed to read the cert file: %s: %w", filePath, err) } for { block, rest := pem.Decode(certPEMBlock) if block == nil { break } certPEMBlock = rest cert, err := x509.ParseCertificate(block.Bytes) if err != nil { return fmt.Errorf("failed to parse certificate: %w", err) } if !cert.IsCA { return fmt.Errorf("file %s contains a certificate that is not a CA", filePath) } certPool.AddCert(cert) } return nil } ================================================ FILE: pkg/api/utils/utils.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package utils import ( "log" "net" "reflect" ) const ( DefaultLoggingFlags = log.LstdFlags | log.Lmicroseconds | log.Lmsgprefix ) func MergeMaps(dst, src map[string]any) map[string]any { if dst == nil { dst = make(map[string]any) } if src == nil { return dst } for key, srcVal := range src { if dstVal, ok := dst[key]; ok { srcMap, srcMapOk := mapify(srcVal) dstMap, dstMapOk := mapify(dstVal) if srcMapOk && dstMapOk { srcVal = MergeMaps(dstMap, srcMap) } } dst[key] = srcVal } return dst } func mapify(i interface{}) (map[string]interface{}, bool) { value := reflect.ValueOf(i) if value.Kind() == reflect.Map { m := map[string]interface{}{} for _, k := range value.MapKeys() { m[k.String()] = value.MapIndex(k).Interface() } return m, true } return map[string]interface{}{}, false } func GetHost(hostport string) string { h, _, err := net.SplitHostPort(hostport) if err != nil { return hostport } return h } func Convert(i interface{}) interface{} { switch x := i.(type) { case map[interface{}]interface{}: nm := map[string]interface{}{} for k, v := range x { nm[k.(string)] = Convert(v) } return nm case map[string]interface{}: for k, v := range x { x[k] = Convert(v) } case []interface{}: for k, v := range x { x[k] = Convert(v) } } return i } ================================================ FILE: pkg/api/utils/utils_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package utils import ( "reflect" "testing" "github.com/google/go-cmp/cmp" ) var convertTestSet = []struct { name string in interface{} out interface{} }{ { name: "string", in: "test1", out: "test1", }, { name: "map[interface{}]interface{}", in: map[interface{}]interface{}{ "a": "b", }, out: map[string]interface{}{ "a": "b", }, }, { name: "map[string]interface{}", in: map[string]interface{}{ "a": map[interface{}]interface{}{ "b": "c", }, }, out: map[string]interface{}{ "a": map[string]interface{}{ "b": "c", }, }, }, { name: "[]interface{}", in: []interface{}{ "a", }, out: []interface{}{ "a", }, }, } func TestConvert(t *testing.T) { for _, item := range convertTestSet { t.Run(item.name, func(t *testing.T) { o := Convert(item.in) if !cmp.Equal(o, item.out) { t.Logf("%q failed", item.name) t.Fail() } }) } } func TestMergeMaps(t *testing.T) { tests := []struct { name string // description of this test case // Named input parameters for target function. dst map[string]interface{} src map[string]interface{} want map[string]interface{} }{ { name: "empty", dst: nil, src: nil, want: map[string]interface{}{}, }, { name: "empty_dst", dst: nil, src: map[string]interface{}{"a": "b"}, want: map[string]interface{}{"a": "b"}, }, { name: "empty_src", dst: map[string]interface{}{"a": "b"}, src: nil, want: map[string]interface{}{"a": "b"}, }, { name: "merge", dst: map[string]interface{}{"a": "b"}, src: map[string]interface{}{"a": "c"}, want: map[string]interface{}{"a": "c"}, }, { name: "merge_with_map", dst: map[string]interface{}{"a": "b"}, src: map[string]interface{}{"a": map[string]interface{}{"c": "d"}}, want: map[string]interface{}{"a": map[string]interface{}{"c": "d"}}, }, { name: "merge_with_map_and_slice", dst: map[string]interface{}{"a": "b"}, src: map[string]interface{}{"a": map[string]interface{}{"c": "d"}}, want: map[string]interface{}{"a": map[string]interface{}{"c": "d"}}, }, { name: "merge_with_slice", dst: map[string]interface{}{"a": "b"}, src: map[string]interface{}{"a": []interface{}{"c", "d"}}, want: map[string]interface{}{"a": []interface{}{"c", "d"}}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := MergeMaps(tt.dst, tt.src) if !reflect.DeepEqual(got, tt.want) { t.Logf("%q failed", tt.name) t.Logf("got: %v", got) t.Logf("want: %v", tt.want) t.Fail() } }) } } ================================================ FILE: pkg/app/api.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "context" "crypto/tls" "encoding/json" "fmt" "io" "net/http" "net/http/pprof" "path/filepath" "sort" "strings" "github.com/AlekSi/pointer" "github.com/gorilla/handlers" "github.com/gorilla/mux" "github.com/prometheus/client_golang/prometheus/collectors" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/config" "github.com/openconfig/gnmic/pkg/lockers" ) func (a *App) newAPIServer() (*http.Server, error) { a.routes() var tlscfg *tls.Config var err error if a.Config.APIServer.TLS != nil { tlscfg, err = utils.NewTLSConfig( a.Config.APIServer.TLS.CaFile, a.Config.APIServer.TLS.CertFile, a.Config.APIServer.TLS.KeyFile, a.Config.APIServer.TLS.ClientAuth, false, // skip-verify true, // genSelfSigned ) if err != nil { return nil, err } } if a.Config.APIServer.EnableProfiling { a.router.HandleFunc("/debug/pprof/", pprof.Index) a.router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) a.router.HandleFunc("/debug/pprof/profile", pprof.Profile) a.router.HandleFunc("/debug/pprof/symbol", pprof.Symbol) a.router.Path("/debug/pprof/symbol").Methods("POST", "GET").HandlerFunc(pprof.Symbol) a.router.HandleFunc("/debug/pprof/trace", pprof.Trace) a.router.Handle("/debug/pprof/heap", pprof.Handler("heap")) a.router.Handle("/debug/pprof/mutex", pprof.Handler("mutex")) a.router.Handle("/debug/pprof/threadcreate", pprof.Handler("threadcreate")) a.router.Handle("/debug/pprof/goroutine", pprof.Handler("goroutine")) a.router.Handle("/debug/pprof/allocs", pprof.Handler("allocs")) a.router.Handle("/debug/pprof/block", pprof.Handler("block")) } if a.Config.APIServer.EnableMetrics { a.router.Handle("/metrics", promhttp.HandlerFor(a.reg, promhttp.HandlerOpts{})) a.reg.MustRegister(collectors.NewGoCollector()) a.reg.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{})) a.reg.MustRegister(subscribeResponseReceivedCounter) a.reg.MustRegister(subscribeResponseFailedCounter) a.registerTargetMetrics() go a.startClusterMetrics() } s := &http.Server{ Addr: a.Config.APIServer.Address, Handler: a.router, ReadTimeout: a.Config.APIServer.Timeout / 2, WriteTimeout: a.Config.APIServer.Timeout / 2, } if tlscfg != nil { s.TLSConfig = tlscfg } return s, nil } type APIErrors struct { Errors []string `json:"errors,omitempty"` } func (a *App) handleConfigTargetsGet(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] var err error a.configLock.RLock() defer a.configLock.RUnlock() if id == "" { // copy targets map targets := make(map[string]*types.TargetConfig, len(a.Config.Targets)) for n, tc := range a.Config.Targets { ntc := tc.DeepCopy() ntc.Password = pointer.ToString("****") targets[n] = ntc } err = json.NewEncoder(w).Encode(targets) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) } return } if t, ok := a.Config.Targets[id]; ok { tc := t.DeepCopy() tc.Password = pointer.ToString("****") err = json.NewEncoder(w).Encode(tc) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) } return } w.WriteHeader(http.StatusNotFound) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("target %q not found", id)}}) } func (a *App) handleConfigTargetsPost(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } defer r.Body.Close() tc := new(types.TargetConfig) err = json.Unmarshal(body, tc) if err != nil { w.WriteHeader(http.StatusBadRequest) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } a.AddTargetConfig(tc) } func (a *App) handleConfigTargetsSubscriptions(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] if !a.targetConfigExists(id) { w.WriteHeader(http.StatusNotFound) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("target %q not found", id)}}) return } body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } defer r.Body.Close() var data map[string][]string err = json.Unmarshal(body, &data) if err != nil { w.WriteHeader(http.StatusBadRequest) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } subs, ok := data["subscriptions"] if !ok { w.WriteHeader(http.StatusBadRequest) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{"subscriptions not found"}}) return } err = a.UpdateTargetSubscription(a.ctx, id, subs) if err != nil { w.WriteHeader(http.StatusBadRequest) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } func (a *App) handleConfigTargetsDelete(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] err := a.DeleteTarget(r.Context(), id) if err != nil { w.WriteHeader(http.StatusNotFound) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } func (a *App) handleConfigSubscriptions(w http.ResponseWriter, r *http.Request) { a.handlerCommonGet(w, a.Config.Subscriptions) } func (a *App) handleConfigOutputs(w http.ResponseWriter, r *http.Request) { a.handlerCommonGet(w, a.Config.Outputs) } func (a *App) handleConfigClustering(w http.ResponseWriter, r *http.Request) { a.handlerCommonGet(w, a.Config.Clustering) } func (a *App) handleConfigAPIServer(w http.ResponseWriter, r *http.Request) { a.handlerCommonGet(w, a.Config.APIServer) } func (a *App) handleConfigGNMIServer(w http.ResponseWriter, r *http.Request) { a.handlerCommonGet(w, a.Config.GnmiServer) } func (a *App) handleConfigInputs(w http.ResponseWriter, r *http.Request) { a.handlerCommonGet(w, a.Config.Inputs) } func (a *App) handleConfigProcessors(w http.ResponseWriter, r *http.Request) { a.handlerCommonGet(w, a.Config.Processors) } func (a *App) handleConfig(w http.ResponseWriter, r *http.Request) { nc := &config.Config{ GlobalFlags: a.Config.GlobalFlags, LocalFlags: a.Config.LocalFlags, FileConfig: a.Config.FileConfig, Targets: make(map[string]*types.TargetConfig, len(a.Config.Targets)), Subscriptions: a.Config.Subscriptions, Outputs: a.Config.Outputs, Inputs: a.Config.Inputs, Processors: a.Config.Processors, Clustering: a.Config.Clustering, GnmiServer: a.Config.GnmiServer, APIServer: a.Config.APIServer, Loader: a.Config.Loader, Actions: a.Config.Actions, TunnelServer: a.Config.TunnelServer, } for n, t := range a.Config.Targets { tc := t.DeepCopy() tc.Password = pointer.ToString("****") nc.Targets[n] = tc } a.handlerCommonGet(w, nc) } func (a *App) handleTargetsGet(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] if id == "" { a.handlerCommonGet(w, a.Targets) return } if t, ok := a.Targets[id]; ok { a.handlerCommonGet(w, t) return } w.WriteHeader(http.StatusNotFound) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{"no targets found"}}) } func (a *App) handleTargetsPost(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] if id == "" { w.WriteHeader(http.StatusBadRequest) return } tc, ok := a.Config.Targets[id] if !ok { w.WriteHeader(http.StatusNotFound) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("target %q not found", id)}}) return } go a.TargetSubscribeStream(a.ctx, tc) } func (a *App) handleTargetsDelete(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] if id == "" { w.WriteHeader(http.StatusBadRequest) return } if _, ok := a.Targets[id]; !ok { w.WriteHeader(http.StatusNotFound) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("target %q not found", id)}}) return } err := a.DeleteTarget(a.ctx, id) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } type clusteringResponse struct { ClusterName string `json:"name,omitempty"` NumberOfLockedTargets int `json:"number-of-locked-targets"` Leader string `json:"leader,omitempty"` Members []clusterMember `json:"members,omitempty"` } type clusterMember struct { Name string `json:"name,omitempty"` APIEndpoint string `json:"api-endpoint,omitempty"` IsLeader bool `json:"is-leader,omitempty"` NumberOfLockedTargets int `json:"number-of-locked-nodes"` LockedTargets []string `json:"locked-targets,omitempty"` } func (a *App) handleClusteringGet(w http.ResponseWriter, r *http.Request) { if a.Config.Clustering == nil { return } ctx, cancel := context.WithCancel(r.Context()) defer cancel() resp := new(clusteringResponse) resp.ClusterName = a.Config.ClusterName var err error resp.Leader, err = a.getLeaderName(ctx) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } services, err := a.locker.GetServices(ctx, fmt.Sprintf("%s-gnmic-api", a.Config.ClusterName), nil) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } instanceNodes, err := a.getInstanceToTargetsMapping(ctx) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } for _, v := range instanceNodes { resp.NumberOfLockedTargets += len(v) } resp.Members = make([]clusterMember, len(services)) for i, s := range services { scheme := getServiceScheme(s) resp.Members[i].APIEndpoint = fmt.Sprintf("%s%s", scheme, s.Address) resp.Members[i].Name = strings.TrimSuffix(s.ID, "-api") resp.Members[i].IsLeader = resp.Leader == resp.Members[i].Name resp.Members[i].NumberOfLockedTargets = len(instanceNodes[resp.Members[i].Name]) resp.Members[i].LockedTargets = instanceNodes[resp.Members[i].Name] } b, err := json.Marshal(resp) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } _, err = w.Write(b) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) } } func (a *App) handleHealthzGet(w http.ResponseWriter, r *http.Request) { s := map[string]string{"status": "healthy"} b, err := json.Marshal(s) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } _, err = w.Write(b) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) } } func (a *App) handleAdminShutdown(w http.ResponseWriter, r *http.Request) { a.Logger.Printf("shutting down due to user request") a.Cfn() } func (a *App) handleClusteringMembersGet(w http.ResponseWriter, r *http.Request) { if a.Config.Clustering == nil { return } ctx, cancel := context.WithCancel(r.Context()) defer cancel() // get leader leader, err := a.getLeaderName(ctx) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } services, err := a.locker.GetServices(ctx, fmt.Sprintf("%s-gnmic-api", a.Config.ClusterName), nil) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } instanceNodes, err := a.getInstanceToTargetsMapping(ctx) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } members := make([]clusterMember, len(services)) for i, s := range services { scheme := getServiceScheme(s) members[i].APIEndpoint = fmt.Sprintf("%s%s", scheme, s.Address) members[i].Name = strings.TrimSuffix(s.ID, "-api") members[i].IsLeader = leader == members[i].Name members[i].NumberOfLockedTargets = len(instanceNodes[members[i].Name]) members[i].LockedTargets = instanceNodes[members[i].Name] } b, err := json.Marshal(members) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } _, err = w.Write(b) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) } } func (a *App) handleClusteringLeaderGet(w http.ResponseWriter, r *http.Request) { if a.Config.Clustering == nil { return } ctx, cancel := context.WithCancel(r.Context()) defer cancel() // get leader leader, err := a.getLeaderName(ctx) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } services, err := a.locker.GetServices(ctx, fmt.Sprintf("%s-gnmic-api", a.Config.ClusterName), nil) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } instanceNodes, err := a.getInstanceToTargetsMapping(ctx) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } members := make([]clusterMember, 1) for _, s := range services { if strings.TrimSuffix(s.ID, "-api") != leader { continue } scheme := getServiceScheme(s) // add the leader as a member then break from loop members[0].APIEndpoint = fmt.Sprintf("%s%s", scheme, s.Address) members[0].Name = strings.TrimSuffix(s.ID, "-api") members[0].IsLeader = true members[0].NumberOfLockedTargets = len(instanceNodes[members[0].Name]) members[0].LockedTargets = instanceNodes[members[0].Name] break } b, err := json.Marshal(members) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } w.Write(b) } func (a *App) handleClusteringLeaderDelete(w http.ResponseWriter, r *http.Request) { if a.Config.Clustering == nil { return } if !a.isLeader { w.WriteHeader(http.StatusBadRequest) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{"not leader"}}) return } err := a.locker.Unlock(r.Context(), a.leaderKey()) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } func (a *App) handleClusteringDrainInstance(w http.ResponseWriter, r *http.Request) { if a.Config.Clustering == nil { return } if !a.isLeader { w.WriteHeader(http.StatusBadRequest) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{"not leader"}}) return } vars := mux.Vars(r) id := vars["id"] if id == "" { w.WriteHeader(http.StatusBadRequest) return } ctx := r.Context() services, err := a.locker.GetServices(ctx, fmt.Sprintf("%s-gnmic-api", a.Config.ClusterName), []string{ fmt.Sprintf("instance-name=%s", id), }) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if len(services) == 0 { w.WriteHeader(http.StatusBadRequest) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{"unknown instance: " + id}}) return } targets, err := a.getInstanceTargets(ctx, id) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } go func() { a.dispatchLock.Lock() defer a.dispatchLock.Unlock() for _, t := range targets { err = a.unassignTarget(a.ctx, t, services[0].ID) if err != nil { a.Logger.Printf("failed to unassign target %s: %v", t, err) continue } tc, ok := a.Config.Targets[t] if !ok { a.Logger.Printf("could not find target %s config", t) continue } err = a.dispatchTarget(a.ctx, tc, id+"-api") if err != nil { a.Logger.Printf("failed to dispatch target %s: %v", t, err) continue } } }() } func (a *App) handleClusterRebalance(w http.ResponseWriter, r *http.Request) { if a.Config.Clustering == nil { return } if !a.isLeader { w.WriteHeader(http.StatusBadRequest) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{"not leader"}}) return } go func() { err := a.clusterRebalanceTargets() if err != nil { a.Logger.Printf("failed to rebalance: %v", err) } }() } // helpers func headersMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Add("Content-Type", "application/json") next.ServeHTTP(w, r) }) } func (a *App) loggingMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if (!a.Config.APIServer.HealthzDisableLogging && r.URL.Path == "/api/v1/healthz") || r.URL.Path != "/api/v1/healthz" { next = handlers.LoggingHandler(a.Logger.Writer(), next) } next.ServeHTTP(w, r) }) } func (a *App) handlerCommonGet(w http.ResponseWriter, i interface{}) { a.configLock.RLock() defer a.configLock.RUnlock() b, err := json.Marshal(i) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } _, err = w.Write(b) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) } } func (a *App) getLeaderName(ctx context.Context) (string, error) { leaderKey := fmt.Sprintf("gnmic/%s/leader", a.Config.ClusterName) leader, err := a.locker.List(ctx, leaderKey) if err != nil { return "", nil } return leader[leaderKey], nil } func (a *App) getInstanceTargets(ctx context.Context, instance string) ([]string, error) { locks, err := a.locker.List(ctx, fmt.Sprintf("gnmic/%s/targets", a.Config.Clustering.ClusterName)) if err != nil { return nil, err } if a.Config.Debug { a.Logger.Println("current locks:", locks) } targets := make([]string, 0) for k, v := range locks { if v == instance { targets = append(targets, filepath.Base(k)) } } sort.Strings(targets) return targets, nil } // getServiceScheme returns the scheme of the service based on the protocol tag // the tag is expected to be in the format "protocol=" // if the tag is not found, the scheme is "http" func getServiceScheme(service *lockers.Service) string { scheme := "http" for _, t := range service.Tags { if strings.HasPrefix(t, "protocol=") { if strings.Split(t, "=")[1] != "" { scheme = strings.Split(t, "=")[1] } break } } return scheme } ================================================ FILE: pkg/app/app.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "context" "errors" "fmt" "io" "log" "net" "net/http" "os" "sort" "strings" "sync" "time" "github.com/fsnotify/fsnotify" "github.com/fullstorydev/grpcurl" "github.com/gorilla/mux" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/jhump/protoreflect/desc" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/goyang/pkg/yang" "github.com/openconfig/grpctunnel/tunnel" "github.com/prometheus/client_golang/prometheus" "github.com/spf13/cobra" "github.com/spf13/pflag" "golang.org/x/sync/semaphore" "google.golang.org/grpc" "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/grpclog" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmic/pkg/api/target" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/cache" "github.com/openconfig/gnmic/pkg/config" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/formatters/plugin_manager" "github.com/openconfig/gnmic/pkg/inputs" "github.com/openconfig/gnmic/pkg/lockers" "github.com/openconfig/gnmic/pkg/outputs" "github.com/openconfig/gnmic/pkg/utils" "github.com/openconfig/gnmic/pkg/version" "github.com/zestor-dev/zestor/store" "github.com/zestor-dev/zestor/store/gomap" ) const ( defaultHTTPClientTimeout = 5 * time.Second ) var obscuredAttrs = []string{ "password", } type App struct { ctx context.Context Cfn context.CancelFunc RootCmd *cobra.Command sem *semaphore.Weighted // configLock *sync.RWMutex Config *config.Config Store store.Store[any] // collector dialOpts []grpc.DialOption operLock *sync.RWMutex Outputs map[string]outputs.Output Inputs map[string]inputs.Input Targets map[string]*target.Target targetsChan chan *target.Target activeTargets map[string]struct{} targetsLockFn map[string]context.CancelFunc rootDesc desc.Descriptor // end collector router *mux.Router locker lockers.Locker clusteringClient *http.Client // api apiServices map[string]*lockers.Service isLeader bool dispatchLock *sync.Mutex // prometheus registry reg *prometheus.Registry // Logger *log.Logger out io.Writer // prompt mode PromptMode bool PromptHistory []string SchemaTree *yang.Entry // yang modules *yang.Modules // wg *sync.WaitGroup printLock *sync.Mutex errCh chan error // gNMI cache, used if a gnmi-server is configured // with subscribe or proxy commands. c cache.Cache // tunnel server // gRPC server where the tunnel service will be registered grpcTunnelSrv *grpc.Server tunServer *tunnel.Server ttm *sync.RWMutex tunTargets map[tunnel.Target]struct{} tunTargetCfn map[tunnel.Target]context.CancelFunc // processors plugin manager pm *plugin_manager.PluginManager // pprof pprof *pprofServer } func New() *App { ctx, cancel := context.WithCancel(context.Background()) a := &App{ ctx: ctx, Cfn: cancel, RootCmd: new(cobra.Command), sem: semaphore.NewWeighted(1), configLock: new(sync.RWMutex), Store: gomap.NewMemStore(store.StoreOptions[any]{}), Config: config.New(), reg: prometheus.NewRegistry(), // operLock: new(sync.RWMutex), Targets: make(map[string]*target.Target), Outputs: make(map[string]outputs.Output), Inputs: make(map[string]inputs.Input), targetsChan: make(chan *target.Target), activeTargets: make(map[string]struct{}), targetsLockFn: make(map[string]context.CancelFunc), // router: mux.NewRouter(), apiServices: make(map[string]*lockers.Service), dispatchLock: new(sync.Mutex), Logger: log.New(io.Discard, "[gnmic] ", log.LstdFlags|log.Lmsgprefix), out: os.Stdout, PromptHistory: make([]string, 0, 128), SchemaTree: &yang.Entry{ Dir: make(map[string]*yang.Entry), }, wg: new(sync.WaitGroup), printLock: new(sync.Mutex), // tunnel server ttm: new(sync.RWMutex), tunTargets: make(map[tunnel.Target]struct{}), tunTargetCfn: make(map[tunnel.Target]context.CancelFunc), // pprof pprof: newPprofServer(), } a.router.StrictSlash(true) a.router.Use(headersMiddleware, a.loggingMiddleware) return a } func (a *App) Context() context.Context { if a.ctx == nil { return context.Background() } return a.ctx } func (a *App) InitGlobalFlags() { a.RootCmd.ResetFlags() a.RootCmd.PersistentFlags().StringVar(&a.Config.CfgFile, "config", "", "main config file") a.RootCmd.PersistentFlags().StringSliceVarP(&a.Config.GlobalFlags.Address, "address", "a", []string{}, "comma separated gnmi targets addresses") a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.Username, "username", "u", "", "username") a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.Password, "password", "p", "", "password") a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.Port, "port", "", defaultGrpcPort, "gRPC port") a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.Encoding, "encoding", "e", "json", fmt.Sprintf("one of %q. Case insensitive", encodingNames)) a.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.Insecure, "insecure", "", false, "insecure connection") a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.TLSCa, "tls-ca", "", "", "tls certificate authority") a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.TLSCert, "tls-cert", "", "", "tls certificate") a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.TLSKey, "tls-key", "", "", "tls key") a.RootCmd.PersistentFlags().DurationVarP(&a.Config.GlobalFlags.Timeout, "timeout", "", 10*time.Second, "grpc timeout, valid formats: 10s, 1m30s, 1h") a.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.Debug, "debug", "d", false, "debug mode") a.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.EnablePprof, "enable-pprof", "", false, "enable go pprof") a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.PprofAddr, "pprof-addr", "", defaultPprofAddr, "pprof host/IP and port to listen on") a.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.SkipVerify, "skip-verify", "", false, "skip verify tls connection") a.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.NoPrefix, "no-prefix", "", false, "do not add [ip:port] prefix to print output in case of multiple targets") a.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.ProxyFromEnv, "proxy-from-env", "", false, "use proxy from environment") a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.Format, "format", "", "", fmt.Sprintf("output format, one of: %q", formatNames)) a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.LogFile, "log-file", "", "", "log file path") a.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.Log, "log", "", false, "write log messages to stderr") a.RootCmd.PersistentFlags().IntVarP(&a.Config.GlobalFlags.MaxMsgSize, "max-msg-size", "", msgSize, "max grpc msg size") a.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.PrintRequest, "print-request", "", false, "print request as well as the response(s)") a.RootCmd.PersistentFlags().DurationVarP(&a.Config.GlobalFlags.Retry, "retry", "", defaultRetryTimer, "retry timer for RPCs") a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.TLSMinVersion, "tls-min-version", "", "", fmt.Sprintf("minimum TLS supported version, one of %q", tlsVersions)) a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.TLSMaxVersion, "tls-max-version", "", "", fmt.Sprintf("maximum TLS supported version, one of %q", tlsVersions)) a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.TLSVersion, "tls-version", "", "", fmt.Sprintf("set TLS version. Overwrites --tls-min-version and --tls-max-version, one of %q", tlsVersions)) a.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.LogTLSSecret, "log-tls-secret", "", false, "enable logging of a TLS pre-master secret to a file") a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.TLSServerName, "tls-server-name", "", "", "sets the server name to be used when verifying the hostname on the returned certificates unless --skip-verify is set") a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.ClusterName, "cluster-name", "", defaultClusterName, "cluster name the gnmic instance belongs to, this is used for target loadsharing via a locker") a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.InstanceName, "instance-name", "", "", "gnmic instance name") a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.API, "api", "", "", "gnmic api address") a.RootCmd.PersistentFlags().StringArrayVarP(&a.Config.GlobalFlags.ProtoFile, "proto-file", "", nil, "proto file(s) name(s)") a.RootCmd.PersistentFlags().StringArrayVarP(&a.Config.GlobalFlags.ProtoDir, "proto-dir", "", nil, "directory to look for proto files specified with --proto-file") a.RootCmd.PersistentFlags().StringArrayVarP(&a.Config.GlobalFlags.RegisteredExtensions, "registered-extensions", "", nil, "registered (custom) extensions") a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.RequestExtensions, "request-extensions", "", "", "add registered (custom) extensions to request") a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.TargetsFile, "targets-file", "", "", "path to file with targets configuration") a.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.Gzip, "gzip", "", false, "enable gzip compression on gRPC connections") a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.Token, "token", "", "", "token value, used for gRPC token based authentication") a.RootCmd.PersistentFlags().StringArrayVarP(&a.Config.GlobalFlags.File, "file", "", nil, "YANG file(s)") a.RootCmd.PersistentFlags().StringArrayVarP(&a.Config.GlobalFlags.Dir, "dir", "", nil, "YANG dir(s)") a.RootCmd.PersistentFlags().StringArrayVarP(&a.Config.GlobalFlags.Exclude, "exclude", "", nil, "YANG module names to be excluded") a.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.UseTunnelServer, "use-tunnel-server", "", false, "use tunnel server to dial targets") a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.AuthScheme, "auth-scheme", "", "", "authentication scheme to use for the target's username/password") a.RootCmd.PersistentFlags().BoolVarP(&a.Config.GlobalFlags.CalculateLatency, "calculate-latency", "", false, "calculate the delta between each message timestamp and the receive timestamp. JSON format only") a.RootCmd.PersistentFlags().StringToStringP("metadata", "H", a.Config.GlobalFlags.Metadata, "add metadata to gRPC requests (`key=value`)") a.RootCmd.PersistentFlags().StringVarP(&a.Config.GlobalFlags.PluginProcessorsPath, "processors-plugins-path", "P", "", "filesystem path where gNMIc will look for even_plugin processors to initialize") a.RootCmd.PersistentFlags().VisitAll(func(flag *pflag.Flag) { a.Config.FileConfig.BindPFlag(flag.Name, flag) }) } func (a *App) PreRunE(cmd *cobra.Command, args []string) error { err := a.Config.ToStore(a.Store) if err != nil { return err } if a.Config.Debug { fmt.Println(a.Store.Dump()) } if a.Config.EnablePprof { _, _, err := net.SplitHostPort(a.Config.GlobalFlags.PprofAddr) if err != nil { return fmt.Errorf("pprof error %v", err) } a.pprof.Start(a.Config.GlobalFlags.PprofAddr) a.Logger.Printf("pprof server started at %s/debug/pprof", a.Config.GlobalFlags.PprofAddr) go func() { err := <-a.pprof.ErrChan() a.Logger.Printf("pprof server failed: %v", err) }() } a.Config.SetGlobalsFromEnv(a.RootCmd) a.Config.SetPersistentFlagsFromFile(a.RootCmd) logOutput, flags, err := a.Config.SetLogger() if err != nil { return err } a.Logger.SetOutput(logOutput) a.Logger.SetFlags(flags) a.Config.Address = config.ParseAddressField(a.Config.Address) a.Logger.Printf("version=%s, commit=%s, date=%s, gitURL=%s, docs=https://gnmic.openconfig.net", version.Version, version.Commit, version.Date, version.GitURL) if a.Config.Debug { grpclog.SetLogger(a.Logger) //lint:ignore SA1019 see https://github.com/karimra/gnmic/issues/59 } a.Logger.Printf("using config file %q", a.Config.FileConfig.ConfigFileUsed()) a.logConfigKVs() return a.validateGlobals() } func (a *App) validateGlobals() error { if a.Config.Insecure { if a.Config.SkipVerify { return errors.New("flags --insecure and --skip-verify are mutually exclusive") } if a.Config.TLSCa != "" { return errors.New("flags --insecure and --tls-ca are mutually exclusive") } if a.Config.TLSCert != "" { return errors.New("flags --insecure and --tls-cert are mutually exclusive") } if a.Config.TLSKey != "" { return errors.New("flags --insecure and --tls-key are mutually exclusive") } if a.Config.TLSVersion != "" { return errors.New("flags --insecure and --tls-version are mutually exclusive") } if a.Config.TLSMaxVersion != "" { return errors.New("flags --insecure and --tls-max-version are mutually exclusive") } if a.Config.TLSMinVersion != "" { return errors.New("flags --insecure and --tls-min-version are mutually exclusive") } } return nil } func (a *App) logConfigKVs() { if a.Config.Debug { keys := a.Config.FileConfig.AllKeys() sort.Strings(keys) for _, k := range keys { if !a.Config.FileConfig.IsSet(k) { continue } v := a.Config.FileConfig.Get(k) for _, obsc := range obscuredAttrs { if strings.HasSuffix(k, obsc) { v = "***" } } a.Logger.Printf("%s='%v'(%T)", k, v, v) } } } func (a *App) PrintMsg(address string, msgName string, msg proto.Message) error { a.printLock.Lock() defer a.printLock.Unlock() if a.Config.PrintRequest { fmt.Fprint(os.Stderr, msgName) fmt.Fprintln(os.Stderr, "") } printPrefix := "" if len(a.Config.TargetsList()) > 1 && !a.Config.NoPrefix { printPrefix = fmt.Sprintf("[%s] ", address) } switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.CapabilityResponse: if len(a.Config.Format) == 0 { a.printCapResponse(printPrefix, msg) return nil } } registeredExtensions, err := utils.ParseRegisteredExtensions(a.Config.RegisteredExtensions) if err != nil { return err } mo := formatters.MarshalOptions{ Multiline: true, Indent: " ", Format: a.Config.Format, ValuesOnly: a.Config.GetValuesOnly, CalculateLatency: a.Config.CalculateLatency, ProtoFiles: a.Config.ProtoFile, ProtoDir: a.Config.ProtoDir, RegisteredExtensions: registeredExtensions, } b, err := mo.Marshal(msg, map[string]string{"source": address}) if err != nil { a.Logger.Printf("error marshaling message: %v", err) if !a.Config.Log { fmt.Printf("error marshaling message: %v", err) } return err } sb := strings.Builder{} sb.Write(b) fmt.Fprintf(a.out, "%s\n", indent(printPrefix, sb.String())) return nil } func (a *App) createCollectorDialOpts() { // append gRPC userAgent name opts := []grpc.DialOption{grpc.WithUserAgent(fmt.Sprintf("gNMIc/%s", version.Version))} // add maxMsgSize if a.Config.MaxMsgSize > 0 { opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(a.Config.MaxMsgSize))) } // Set NoProxy if !a.Config.ProxyFromEnv { opts = append(opts, grpc.WithNoProxy()) } // add gzip compressor if a.Config.Gzip { opts = append(opts, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name))) } // enable metrics if a.Config.APIServer != nil && a.Config.APIServer.EnableMetrics && a.reg != nil { grpcClientMetrics := grpc_prometheus.NewClientMetrics() opts = append(opts, grpc.WithUnaryInterceptor(grpcClientMetrics.UnaryClientInterceptor()), grpc.WithStreamInterceptor(grpcClientMetrics.StreamClientInterceptor()), ) a.reg.MustRegister(grpcClientMetrics) } a.dialOpts = opts } func (a *App) watchConfig() { a.Logger.Printf("watching config...") a.Config.FileConfig.OnConfigChange(a.loadTargets) a.Config.FileConfig.WatchConfig() } func (a *App) loadTargets(e fsnotify.Event) { a.Logger.Printf("got config change notification: %v", e) ctx, cancel := context.WithCancel(a.ctx) defer cancel() err := a.sem.Acquire(ctx, 1) if err != nil { a.Logger.Printf("failed to acquire target loading semaphore: %v", err) return } defer a.sem.Release(1) switch e.Op { case fsnotify.Write, fsnotify.Create: newTargets, err := a.Config.GetTargets() if err != nil && !errors.Is(err, config.ErrNoTargetsFound) { a.Logger.Printf("failed getting targets from new config: %v", err) return } if !a.inCluster() { currentTargets := a.Targets // delete targets for n := range currentTargets { if _, ok := newTargets[n]; !ok { if a.Config.Debug { a.Logger.Printf("target %q deleted from config", n) } err = a.DeleteTarget(a.ctx, n) if err != nil { a.Logger.Printf("failed to delete target %q: %v", n, err) } } } // add targets var limiter *time.Ticker if a.Config.LocalFlags.SubscribeBackoff > 0 { limiter = time.NewTicker(a.Config.LocalFlags.SubscribeBackoff) } for n, tc := range newTargets { if _, ok := currentTargets[n]; !ok { if a.Config.Debug { a.Logger.Printf("target %q added to config", n) } a.AddTargetConfig(tc) a.wg.Add(1) go a.TargetSubscribeStream(a.ctx, tc) if limiter != nil { <-limiter.C } } } if limiter != nil { limiter.Stop() } return } // in a cluster if !a.isLeader { return } // in cluster && leader dist, err := a.getTargetToInstanceMapping(a.ctx) if err != nil { a.Logger.Printf("failed to get target to instance mapping: %v", err) return } // delete targets for t := range dist { if _, ok := newTargets[t]; !ok { err = a.deleteTarget(ctx, t) if err != nil { a.Logger.Printf("failed to delete target %q: %v", t, err) continue } } } // add new targets to cluster a.configLock.Lock() for _, tc := range newTargets { if _, ok := dist[tc.Name]; !ok { err = a.dispatchTarget(a.ctx, tc) if err != nil { a.Logger.Printf("failed to add target %q: %v", tc.Name, err) } } } a.configLock.Unlock() } } func (a *App) startAPIServer() { if a.Config.APIServer == nil { return } s, err := a.newAPIServer() if err != nil { a.Logger.Printf("failed to create a new API server: %v", err) return } go func() { var err error if s.TLSConfig != nil { err = s.ListenAndServeTLS("", "") if err != nil { a.Logger.Printf("API server err: %v", err) return } } else { err = s.ListenAndServe() if err != nil { a.Logger.Printf("API server err: %v", err) return } } }() } func (a *App) LoadProtoFiles() (desc.Descriptor, error) { if len(a.Config.ProtoFile) == 0 { return nil, nil } a.Logger.Printf("loading proto files...") descSource, err := grpcurl.DescriptorSourceFromProtoFiles(a.Config.ProtoDir, a.Config.ProtoFile...) if err != nil { a.Logger.Printf("failed to load proto files: %v", err) return nil, err } rootDesc, err := descSource.FindSymbol("Nokia.SROS.root") if err != nil { a.Logger.Printf("could not get symbol 'Nokia.SROS.root': %v", err) return nil, err } a.Logger.Printf("loaded proto files") a.rootDesc = rootDesc return rootDesc, nil } // GetTargets reads the targets configuration from flags or config file. // If enabled it will load targets from a configured tunnel server. func (a *App) GetTargets() (map[string]*types.TargetConfig, error) { targetsConfig, err := a.Config.GetTargets() if errors.Is(err, config.ErrNoTargetsFound) { if a.Config.UseTunnelServer { a.Logger.Printf("waiting %s for targets to register with the tunnel server...", a.Config.TunnelServer.TargetWaitTime) time.Sleep(a.Config.TunnelServer.TargetWaitTime) a.ttm.RLock() defer a.ttm.RUnlock() for tt := range a.tunTargets { tc := a.getTunnelTargetMatch(tt) if tc == nil { continue } err = a.Config.SetTargetConfigDefaults(tc) if err != nil { return nil, err } tc.Address = tc.Name a.AddTargetConfig(tc) } } else { return nil, fmt.Errorf("failed reading targets config: %v", err) } } else if err != nil { return nil, err } return targetsConfig, nil } func (a *App) CreateGNMIClient(ctx context.Context, t *target.Target) error { if t.Client != nil { return nil } targetDialOpts := a.dialOpts if a.Config.UseTunnelServer { targetDialOpts = append(targetDialOpts, grpc.WithContextDialer(a.tunDialerFn(ctx, t.Config)), ) t.Config.Address = t.Config.Name } a.Logger.Printf("creating gRPC client for target %q", t.Config.Name) if err := t.CreateGNMIClient(ctx, targetDialOpts...); err != nil { if errors.Is(err, context.DeadlineExceeded) { return fmt.Errorf("failed to create a gRPC client for target %q, timeout (%s) reached", t.Config.Name, t.Config.Timeout) } return fmt.Errorf("failed to create a gRPC client for target %q : %w", t.Config.Name, err) } return nil } ================================================ FILE: pkg/app/capabilities.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "context" "fmt" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmi/proto/gnmi_ext" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/grpctunnel/tunnel" "github.com/spf13/cobra" "github.com/spf13/pflag" ) func (a *App) CapPreRunE(cmd *cobra.Command, _ []string) error { a.Config.SetLocalFlagsFromFile(cmd) a.createCollectorDialOpts() return a.initTunnelServer(tunnel.ServerConfig{ AddTargetHandler: a.tunServerAddTargetHandler, DeleteTargetHandler: a.tunServerDeleteTargetHandler, RegisterHandler: a.tunServerRegisterHandler, Handler: a.tunServerHandler, }) } func (a *App) CapRunE(cmd *cobra.Command, args []string) error { defer a.InitCapabilitiesFlags(cmd) if a.Config.Format == formatEvent { return fmt.Errorf("format event not supported for Capabilities RPC") } ctx, cancel := context.WithCancel(a.ctx) defer cancel() // targetsConfig, err := a.GetTargets() if err != nil { return err } if a.PromptMode { // prompt mode for _, tc := range targetsConfig { a.AddTargetConfig(tc) } } numTargets := len(a.Config.Targets) a.errCh = make(chan error, numTargets*2) a.wg.Add(numTargets) for _, tc := range a.Config.Targets { go a.ReqCapabilities(ctx, tc) } a.wg.Wait() return a.checkErrors() } func (a *App) ReqCapabilities(ctx context.Context, tc *types.TargetConfig) { defer a.wg.Done() ext := make([]*gnmi_ext.Extension, 0) // if a.Config.PrintRequest { err := a.PrintMsg(tc.Name, "Capabilities Request:", &gnmi.CapabilityRequest{ Extension: ext, }) if err != nil { a.logError(fmt.Errorf("target %q: %v", tc.Name, err)) } } a.Logger.Printf("sending gNMI CapabilityRequest: gnmi_ext.Extension='%v' to %s", ext, tc.Name) response, err := a.ClientCapabilities(ctx, tc, ext...) if err != nil { a.logError(fmt.Errorf("target %q, capabilities request failed: %v", tc.Name, err)) return } err = a.PrintMsg(tc.Name, "Capabilities Response:", response) if err != nil { a.logError(fmt.Errorf("target %q: %v", tc.Name, err)) } } func (a *App) InitCapabilitiesFlags(cmd *cobra.Command) { cmd.ResetFlags() cmd.Flags().BoolVarP(&a.Config.LocalFlags.CapabilitiesVersion, "version", "", false, "show gnmi version only") cmd.LocalFlags().VisitAll(func(flag *pflag.Flag) { a.Config.FileConfig.BindPFlag(fmt.Sprintf("%s-%s", cmd.Name(), flag.Name), flag) }) } ================================================ FILE: pkg/app/clustering.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "bytes" "context" "crypto/tls" "encoding/json" "errors" "fmt" "net" "net/http" "path/filepath" "sort" "strconv" "strings" "time" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/lockers" ) const ( defaultClusterName = "default-cluster" retryTimer = 10 * time.Second lockWaitTime = 100 * time.Millisecond apiServiceName = "gnmic-api" protocolTagName = "__protocol" maxRebalanceLoop = 100 ) var ( errNoMoreSuitableServices = errors.New("no more suitable services for this target") errNotFound = errors.New("not found") ) func (a *App) InitLocker() error { if a.Config.Clustering == nil { return nil } if a.Config.Clustering.Locker == nil { return errors.New("missing locker config under clustering key") } if lockerType, ok := a.Config.Clustering.Locker["type"]; ok { a.Logger.Printf("starting locker type %q", lockerType) if initializer, ok := lockers.Lockers[lockerType.(string)]; ok { lock := initializer() err := lock.Init(a.ctx, a.Config.Clustering.Locker, lockers.WithLogger(a.Logger)) if err != nil { return err } a.locker = lock return nil } return fmt.Errorf("unknown locker type %q", lockerType) } return errors.New("missing locker type field") } func (a *App) leaderKey() string { return fmt.Sprintf("gnmic/%s/leader", a.Config.Clustering.ClusterName) } func (a *App) inCluster() bool { if a.Config == nil { return false } return !(a.Config.Clustering == nil) } func (a *App) apiServiceRegistration() { addr, port, _ := net.SplitHostPort(a.Config.APIServer.Address) p, _ := strconv.Atoi(port) tags := make([]string, 0, 2+len(a.Config.Clustering.Tags)) tags = append(tags, fmt.Sprintf("cluster-name=%s", a.Config.Clustering.ClusterName)) tags = append(tags, fmt.Sprintf("instance-name=%s", a.Config.Clustering.InstanceName)) if a.Config.APIServer.TLS != nil { tags = append(tags, protocolTagName+"=https") } else { tags = append(tags, protocolTagName+"=http") } tags = append(tags, a.Config.Clustering.Tags...) serviceReg := &lockers.ServiceRegistration{ ID: a.Config.Clustering.InstanceName + "-api", Name: fmt.Sprintf("%s-%s", a.Config.Clustering.ClusterName, apiServiceName), Address: a.Config.Clustering.ServiceAddress, Port: p, Tags: tags, TTL: 5 * time.Second, } if serviceReg.Address == "" { serviceReg.Address = addr } var err error a.Logger.Printf("registering service %+v", serviceReg) for { select { case <-a.ctx.Done(): return default: err = a.locker.Register(a.ctx, serviceReg) if err != nil { a.Logger.Printf("api service registration failed: %v", err) time.Sleep(retryTimer) continue } return } } } func (a *App) startCluster() { if a.locker == nil || a.Config.Clustering == nil { return } // register api service go a.apiServiceRegistration() leaderKey := a.leaderKey() var err error START: // acquire leader key lock for { a.isLeader = false err = nil a.isLeader, err = a.locker.Lock(a.ctx, leaderKey, []byte(a.Config.Clustering.InstanceName)) if err != nil { a.Logger.Printf("failed to acquire leader lock: %v", err) time.Sleep(retryTimer) continue } if !a.isLeader { time.Sleep(retryTimer) continue } a.isLeader = true a.Logger.Printf("%q became the leader", a.Config.Clustering.InstanceName) break } ctx, cancel := context.WithCancel(a.ctx) defer cancel() go func() { go a.watchMembers(ctx) a.Logger.Printf("leader waiting %s before dispatching targets", a.Config.Clustering.LeaderWaitTimer) time.Sleep(a.Config.Clustering.LeaderWaitTimer) a.Logger.Printf("leader done waiting, starting loader and dispatching targets") go a.startLoader(ctx) go a.dispatchTargets(ctx) }() doneCh, errCh := a.locker.KeepLock(ctx, leaderKey) select { case <-doneCh: a.Logger.Printf("%q lost leader role", a.Config.Clustering.InstanceName) cancel() a.isLeader = false time.Sleep(retryTimer) goto START case err := <-errCh: a.Logger.Printf("%q failed to maintain the leader key: %v", a.Config.Clustering.InstanceName, err) cancel() a.isLeader = false time.Sleep(retryTimer) goto START case <-a.ctx.Done(): return } } func (a *App) watchMembers(ctx context.Context) { serviceName := fmt.Sprintf("%s-%s", a.Config.Clustering.ClusterName, apiServiceName) START: select { case <-ctx.Done(): return default: membersChan := make(chan []*lockers.Service) go func() { for { select { case <-ctx.Done(): return case srvs, ok := <-membersChan: if !ok { return } a.updateServices(srvs) } } }() err := a.locker.WatchServices(ctx, serviceName, []string{"cluster-name=" + a.Config.Clustering.ClusterName}, membersChan, a.Config.Clustering.ServicesWatchTimer) if err != nil { a.Logger.Printf("failed getting services: %v", err) time.Sleep(retryTimer) goto START } } } func (a *App) updateServices(srvs []*lockers.Service) { a.configLock.Lock() defer a.configLock.Unlock() numNewSrv := len(srvs) numCurrentSrv := len(a.apiServices) a.Logger.Printf("received service update with %d service(s)", numNewSrv) // no new services and no current services, continue if numNewSrv == 0 && numCurrentSrv == 0 { return } // no new services and having some services, delete all if numNewSrv == 0 && numCurrentSrv != 0 { a.Logger.Printf("deleting all services") a.apiServices = make(map[string]*lockers.Service) return } // no current services, add all new services if numCurrentSrv == 0 { for _, s := range srvs { a.Logger.Printf("adding service id %q", s.ID) a.apiServices[s.ID] = s } return } // newSrvs := make(map[string]*lockers.Service) for _, s := range srvs { newSrvs[s.ID] = s } // delete removed services for n := range a.apiServices { if _, ok := newSrvs[n]; !ok { a.Logger.Printf("deleting service id %q", n) delete(a.apiServices, n) } } // add new services for n, s := range newSrvs { a.Logger.Printf("adding service id %q", n) a.apiServices[n] = s } } func (a *App) dispatchTargets(ctx context.Context) { for { select { case <-ctx.Done(): return default: if len(a.apiServices) == 0 { a.Logger.Printf("no services found, waiting...") time.Sleep(a.Config.Clustering.TargetsWatchTimer) continue } a.dispatchLock.Lock() a.dispatchTargetsOnce(ctx) a.dispatchLock.Unlock() select { case <-ctx.Done(): return default: time.Sleep(a.Config.Clustering.TargetsWatchTimer) } } } } func (a *App) dispatchTargetsOnce(ctx context.Context) { dctx, cancel := context.WithTimeout(ctx, a.Config.Clustering.TargetsWatchTimer) defer cancel() for _, tc := range a.Config.Targets { err := a.dispatchTarget(dctx, tc) if err != nil { a.Logger.Printf("failed to dispatch target %q: %v", tc.Name, err) } if err == errNotFound { // no registered services, // no need to continue with other targets, // break from the targets loop break } if err == errNoMoreSuitableServices { // target has no suitable matching services, // continue to next target without wait continue } } } func (a *App) dispatchTarget(ctx context.Context, tc *types.TargetConfig, denied ...string) error { if a.Config.Debug { a.Logger.Printf("checking if %q is locked", tc.Name) } key := fmt.Sprintf("gnmic/%s/targets/%s", a.Config.Clustering.ClusterName, tc.Name) locked, err := a.locker.IsLocked(ctx, key) if err != nil { return err } if a.Config.Debug { a.Logger.Printf("target %q is locked: %v", tc.Name, locked) } if locked { return nil } a.Logger.Printf("dispatching target %q", tc.Name) if denied == nil { denied = make([]string, 0) } SELECTSERVICE: service, err := a.selectService(tc.Tags, denied...) if err != nil { return err } if service == nil { goto SELECTSERVICE } a.Logger.Printf("selected service %+v", service) // assign target to selected service err = a.assignTarget(ctx, tc, service) if err != nil { // add service to denied list and reselect a.Logger.Printf("failed assigning target %q to service %q: %v", tc.Name, service.ID, err) denied = append(denied, service.ID) goto SELECTSERVICE } // wait for lock to be acquired instanceName := "" for _, tag := range service.Tags { splitTag := strings.Split(tag, "=") if len(splitTag) == 2 && splitTag[0] == "instance-name" { instanceName = splitTag[1] } } a.Logger.Printf("[cluster-leader] waiting for lock %q to be acquired by %q", key, instanceName) retries := 0 WAIT: values, err := a.locker.List(ctx, key) if err != nil { a.Logger.Printf("failed getting value of %q: %v", key, err) time.Sleep(lockWaitTime) goto WAIT } if len(values) == 0 { retries++ if (retries+1)*int(lockWaitTime) >= int(a.Config.Clustering.TargetAssignmentTimeout) { a.Logger.Printf("[cluster-leader] max retries reached for target %q and service %q, reselecting...", tc.Name, service.ID) err = a.unassignTarget(ctx, tc.Name, service.ID) if err != nil { a.Logger.Printf("failed to unassign target %q from %q", tc.Name, service.ID) } goto SELECTSERVICE } time.Sleep(lockWaitTime) goto WAIT } if instance, ok := values[key]; ok { if instance == instanceName { a.Logger.Printf("[cluster-leader] lock %q acquired by %q", key, instanceName) return nil } } retries++ if (retries+1)*int(lockWaitTime) >= int(a.Config.Clustering.TargetAssignmentTimeout) { a.Logger.Printf("[cluster-leader] max retries reached for target %q and service %q, reselecting...", tc.Name, service.ID) err = a.unassignTarget(ctx, tc.Name, service.ID) if err != nil { a.Logger.Printf("failed to unassign target %q from %q", tc.Name, service.ID) } goto SELECTSERVICE } time.Sleep(lockWaitTime) goto WAIT } func (a *App) selectService(tags []string, denied ...string) (*lockers.Service, error) { numServices := len(a.apiServices) switch numServices { case 0: return nil, errNotFound case 1: for _, s := range a.apiServices { return s, nil } default: // select instance by tags matchingInstances := make([]string, 0) tagCount := a.getInstancesTagsMatches(tags) if len(tagCount) > 0 { matchingInstances = a.getHighestTagsMatches(tagCount) a.Logger.Printf("current instances with tags=%v: %+v", tags, matchingInstances) } else { for n := range a.apiServices { matchingInstances = append(matchingInstances, strings.TrimSuffix(n, "-api")) } } if len(matchingInstances) == 1 { return a.apiServices[fmt.Sprintf("%s-api", matchingInstances[0])], nil } // select instance by load load, err := a.getInstancesLoad(matchingInstances...) if err != nil { return nil, err } a.Logger.Printf("current instances load: %+v", load) // if there are no locks in place, return a random service if len(load) == 0 { for _, n := range matchingInstances { a.Logger.Printf("selected service name: %s", n) return a.apiServices[fmt.Sprintf("%s-api", n)], nil } } for _, d := range denied { delete(load, strings.TrimSuffix(d, "-api")) } a.Logger.Printf("current instances load after filtering: %+v", load) // all services were denied if len(load) == 0 { return nil, errNoMoreSuitableServices } ss := a.getLowLoadInstance(load) a.Logger.Printf("selected service name: %s", ss) if srv, ok := a.apiServices[fmt.Sprintf("%s-api", ss)]; ok { return srv, nil } return a.apiServices[ss], nil } return nil, errNotFound } func (a *App) getInstancesLoad(instances ...string) (map[string]int, error) { // read all current locks held by the cluster locks, err := a.locker.List(a.ctx, fmt.Sprintf("gnmic/%s/targets", a.Config.Clustering.ClusterName)) if err != nil { return nil, err } if a.Config.Debug { a.Logger.Println("current locks:", locks) } load := make(map[string]int) // using the read locks, calculate the number of targets each instance has locked for _, instance := range locks { if _, ok := load[instance]; !ok { load[instance] = 0 } load[instance]++ } // for instances that are registered but do not have any lock, // add a "0" load for _, s := range a.apiServices { instance := strings.TrimSuffix(s.ID, "-api") if _, ok := load[instance]; !ok { load[instance] = 0 } } if len(instances) > 0 { filteredLoad := make(map[string]int) for _, instance := range instances { if l, ok := load[instance]; ok { filteredLoad[instance] = l } else { filteredLoad[instance] = 0 } } return filteredLoad, nil } return load, nil } // loop through the current cluster load // find the instance with the lowest load func (a *App) getLowLoadInstance(load map[string]int) string { var ss string var low = -1 for s, l := range load { if low < 0 || l < low { ss = s low = l } } return ss } // loop through the current cluster load // find the instance(s) with the highest and lowest load func (a *App) getHighAndLowInstance(load map[string]int) (string, string) { var highIns, lowIns string var high = -1 var low = -1 for s, l := range load { if high < 0 || l > high { highIns = s high = l } if low < 0 || l < low { lowIns = s low = l } } return highIns, lowIns } func (a *App) getTargetToInstanceMapping(ctx context.Context) (map[string]string, error) { locks, err := a.locker.List(ctx, fmt.Sprintf("gnmic/%s/targets", a.Config.Clustering.ClusterName)) if err != nil { return nil, err } if a.Config.Debug { a.Logger.Println("current locks:", locks) } for k, v := range locks { delete(locks, k) locks[filepath.Base(k)] = v } return locks, nil } func (a *App) getInstanceToTargetsMapping(ctx context.Context) (map[string][]string, error) { locks, err := a.locker.List(ctx, fmt.Sprintf("gnmic/%s/targets", a.Config.Clustering.ClusterName)) if err != nil { return nil, err } if a.Config.Debug { a.Logger.Println("current locks:", locks) } rs := make(map[string][]string) for k, v := range locks { if _, ok := rs[v]; !ok { rs[v] = make([]string, 0) } rs[v] = append(rs[v], filepath.Base(k)) } for _, ls := range rs { sort.Strings(ls) } return rs, nil } func (a *App) getInstancesTagsMatches(tags []string) map[string]int { maxMatch := make(map[string]int) numTags := len(tags) if numTags == 0 { return maxMatch } for name, s := range a.apiServices { name = strings.TrimSuffix(name, "-api") maxMatch[name] = 0 for i, tag := range s.Tags { if i+1 > numTags { break } if tag == tags[i] { maxMatch[name]++ continue } break } } return maxMatch } func (a *App) getHighestTagsMatches(tagsCount map[string]int) []string { var ss = make([]string, 0) var high = -1 for s, c := range tagsCount { if high < 0 || c > high { ss = []string{strings.TrimSuffix(s, "-api")} high = c continue } if high == c { ss = append(ss, strings.TrimSuffix(s, "-api")) } } return ss } func (a *App) deleteTarget(ctx context.Context, name string) error { err := a.createAPIClient() if err != nil { return err } errs := make([]error, 0, len(a.apiServices)) for _, s := range a.apiServices { scheme := a.getServiceScheme(s) ctx, cancel := context.WithCancel(ctx) defer cancel() url := fmt.Sprintf("%s://%s/api/v1/config/targets/%s", scheme, s.Address, name) req, err := http.NewRequestWithContext(ctx, http.MethodDelete, url, nil) if err != nil { a.Logger.Printf("failed to create a delete request: %v", err) errs = append(errs, err) continue } rsp, err := a.clusteringClient.Do(req) if err != nil { rsp.Body.Close() a.Logger.Printf("failed deleting target %q: %v", name, err) errs = append(errs, err) continue } rsp.Body.Close() a.Logger.Printf("received response code=%d, for DELETE %s", rsp.StatusCode, url) } if len(errs) == 0 { return nil } return fmt.Errorf("there was %d error(s) while deleting target %q", len(errs), name) } func (a *App) assignTarget(ctx context.Context, tc *types.TargetConfig, service *lockers.Service) error { // encode target config buffer := new(bytes.Buffer) err := json.NewEncoder(buffer).Encode(tc) if err != nil { return err } err = a.createAPIClient() if err != nil { return err } scheme := a.getServiceScheme(service) req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s://%s/api/v1/config/targets", scheme, service.Address), buffer) if err != nil { return err } req.Header.Set("Content-Type", "application/json") resp, err := a.clusteringClient.Do(req) if err != nil { return err } defer resp.Body.Close() a.Logger.Printf("got response code=%d for target %q config add from %q", resp.StatusCode, tc.Name, service.Address) if resp.StatusCode > 200 { return fmt.Errorf("status code=%d", resp.StatusCode) } // send target start req, err = http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s://%s/api/v1/targets/%s", scheme, service.Address, tc.Name), new(bytes.Buffer)) if err != nil { return err } resp, err = a.clusteringClient.Do(req) if err != nil { return err } defer resp.Body.Close() a.Logger.Printf("got response code=%d for target %q assignment from %q", resp.StatusCode, tc.Name, service.Address) if resp.StatusCode > 200 { return fmt.Errorf("status code=%d", resp.StatusCode) } return nil } func (a *App) unassignTarget(ctx context.Context, name string, serviceID string) error { err := a.createAPIClient() if err != nil { return err } if s, ok := a.apiServices[serviceID]; ok { scheme := a.getServiceScheme(s) url := fmt.Sprintf("%s://%s/api/v1/targets/%s", scheme, s.Address, name) ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() req, err := http.NewRequestWithContext(ctx, http.MethodDelete, url, nil) if err != nil { return err } rsp, err := a.clusteringClient.Do(req) if err != nil { return err } defer rsp.Body.Close() a.Logger.Printf("received response code=%d, for DELETE %s", rsp.StatusCode, url) } return nil } func (a *App) getServiceScheme(service *lockers.Service) string { scheme := "http" for _, t := range service.Tags { if strings.HasPrefix(t, protocolTagName+"=") { scheme = strings.Split(t, "=")[1] break } } return scheme } func (a *App) createAPIClient() error { if a.clusteringClient != nil { return nil } // no certs if a.Config.Clustering.TLS == nil { a.clusteringClient = &http.Client{ Timeout: defaultHTTPClientTimeout, Transport: &http.Transport{ TLSClientConfig: &tls.Config{ InsecureSkipVerify: true, }, }, } return nil } // with certs tlsConfig, err := utils.NewTLSConfig( a.Config.Clustering.TLS.CaFile, a.Config.Clustering.TLS.CertFile, a.Config.Clustering.TLS.KeyFile, "", a.Config.Clustering.TLS.SkipVerify, false) if err != nil { return err } a.clusteringClient = &http.Client{ Timeout: defaultHTTPClientTimeout, Transport: &http.Transport{ TLSClientConfig: tlsConfig, }, } return nil } func (a *App) clusterRebalanceTargets() error { a.dispatchLock.Lock() defer a.dispatchLock.Unlock() rebalanceCount := 0 // counts the number of iterations maxIter := -1 // stores the maximum expected number of iterations for { // get most loaded and least loaded load, err := a.getInstancesLoad() if err != nil { return err } highest, lowest := a.getHighAndLowInstance(load) lowLoad := load[lowest] highLoad := load[highest] delta := highLoad - lowLoad if maxIter < 0 { // set max number of iteration to delta/2 maxIter = delta / 2 if maxIter > maxRebalanceLoop { maxIter = maxRebalanceLoop } } a.Logger.Printf("rebalancing: high instance: %s=%d, low instance %s=%d", highest, highLoad, lowest, lowLoad) // nothing to do if delta < 2 { return nil } if rebalanceCount >= maxIter { return nil } // there is some work to do // get highest load instance targets highInstanceTargets, err := a.getInstanceTargets(a.ctx, highest) if err != nil { return err } if len(highInstanceTargets) == 0 { return nil } // pick one and move it to the lowest load instance err = a.unassignTarget(a.ctx, highInstanceTargets[0], highest+"-api") if err != nil { return err } tc, ok := a.Config.Targets[highInstanceTargets[0]] if !ok { return fmt.Errorf("could not find target %s config", highInstanceTargets[0]) } err = a.dispatchTarget(a.ctx, tc) if err != nil { return err } rebalanceCount++ } } ================================================ FILE: pkg/app/clustering_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "sort" "testing" "github.com/google/go-cmp/cmp" "github.com/openconfig/gnmic/pkg/lockers" ) var testSetGetInstancesTagsMatches = map[string]struct { a *App input []string result map[string]int }{ "test1": { a: &App{ apiServices: map[string]*lockers.Service{ "gnmic1-api": { Tags: []string{ "tag1", "tag2", "tag3", }, }, "gnmic2-api": { Tags: []string{ "tag1", "tag2", }, }, "gnmic3-api": {}, }, }, input: []string{ "tag1", "tag2", }, result: map[string]int{ "gnmic1": 2, "gnmic2": 2, "gnmic3": 0, }, }, "test2": { a: &App{ apiServices: map[string]*lockers.Service{ "gnmic1-api": { Tags: []string{ "tag1", "tag2", "tag3", }, }, "gnmic2-api": { Tags: []string{ "tag1", "tag2", }, }, "gnmic3-api": {}, }, }, input: []string{ "tag1", }, result: map[string]int{ "gnmic1": 1, "gnmic2": 1, "gnmic3": 0, }, }, "test3": { a: &App{ apiServices: map[string]*lockers.Service{ "gnmic1-api": { Tags: []string{ "tag1", "tag2", "tag3", }, }, "gnmic2-api": { Tags: []string{ "tag1", "tag2", }, }, "gnmic3-api": {}, }, }, input: []string{}, result: make(map[string]int), }, "test4": { a: &App{ apiServices: map[string]*lockers.Service{ "gnmic1-api": { Tags: []string{ "tag1", "tag2", "tag3", }, }, "gnmic2-api": { Tags: []string{ "tag1", "tag2", }, }, "gnmic3-api": {}, }, }, input: []string{ "tag2", }, result: map[string]int{ "gnmic1": 0, "gnmic2": 0, "gnmic3": 0, }, }, "test5": { a: &App{ apiServices: map[string]*lockers.Service{ "gnmic1-api": { Tags: []string{ "tag1", "tag2", "tag3", }, }, "gnmic2-api": { Tags: []string{ "tag1", "tag2", }, }, "gnmic3-api": { Tags: []string{ "tag1", }, }, }, }, input: []string{ "tag1", "tag2", "tag3", }, result: map[string]int{ "gnmic1": 3, "gnmic2": 2, "gnmic3": 1, }, }, } var testSetGetHighestTagsMatches = map[string]struct { input map[string]int result []string }{ "test1": { input: map[string]int{ "gnmic1": 2, "gnmic2": 2, "gnmic3": 0, }, result: []string{ "gnmic1", "gnmic2", }, }, "test2": { input: map[string]int{ "gnmic1": 0, "gnmic2": 0, "gnmic3": 0, }, result: []string{ "gnmic1", "gnmic2", "gnmic3", }, }, "test3": { input: map[string]int{ "gnmic1": 1, "gnmic2": 1, "gnmic3": 1, }, result: []string{ "gnmic1", "gnmic2", "gnmic3", }, }, "test4": { input: map[string]int{ "gnmic1": 0, "gnmic2": 0, "gnmic3": 0, }, result: []string{ "gnmic1", "gnmic2", "gnmic3", }, }, "test5": { input: map[string]int{ "gnmic1": 3, "gnmic2": 2, "gnmic3": 1, }, result: []string{ "gnmic1", }, }, } func TestGetInstancesTagsMatches(t *testing.T) { for name, item := range testSetGetInstancesTagsMatches { t.Run(name, func(t *testing.T) { res := item.a.getInstancesTagsMatches(item.input) t.Logf("exp value: %+v", item.result) t.Logf("got value: %+v", res) if !cmp.Equal(item.result, res) { t.Fail() } }) } } func TestGetHighestTagsMatches(t *testing.T) { a := &App{} for name, item := range testSetGetHighestTagsMatches { t.Run(name, func(t *testing.T) { res := a.getHighestTagsMatches(item.input) sort.Strings(res) t.Logf("exp value: %+v", item.result) t.Logf("got value: %+v", res) if !cmp.Equal(item.result, res) { t.Fail() } }) } } ================================================ FILE: pkg/app/const.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import "time" const ( defaultGrpcPort = "57400" msgSize = 512 * 1024 * 1024 defaultRetryTimer = 10 * time.Second defaultPprofAddr = "127.0.0.1:6060" formatJSON = "json" formatPROTOJSON = "protojson" formatPROTOTEXT = "prototext" formatEvent = "event" formatPROTO = "proto" formatFLAT = "flat" ) var encodingNames = []string{ "json", "bytes", "proto", "ascii", "json_ietf", } var formatNames = []string{ formatJSON, formatPROTOJSON, formatPROTOTEXT, formatEvent, formatPROTO, formatFLAT, } var tlsVersions = []string{"1.3", "1.2", "1.1", "1.0", "1"} ================================================ FILE: pkg/app/diff.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "context" "errors" "fmt" "io" "os" "reflect" "sort" "strings" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/grpctunnel/tunnel" "github.com/spf13/cobra" "github.com/spf13/pflag" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/config" "github.com/openconfig/gnmic/pkg/formatters" ) type targetDiffResponse struct { t string r *gnmi.GetResponse rs []proto.Message } // InitDiffFlags used to init or reset diffCmd flags for gnmic-prompt mode func (a *App) InitDiffFlags(cmd *cobra.Command) { cmd.ResetFlags() cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.DiffPath, "path", "", []string{}, "diff request paths") cmd.Flags().StringVarP(&a.Config.LocalFlags.DiffRef, "ref", "", "", "reference gNMI target to compare the other targets to") cmd.MarkFlagRequired("ref") cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.DiffCompare, "compare", "", []string{}, "gNMI targets to compare to the reference") cmd.MarkFlagRequired("compare") cmd.Flags().StringVarP(&a.Config.LocalFlags.DiffPrefix, "prefix", "", "", "diff request prefix") cmd.Flags().StringSliceVarP(&a.Config.LocalFlags.DiffModel, "model", "", []string{}, "diff request models") cmd.Flags().StringVarP(&a.Config.LocalFlags.DiffType, "type", "t", "ALL", "data type requested from the target. one of: ALL, CONFIG, STATE, OPERATIONAL") cmd.Flags().StringVarP(&a.Config.LocalFlags.DiffTarget, "target", "", "", "get request target") cmd.Flags().BoolVarP(&a.Config.LocalFlags.DiffSub, "sub", "", false, "use subscribe ONCE mode instead of a get request") cmd.Flags().Uint32VarP(&a.Config.LocalFlags.DiffQos, "qos", "", 0, "QoS marking in case subscribe RPC is used") cmd.LocalFlags().VisitAll(func(flag *pflag.Flag) { a.Config.FileConfig.BindPFlag(fmt.Sprintf("%s-%s", cmd.Name(), flag.Name), flag) }) } func (a *App) DiffPreRunE(cmd *cobra.Command, args []string) error { a.Config.SetLocalFlagsFromFile(cmd) if len(a.Config.LocalFlags.DiffPath) == 0 { a.Config.LocalFlags.DiffPath = []string{"/"} } a.Config.LocalFlags.DiffPath = config.SanitizeArrayFlagValue(a.Config.LocalFlags.DiffPath) a.Config.LocalFlags.DiffModel = config.SanitizeArrayFlagValue(a.Config.LocalFlags.DiffModel) a.Config.LocalFlags.DiffCompare = config.SanitizeArrayFlagValue(a.Config.LocalFlags.DiffCompare) a.createCollectorDialOpts() return a.initTunnelServer(tunnel.ServerConfig{ AddTargetHandler: a.tunServerAddTargetHandler, DeleteTargetHandler: a.tunServerDeleteTargetHandler, RegisterHandler: a.tunServerRegisterHandler, Handler: a.tunServerHandler, }) } func (a *App) DiffRunE(cmd *cobra.Command, args []string) error { defer a.InitDiffFlags(cmd) ctx, cancel := context.WithCancel(context.Background()) defer cancel() // setupCloseHandler(cancel) refTarget, targetsConfig, err := a.Config.GetDiffTargets() if err != nil { return fmt.Errorf("failed getting diff targets config: %v", err) } if refTarget == nil { return fmt.Errorf("failed getting diff reference target config") } if len(targetsConfig) == 0 { return fmt.Errorf("failed getting diff compare targets config") } if !a.PromptMode { // cfg := &collector.Config{ // Debug: a.Config.Debug, // Format: a.Config.Format, // TargetReceiveBuffer: a.Config.TargetBufferSize, // RetryTimer: a.Config.Retry, // } // allTargets := make(map[string]*types.TargetConfig) // for n, tc := range targetsConfig { // allTargets[n] = tc // } // allTargets[refTarget.Name] = refTarget // a.collector = collector.New(cfg, allTargets, // collector.WithDialOptions(a.createCollectorDialOpts()), // collector.WithLogger(a.Logger), // ) } else { // prompt mode a.AddTargetConfig(refTarget) for _, tc := range targetsConfig { a.AddTargetConfig(tc) } } numTargets := len(targetsConfig) + 1 a.errCh = make(chan error, numTargets*2) a.wg.Add(numTargets) compares := make([]*types.TargetConfig, 0, len(targetsConfig)) for _, t := range targetsConfig { compares = append(compares, t) } sort.Slice(compares, func(i, j int) bool { return compares[i].Name < compares[j].Name }) err = a.diff(ctx, cmd, refTarget, compares) if err != nil { a.logError(err) } return a.checkErrors() } func (a *App) diff(ctx context.Context, cmd *cobra.Command, ref *types.TargetConfig, compare []*types.TargetConfig) error { if a.Config.DiffSub { return a.subscribeBasedDiff(ctx, cmd, ref, compare) } return a.getBasedDiff(ctx, ref, compare) } func (a *App) subscribeBasedDiff(ctx context.Context, cmd *cobra.Command, ref *types.TargetConfig, compare []*types.TargetConfig) error { subReq, err := a.Config.CreateDiffSubscribeRequest(cmd) if err != nil { if errors.Is(errors.Unwrap(err), config.ErrConfig) { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } } numCompares := len(compare) refResponse := make([]proto.Message, 0) rspChan := make(chan *targetDiffResponse, numCompares) a.operLock.Lock() refTarget, err := a.initTarget(ref) a.operLock.Unlock() if err != nil { return err } ctx, cancel := context.WithCancel(ctx) defer cancel() go func() { defer a.wg.Done() err = refTarget.CreateGNMIClient(ctx, a.dialOpts...) if err != nil { a.logError(err) return } a.Logger.Printf("sending gNMI SubscribeRequest: subscribe='%+v', mode='%+v', encoding='%+v', to %s", subReq.Request, subReq.GetSubscribe().GetMode(), subReq.GetSubscribe().GetEncoding(), ref) rspChan, errChan := refTarget.SubscribeOnceChan(ctx, subReq) for { select { case r := <-rspChan: switch r.Response.(type) { case *gnmi.SubscribeResponse_Update: refResponse = append(refResponse, r) case *gnmi.SubscribeResponse_SyncResponse: return } case err := <-errChan: if err != io.EOF { a.logError(err) } return } } }() for _, tc := range compare { a.operLock.Lock() t, err := a.initTarget(tc) a.operLock.Unlock() if err != nil { return err } go func(tName string) { defer a.wg.Done() err = t.CreateGNMIClient(ctx, a.dialOpts...) if err != nil { a.logError(err) return } responses := make([]proto.Message, 0) a.Logger.Printf("sending gNMI SubscribeRequest: subscribe='%+v', mode='%+v', encoding='%+v', to %s", subReq.Request, subReq.GetSubscribe().GetMode(), subReq.GetSubscribe().GetEncoding(), tName) subRspChan, errChan := t.SubscribeOnceChan(ctx, subReq) for { select { case r := <-subRspChan: switch r.Response.(type) { case *gnmi.SubscribeResponse_Update: responses = append(responses, r) case *gnmi.SubscribeResponse_SyncResponse: rspChan <- &targetDiffResponse{ t: tName, rs: responses, } return } case err := <-errChan: if err == io.EOF { rspChan <- &targetDiffResponse{ t: tName, rs: responses, } return } a.logError(err) return } } }(tc.Name) continue } a.wg.Wait() close(rspChan) rsps := make([]*targetDiffResponse, 0, numCompares) for r := range rspChan { rsps = append(rsps, r) } if len(rsps) == 0 { a.Logger.Printf("missing response(s)") return fmt.Errorf("missing response(s)") } for _, cr := range rsps { fmt.Fprintf(os.Stderr, "%q vs %q\n", ref.Name, cr.t) err = a.responsesDiff(refResponse, cr.rs) if err != nil { a.logError(err) } } return nil } func (a *App) getBasedDiff(ctx context.Context, ref *types.TargetConfig, compare []*types.TargetConfig) error { getReq, err := a.Config.CreateDiffGetRequest() if err != nil { return err } ctx, cancel := context.WithCancel(ctx) defer cancel() var refResponse proto.Message numCompares := len(compare) go func() { defer a.wg.Done() a.Logger.Printf("sending gNMI GetRequest: prefix='%v', path='%v', type='%v', encoding='%v', models='%+v', extension='%+v' to %s", getReq.Prefix, getReq.Path, getReq.Type, getReq.Encoding, getReq.UseModels, getReq.Extension, ref) refResponse, err = a.ClientGet(ctx, ref, getReq) if err != nil { a.logError(fmt.Errorf("target %q get request failed: %v", ref, err)) return } }() rspChan := make(chan *targetDiffResponse, numCompares) for _, tc := range compare { go func(tc *types.TargetConfig) { defer a.wg.Done() a.Logger.Printf("sending gNMI GetRequest: prefix='%v', path='%v', type='%v', encoding='%v', models='%+v', extension='%+v' to %s", getReq.Prefix, getReq.Path, getReq.Type, getReq.Encoding, getReq.UseModels, getReq.Extension, tc.Name) response, err := a.ClientGet(ctx, tc, getReq) if err != nil { a.logError(fmt.Errorf("target %q get request failed: %v", tc.Name, err)) return } rspChan <- &targetDiffResponse{ t: tc.Name, r: response, } }(tc) } a.wg.Wait() close(rspChan) rsps := make([]*targetDiffResponse, 0, numCompares) for r := range rspChan { rsps = append(rsps, r) } if len(rsps) == 0 { return fmt.Errorf("no responses received") } sort.Slice(rsps, func(i, j int) bool { return rsps[i].t < rsps[j].t }) for _, cr := range rsps { fmt.Fprintf(os.Stderr, "%q vs %q\n", ref.Name, cr.t) err = a.responsesDiff([]proto.Message{refResponse}, []proto.Message{cr.r}) if err != nil { a.logError(err) } } return nil } func (a *App) responsesDiff(r1, r2 []proto.Message) error { rs1, err := formatters.ResponsesFlat(r1...) if err != nil { return err } rs2, err := formatters.ResponsesFlat(r2...) if err != nil { return err } var df diffs for p, v := range rs1 { if v2, ok := rs2[p]; ok { if !reflect.DeepEqual(v, v2) { df = append(df, diff{add: false, path: p, value: fmt.Sprintf("%v", v)}) df = append(df, diff{add: true, path: p, value: fmt.Sprintf("%v", v2)}) } delete(rs2, p) continue } df = append(df, diff{add: false, path: p, value: fmt.Sprintf("%v", v)}) continue } for p, v := range rs2 { df = append(df, diff{add: true, path: p, value: fmt.Sprintf("%v", v)}) } sort.Slice(df, func(i, j int) bool { return df[i].path < df[j].path }) fmt.Println(df) return nil } type diff struct { add bool path string value string } type diffs []diff func (ds diffs) String() string { ml := 0 for _, d := range ds { lp := len(d.path) if lp > ml { ml = lp } } tpl := fmt.Sprintf("%%-%ds", ml) sb := new(strings.Builder) numDiffs := len(ds) for i, d := range ds { if d.add { sb.WriteString("+\t") } else { sb.WriteString("-\t") } sb.WriteString(fmt.Sprintf(tpl, d.path)) sb.WriteString(": ") sb.WriteString(d.value) if i < numDiffs-1 { sb.WriteString("\n") } } return sb.String() } ================================================ FILE: pkg/app/generate.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "encoding/json" "errors" "fmt" "os" "path/filepath" "regexp" "sort" "strings" "github.com/huandu/xstrings" "github.com/openconfig/goyang/pkg/yang" "github.com/spf13/cobra" "github.com/spf13/pflag" "gopkg.in/yaml.v2" "github.com/openconfig/gnmic/pkg/api/path" "github.com/openconfig/gnmic/pkg/config" ) // options for formatting keys when generating yaml/json payloads type keyOpts struct { camelCase bool snakeCase bool } func (ko *keyOpts) format(s string) string { if ko.camelCase { return xstrings.ToCamelCase(s) } if ko.snakeCase { return xstrings.ToSnakeCase(s) } return s } func (a *App) GenerateRunE(cmd *cobra.Command, args []string) error { defer a.InitGenerateFlags(cmd) var output = os.Stdout if a.Config.GenerateOutput != "" { f, err := os.OpenFile(a.Config.GenerateOutput, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) if err != nil { return err } defer f.Close() output = f } err := a.generateYangSchema(a.Config.GlobalFlags.File, a.Config.GlobalFlags.Exclude) if err != nil { return err } m := make(map[string]interface{}) kOpts := &keyOpts{ camelCase: a.Config.LocalFlags.GenerateCamelCase, snakeCase: a.Config.LocalFlags.GenerateSnakeCase, } for _, e := range a.SchemaTree.Dir { e.FixChoice() nm := toMap(e, a.Config.GenerateConfigOnly, kOpts) if nm == nil { continue } switch nm := nm.(type) { case map[string]interface{}: for k, v := range nm { m[kOpts.format(k)] = v } case []interface{}, string: m[kOpts.format(e.Name)] = nm } } v, err := getSubMapByPath(a.Config.GeneratePath, m, kOpts) if err != nil { return err } if output != os.Stdout { err = output.Truncate(0) if err != nil { return err } } if a.Config.GenerateJSON { enc := json.NewEncoder(output) enc.SetIndent("", " ") return enc.Encode(v) } return yaml.NewEncoder(output).Encode(v) } func (a *App) GeneratePreRunE(cmd *cobra.Command, args []string) error { a.Config.SetLocalFlagsFromFile(cmd) if a.Config.LocalFlags.GenerateCamelCase && a.Config.LocalFlags.GenerateSnakeCase { return errors.New("flags --camel-case and --snake-case are mutually exclusive") } return a.yangFilesPreProcessing() } func (a *App) yangFilesPreProcessing() error { a.Config.GlobalFlags.Dir = config.SanitizeArrayFlagValue(a.Config.GlobalFlags.Dir) a.Config.GlobalFlags.File = config.SanitizeArrayFlagValue(a.Config.GlobalFlags.File) a.Config.GlobalFlags.Exclude = config.SanitizeArrayFlagValue(a.Config.GlobalFlags.Exclude) var err error a.Config.GlobalFlags.Dir, err = resolveGlobs(a.Config.GlobalFlags.Dir) if err != nil { return err } a.Config.GlobalFlags.File, err = resolveGlobs(a.Config.GlobalFlags.File) if err != nil { return err } a.modules = yang.NewModules() for _, dirpath := range a.Config.GlobalFlags.Dir { expanded, err := yang.PathsWithModules(dirpath) if err != nil { return err } if a.Config.Debug { for _, fdir := range expanded { a.Logger.Printf("adding %s to YANG paths", fdir) } } a.modules.AddPath(expanded...) } yfiles, err := findYangFiles(a.Config.GlobalFlags.File) if err != nil { return err } a.Config.GlobalFlags.File = make([]string, 0, len(yfiles)) a.Config.GlobalFlags.File = append(a.Config.GlobalFlags.File, yfiles...) if a.Config.Debug { for _, file := range a.Config.GlobalFlags.File { a.Logger.Printf("loading %s file", file) } } return nil } func (a *App) GenerateSetRequestRunE(cmd *cobra.Command, args []string) error { defer a.InitGenerateSetRequestFlags(cmd) var output = os.Stdout if a.Config.GenerateOutput != "" { f, err := os.OpenFile(a.Config.GenerateOutput, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) if err != nil { return err } defer f.Close() output = f } err := a.generateYangSchema(a.Config.GlobalFlags.File, a.Config.GlobalFlags.Exclude) if err != nil { return err } m := make(map[string]interface{}) for _, e := range a.SchemaTree.Dir { e.FixChoice() nm := toMap(e, true, new(keyOpts)) if nm == nil { continue } switch nm := nm.(type) { case map[string]interface{}: for k, v := range nm { m[k] = v } default: m[e.Name] = nm } } setReqFile, err := a.createSetRequestFile(m) if err != nil { return err } if output != os.Stdout { err = output.Truncate(0) if err != nil { return err } } if a.Config.GenerateJSON { enc := json.NewEncoder(output) enc.SetIndent("", " ") return enc.Encode(setReqFile) } return yaml.NewEncoder(output).Encode(setReqFile) } func (a *App) InitGenerateFlags(cmd *cobra.Command) { cmd.ResetFlags() // persistent flags cmd.PersistentFlags().StringVarP(&a.Config.LocalFlags.GenerateOutput, "output", "o", "", "output file, defaults to stdout") cmd.PersistentFlags().BoolVarP(&a.Config.LocalFlags.GenerateJSON, "json", "j", false, "generate output as JSON format instead of YAML") // local flags cmd.Flags().BoolVarP(&a.Config.LocalFlags.GenerateConfigOnly, "config-only", "", false, "generate output from YANG config nodes only") cmd.Flags().StringVarP(&a.Config.LocalFlags.GeneratePath, "path", "", "", "generate marshaled YANG body under specified path") cmd.Flags().BoolVarP(&a.Config.LocalFlags.GenerateCamelCase, "camel-case", "", false, "convert keys to camelCase") cmd.Flags().BoolVarP(&a.Config.LocalFlags.GenerateSnakeCase, "snake-case", "", false, "convert keys to snake_case") cmd.Flags().VisitAll(func(flag *pflag.Flag) { a.Config.FileConfig.BindPFlag(fmt.Sprintf("%s-%s", cmd.Name(), flag.Name), flag) }) } func (a *App) InitGenerateSetRequestFlags(cmd *cobra.Command) { cmd.ResetFlags() cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.GenerateSetRequestReplacePath, "replace", "", []string{}, "replace path") cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.GenerateSetRequestUpdatePath, "update", "", []string{}, "update path") cmd.LocalFlags().VisitAll(func(flag *pflag.Flag) { a.Config.FileConfig.BindPFlag(fmt.Sprintf("%s-%s", cmd.Name(), flag.Name), flag) }) } func (a *App) generateYangSchema(files, excludes []string) error { if len(files) == 0 { return nil } for _, name := range files { if err := a.modules.Read(name); err != nil { return err } } if errors := a.modules.Process(); len(errors) > 0 { for _, e := range errors { fmt.Fprintf(os.Stderr, "yang processing error: %v\n", e) } return fmt.Errorf("yang processing failed with %d errors", len(errors)) } // Keep track of the top level modules we read in. // Those are the only modules we want to print below. mods := map[string]*yang.Module{} var names []string for _, m := range a.modules.Modules { if mods[m.Name] == nil { mods[m.Name] = m names = append(names, m.Name) } } sort.Strings(names) entries := make([]*yang.Entry, len(names)) for x, n := range names { entries[x] = yang.ToEntry(mods[n]) } a.SchemaTree = buildRootEntry() excludeRegexes := make([]*regexp.Regexp, 0, len(excludes)) for _, e := range excludes { r, err := regexp.Compile(e) if err != nil { return err } excludeRegexes = append(excludeRegexes, r) } for _, entry := range entries { skip := false for _, r := range excludeRegexes { if r.MatchString(entry.Name) { a.Logger.Printf("skipping %s", entry.Name) skip = true break } } if !skip { updateAnnotation(entry) a.SchemaTree.Dir[entry.Name] = entry } } return nil } func (a *App) createSetRequestFile(m map[string]interface{}) (*config.SetRequestFile, error) { setReqFile := &config.SetRequestFile{ Replaces: make([]*config.UpdateItem, 0, len(a.Config.GenerateSetRequestReplacePath)), Updates: make([]*config.UpdateItem, 0, len(a.Config.GenerateSetRequestUpdatePath)), } var enc string if strings.ToUpper(a.Config.Encoding) != "JSON" { enc = strings.ToUpper(a.Config.Encoding) } if len(a.Config.GenerateSetRequestReplacePath)+len(a.Config.GenerateSetRequestUpdatePath) == 0 { sortedKeys := make([]string, 0, len(m)) for k := range m { sortedKeys = append(sortedKeys, k) } sort.Strings(sortedKeys) for _, n := range sortedKeys { setReqFile.Replaces = append(setReqFile.Replaces, &config.UpdateItem{ Path: fmt.Sprintf("/%s", n), Encoding: enc, Value: m[n], }) } return setReqFile, nil } for _, p := range a.Config.GenerateSetRequestReplacePath { uItem, err := pathToUpdateItem(p, m, new(keyOpts)) if err != nil { return nil, err } uItem.Encoding = enc setReqFile.Replaces = append(setReqFile.Replaces, uItem) } for _, p := range a.Config.GenerateSetRequestUpdatePath { uItem, err := pathToUpdateItem(p, m, new(keyOpts)) if err != nil { return nil, err } uItem.Encoding = enc setReqFile.Updates = append(setReqFile.Updates, uItem) } return setReqFile, nil } func buildRootEntry() *yang.Entry { return &yang.Entry{ Name: "root", Kind: yang.DirectoryEntry, Dir: make(map[string]*yang.Entry), Annotation: map[string]interface{}{ "schemapath": "/", "root": true, }, } } // updateAnnotation updates the schema info before encoding. func updateAnnotation(entry *yang.Entry) { for _, child := range entry.Dir { updateAnnotation(child) child.Annotation = map[string]interface{}{} t := child.Type if t == nil { continue } switch t.Kind { case yang.Ybits: nameMap := t.Bit.NameMap() bits := make([]string, 0, len(nameMap)) for bitstr := range nameMap { bits = append(bits, bitstr) } child.Annotation["bits"] = bits case yang.Yenum: nameMap := t.Enum.NameMap() enum := make([]string, 0, len(nameMap)) for enumstr := range nameMap { enum = append(enum, enumstr) } child.Annotation["enum"] = enum case yang.Yidentityref: identities := make([]string, 0, len(t.IdentityBase.Values)) for i := range t.IdentityBase.Values { identities = append(identities, t.IdentityBase.Values[i].PrefixedName()) } child.Annotation["prefix-qualified-identities"] = identities } if t.Root != nil { child.Annotation["root.type"] = t.Root.Name } } } func toMap(e *yang.Entry, configOnly bool, kopts *keyOpts) interface{} { if e == nil { return nil } if e.Config == yang.TSFalse && configOnly { return nil } m := make(map[string]interface{}) switch { case e.Dir == nil && e.ListAttr != nil: // leaf-list if e.Config == yang.TSFalse && configOnly { return nil } return e.Default case e.Dir == nil: // leaf if e.Config == yang.TSFalse && configOnly { return nil } if len(e.Default) > 0 { return e.Default[0] } return "" case e.ListAttr != nil: // list for n, child := range e.Dir { gChild := toMap(child, configOnly, kopts) switch gChild := gChild.(type) { case map[string]interface{}: for k, v := range gChild { m[kopts.format(k)] = v } case []interface{}, []string, string: m[kopts.format(n)] = gChild } } return []interface{}{m} default: // container nm := make(map[string]interface{}) for n, child := range e.Dir { if child.IsCase() || child.IsChoice() { for _, gchild := range child.Dir { nnm := toMap(gchild, configOnly, kopts) switch nnm := nnm.(type) { case map[string]interface{}: if child.IsChoice() { for k, v := range nnm { nm[kopts.format(k)] = v } } case nil: default: nm[kopts.format(n)] = nnm } } continue } nnm := toMap(child, configOnly, kopts) if nnm == nil { continue } nm[kopts.format(n)] = nnm } if e.Parent != nil && e.Parent.IsList() && !(e.IsCase() || e.IsChoice()) { m[kopts.format(e.Name)] = nm return m } for k, v := range nm { m[kopts.format(k)] = v } return m } } func pathToUpdateItem(p string, m map[string]interface{}, kopts *keyOpts) (*config.UpdateItem, error) { v, err := getSubMapByPath(p, m, kopts) return &config.UpdateItem{ Path: p, Value: v, }, err } func getSubMapByPath(p string, m map[string]interface{}, kopts *keyOpts) (interface{}, error) { if p == "" || p == "/" { return m, nil } // strip path from keys if any gp, err := path.ParsePath(p) if err != nil { return nil, fmt.Errorf("failed to parse xpath %q: %v", p, err) } pItems := make([]string, 0, len(gp.Elem)) for _, e := range gp.Elem { if e.Name != "" { pItems = append(pItems, kopts.format(e.Name)) } } // get value body recursively from map var rVal interface{} rVal = m for _, item := range pItems { switch rValm := rVal.(type) { case map[string]interface{}: if r, ok := rValm[item]; ok { rVal = r } else { return nil, fmt.Errorf("unknown path item %q in path %q", item, p) } case []interface{}: if len(rValm) != 1 { return nil, fmt.Errorf("got list with more than 1 item ?") } switch rValmn := rValm[0].(type) { case map[string]interface{}: if r, ok := rValmn[item]; ok { rVal = r } else { return nil, fmt.Errorf("unknown path item %q in path %q", item, p) } } default: return nil, fmt.Errorf("unexpected sub map format @%q: %T", item, rVal) } } return rVal, nil } ////// func resolveGlobs(globs []string) ([]string, error) { results := make([]string, 0, len(globs)) for _, pattern := range globs { for _, p := range strings.Split(pattern, ",") { if strings.ContainsAny(p, `*?[`) { // is a glob pattern matches, err := filepath.Glob(p) if err != nil { return nil, err } results = append(results, matches...) } else { // is not a glob pattern ( file or dir ) results = append(results, p) } } } return config.ExpandOSPaths(results) } func walkDir(path, ext string) ([]string, error) { fs := make([]string, 0) err := filepath.Walk(path, func(path string, _ os.FileInfo, err error) error { if err != nil { return err } fi, err := os.Stat(path) if err != nil { return err } switch mode := fi.Mode(); { case mode.IsRegular(): if filepath.Ext(path) == ext { fs = append(fs, path) } } return nil }) if err != nil { return nil, err } return fs, nil } func findYangFiles(files []string) ([]string, error) { yfiles := make([]string, 0, len(files)) for _, file := range files { fi, err := os.Stat(file) if err != nil { return nil, err } switch mode := fi.Mode(); { case mode.IsDir(): fls, err := walkDir(file, ".yang") if err != nil { return nil, err } yfiles = append(yfiles, fls...) case mode.IsRegular(): if filepath.Ext(file) == ".yang" { yfiles = append(yfiles, file) } } } return yfiles, nil } ================================================ FILE: pkg/app/generatePath.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "errors" "fmt" "github.com/spf13/cobra" "github.com/spf13/pflag" ) func (a *App) GeneratePathPreRunE(cmd *cobra.Command, args []string) error { a.Config.SetLocalFlagsFromFile(cmd) if a.Config.GeneratePathSearch && a.Config.GeneratePathWithDescr { return errors.New("flags --search and --descr cannot be used together") } if a.Config.LocalFlags.GeneratePathPathType != "xpath" && a.Config.LocalFlags.GeneratePathPathType != "gnmi" { return errors.New("path-type must be one of 'xpath' or 'gnmi'") } return nil } func (a *App) GeneratePathRunE(cmd *cobra.Command, args []string) error { return a.PathCmdRun( a.Config.GlobalFlags.Dir, a.Config.GlobalFlags.File, a.Config.GlobalFlags.Exclude, pathGenOpts{ search: a.Config.LocalFlags.GeneratePathSearch, withDescr: a.Config.LocalFlags.GeneratePathWithDescr, withTypes: a.Config.LocalFlags.GeneratePathWithTypes, withPrefix: a.Config.LocalFlags.GeneratePathWithPrefix, pathType: a.Config.LocalFlags.GeneratePathPathType, stateOnly: a.Config.LocalFlags.GeneratePathState, configOnly: a.Config.LocalFlags.GeneratePathConfig, json: a.Config.LocalFlags.GenerateJSON, withNonLeaves: a.Config.LocalFlags.GeneratePathWithNonLeaves, }, ) } func (a *App) InitGeneratePathFlags(cmd *cobra.Command) { cmd.ResetFlags() cmd.Flags().StringVarP(&a.Config.LocalFlags.GeneratePathPathType, "path-type", "", "xpath", "path type xpath or gnmi") cmd.Flags().BoolVarP(&a.Config.LocalFlags.GeneratePathWithDescr, "descr", "", false, "print leaf description") cmd.Flags().BoolVarP(&a.Config.LocalFlags.GeneratePathWithPrefix, "with-prefix", "", false, "include module/submodule prefix in path elements") cmd.Flags().BoolVarP(&a.Config.LocalFlags.GeneratePathWithTypes, "types", "", false, "print leaf type") cmd.Flags().BoolVarP(&a.Config.LocalFlags.GeneratePathSearch, "search", "", false, "search through path list") cmd.Flags().BoolVarP(&a.Config.LocalFlags.GeneratePathState, "state-only", "", false, "generate paths only for YANG leafs representing state data") cmd.Flags().BoolVarP(&a.Config.LocalFlags.GeneratePathConfig, "config-only", "", false, "generate paths only for YANG leafs representing config data") cmd.Flags().BoolVarP(&a.Config.LocalFlags.GeneratePathWithNonLeaves, "with-non-leaves", "", false, "also generate paths for non-leaf nodes") cmd.LocalFlags().VisitAll(func(flag *pflag.Flag) { a.Config.FileConfig.BindPFlag(fmt.Sprintf("%s-%s", cmd.Name(), flag.Name), flag) }) } ================================================ FILE: pkg/app/get.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "context" "encoding/json" "fmt" "strings" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/grpctunnel/tunnel" "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/config" "github.com/openconfig/gnmic/pkg/formatters" ) func (a *App) GetPreRunE(cmd *cobra.Command, args []string) error { a.Config.SetLocalFlagsFromFile(cmd) a.Config.LocalFlags.GetPath = config.SanitizeArrayFlagValue(a.Config.LocalFlags.GetPath) a.Config.LocalFlags.GetModel = config.SanitizeArrayFlagValue(a.Config.LocalFlags.GetModel) a.Config.LocalFlags.GetProcessor = config.SanitizeArrayFlagValue(a.Config.LocalFlags.GetProcessor) err := a.initPluginManager() if err != nil { return err } a.createCollectorDialOpts() return a.initTunnelServer(tunnel.ServerConfig{ AddTargetHandler: a.tunServerAddTargetHandler, DeleteTargetHandler: a.tunServerDeleteTargetHandler, RegisterHandler: a.tunServerRegisterHandler, Handler: a.tunServerHandler, }) } func (a *App) GetRun(cmd *cobra.Command, args []string) error { defer a.InitGetFlags(cmd) ctx, cancel := context.WithCancel(context.Background()) defer cancel() // setupCloseHandler(cancel) targetsConfig, err := a.GetTargets() if err != nil { return fmt.Errorf("failed getting targets config: %v", err) } _, err = a.Config.GetActions() if err != nil { return fmt.Errorf("failed reading actions config: %v", err) } evps, err := a.intializeEventProcessors() if err != nil { return fmt.Errorf("failed to init event processors: %v", err) } if a.PromptMode { // prompt mode for _, tc := range targetsConfig { a.AddTargetConfig(tc) } } // event format if len(a.Config.GetProcessor) > 0 { a.Config.Format = formatEvent } if a.Config.Format == formatEvent { return a.handleGetRequestEvent(ctx, evps) } // other formats numTargets := len(a.Config.Targets) a.errCh = make(chan error, numTargets*3) a.wg.Add(numTargets) for _, tc := range a.Config.Targets { go a.GetRequest(ctx, tc) } a.wg.Wait() err = a.checkErrors() if err != nil { return err } return nil } func (a *App) GetRequest(ctx context.Context, tc *types.TargetConfig) { defer a.wg.Done() req, err := a.Config.CreateGetRequest(tc) if err != nil { a.logError(fmt.Errorf("target %q building Get request failed: %v", tc.Name, err)) return } response, err := a.getRequest(ctx, tc, req) if err != nil { a.logError(fmt.Errorf("target %q Get request failed: %v", tc.Name, err)) return } if response == nil { return } err = a.PrintMsg(tc.Name, "Get Response:", response) if err != nil { a.logError(fmt.Errorf("target %q: %v", tc.Name, err)) } } func (a *App) getRequest(ctx context.Context, tc *types.TargetConfig, req *gnmi.GetRequest) (*gnmi.GetResponse, error) { xreq := req if len(a.Config.LocalFlags.GetModel) > 0 { spModels, unspModels, err := a.filterModels(ctx, tc, a.Config.LocalFlags.GetModel) if err != nil { a.logError(fmt.Errorf("failed getting supported models from %q: %v", tc.Name, err)) return nil, err } if len(unspModels) > 0 { a.logError(fmt.Errorf("found unsupported models for target %q: %+v", tc.Name, unspModels)) } for _, m := range spModels { xreq.UseModels = append(xreq.UseModels, m) } } if a.Config.PrintRequest || a.Config.GetDryRun { err := a.PrintMsg(tc.Name, "Get Request:", req) if err != nil { a.logError(fmt.Errorf("target %q Get Request printing failed: %v", tc.Name, err)) } } if a.Config.GetDryRun { return nil, nil } a.Logger.Printf("sending gNMI GetRequest: prefix='%v', path='%v', type='%v', encoding='%v', models='%+v', extension='%+v' to %s", xreq.Prefix, xreq.Path, xreq.Type, xreq.Encoding, xreq.UseModels, xreq.Extension, tc.Name) response, err := a.ClientGet(ctx, tc, xreq) if err != nil { return nil, err } return response, nil } func (a *App) getModels(ctx context.Context, tc *types.TargetConfig) ([]*gnmi.ModelData, error) { capRsp, err := a.ClientCapabilities(ctx, tc) if err != nil { return nil, err } return capRsp.GetSupportedModels(), nil } func (a *App) filterModels(ctx context.Context, tc *types.TargetConfig, modelsNames []string) (map[string]*gnmi.ModelData, []string, error) { supModels, err := a.getModels(ctx, tc) if err != nil { return nil, nil, err } unsupportedModels := make([]string, 0) supportedModels := make(map[string]*gnmi.ModelData) var found bool for _, m := range modelsNames { found = false modelName := m var organization *string var version *string if strings.Contains(modelName, "/") { parts := strings.SplitN(modelName, "/", 2) organization = &parts[0] modelName = parts[1] } if strings.Contains(modelName, ":") { parts := strings.SplitN(modelName, ":", 2) modelName = parts[0] version = &parts[1] } for _, tModel := range supModels { if modelName == tModel.Name && (organization == nil || *organization == tModel.Organization) && (version == nil || *version == tModel.Version) { supportedModels[m] = tModel found = true break } } if !found { unsupportedModels = append(unsupportedModels, m) } } return supportedModels, unsupportedModels, nil } // InitGetFlags used to init or reset getCmd flags for gnmic-prompt mode func (a *App) InitGetFlags(cmd *cobra.Command) { cmd.ResetFlags() cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.GetPath, "path", "", []string{}, "get request paths") cmd.MarkFlagRequired("path") cmd.Flags().StringVarP(&a.Config.LocalFlags.GetPrefix, "prefix", "", "", "get request prefix") cmd.Flags().StringSliceVarP(&a.Config.LocalFlags.GetModel, "model", "", []string{}, "get request models") cmd.Flags().StringVarP(&a.Config.LocalFlags.GetType, "type", "t", "ALL", "data type requested from the target. one of: ALL, CONFIG, STATE, OPERATIONAL") cmd.Flags().StringVarP(&a.Config.LocalFlags.GetTarget, "target", "", "", "get request target") cmd.Flags().BoolVarP(&a.Config.LocalFlags.GetValuesOnly, "values-only", "", false, "print GetResponse values only") cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.GetProcessor, "processor", "", []string{}, "list of processor names to run") cmd.Flags().Uint32VarP(&a.Config.LocalFlags.GetDepth, "depth", "", 0, "depth extension value") cmd.Flags().BoolVarP(&a.Config.LocalFlags.GetDryRun, "dry-run", "", false, "prints the get request without initiating a gRPC connection") cmd.LocalFlags().VisitAll(func(flag *pflag.Flag) { a.Config.FileConfig.BindPFlag(fmt.Sprintf("%s-%s", cmd.Name(), flag.Name), flag) }) } func (a *App) intializeEventProcessors() ([]formatters.EventProcessor, error) { _, err := a.Config.GetEventProcessors() if err != nil { return nil, fmt.Errorf("failed reading event processors config: %v", err) } var evps = make([]formatters.EventProcessor, 0) for _, epName := range a.Config.GetProcessor { if epCfg, ok := a.Config.Processors[epName]; ok { epType := "" for k := range epCfg { epType = k break } if in, ok := formatters.EventProcessors[epType]; ok { ep := in() err := ep.Init(epCfg[epType], formatters.WithLogger(a.Logger), formatters.WithTargets(a.Config.Targets), formatters.WithActions(a.Config.Actions), ) if err != nil { return nil, fmt.Errorf("failed initializing event processor '%s' of type='%s': %v", epName, epType, err) } evps = append(evps, ep) continue } return nil, fmt.Errorf("%q event processor has an unknown type=%q", epName, epType) } return nil, fmt.Errorf("%q event processor not found", epName) } return evps, nil } func (a *App) handleGetRequestEvent(ctx context.Context, evps []formatters.EventProcessor) error { numTargets := len(a.Config.Targets) a.errCh = make(chan error, numTargets*3) a.wg.Add(numTargets) rsps := make(chan *getResponseEvents, numTargets) for _, tc := range a.Config.Targets { go func(tc *types.TargetConfig) { defer a.wg.Done() req, err := a.Config.CreateGetRequest(tc) if err != nil { a.errCh <- err return } resp, err := a.getRequest(ctx, tc, req) if err != nil { a.errCh <- err return } evs, err := formatters.GetResponseToEventMsgs(resp, map[string]string{"source": tc.Name}, evps...) if err != nil { a.errCh <- err } rsps <- &getResponseEvents{name: tc.Name, rsp: evs} }(tc) } a.wg.Wait() close(rsps) responses := make(map[string][]*formatters.EventMsg) for r := range rsps { responses[r.name] = r.rsp } err := a.checkErrors() if err != nil { return err } // sb := strings.Builder{} for name, r := range responses { sb.Reset() printPrefix := "" if len(a.Config.TargetsList()) > 1 && !a.Config.NoPrefix { printPrefix = fmt.Sprintf("[%s] ", name) } b, err := json.MarshalIndent(r, "", " ") if err != nil { return err } sb.Write(b) fmt.Fprintf(a.out, "%s\n", indent(printPrefix, sb.String())) } return nil } type getResponseEvents struct { // target name name string rsp []*formatters.EventMsg } ================================================ FILE: pkg/app/getset.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "context" "encoding/json" "errors" "fmt" "github.com/itchyny/gojq" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/grpctunnel/tunnel" "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/config" "github.com/openconfig/gnmic/pkg/formatters" ) func (a *App) GetSetPreRunE(cmd *cobra.Command, args []string) error { a.Config.SetLocalFlagsFromFile(cmd) a.Config.LocalFlags.GetSetModel = config.SanitizeArrayFlagValue(a.Config.LocalFlags.GetSetModel) a.createCollectorDialOpts() return a.initTunnelServer(tunnel.ServerConfig{ AddTargetHandler: a.tunServerAddTargetHandler, DeleteTargetHandler: a.tunServerDeleteTargetHandler, RegisterHandler: a.tunServerRegisterHandler, Handler: a.tunServerHandler, }) } func (a *App) GetSetRunE(cmd *cobra.Command, args []string) error { defer a.InitGetSetFlags(cmd) if a.Config.Format == formatEvent { return fmt.Errorf("format event not supported for GetSet RPC") } ctx, cancel := context.WithCancel(context.Background()) defer cancel() // setupCloseHandler(cancel) targetsConfig, err := a.GetTargets() if err != nil { return fmt.Errorf("failed getting targets config: %v", err) } if !a.PromptMode { for _, tc := range targetsConfig { a.AddTargetConfig(tc) } } req, err := a.Config.CreateGASGetRequest() if err != nil { return err } numTargets := len(a.Config.Targets) a.errCh = make(chan error, numTargets*3) a.wg.Add(numTargets) for _, tc := range a.Config.Targets { go a.GetSetRequest(ctx, tc, req) } a.wg.Wait() return a.checkErrors() } func (a *App) GetSetRequest(ctx context.Context, tc *types.TargetConfig, req *gnmi.GetRequest) { defer a.wg.Done() xreq := req if len(a.Config.LocalFlags.GetSetModel) > 0 { spModels, unspModels, err := a.filterModels(ctx, tc, a.Config.LocalFlags.GetSetModel) if err != nil { a.logError(fmt.Errorf("failed getting supported models from %q: %v", tc.Name, err)) return } if len(unspModels) > 0 { a.logError(fmt.Errorf("found unsupported models for target %q: %+v", tc.Name, unspModels)) } for _, m := range spModels { xreq.UseModels = append(xreq.UseModels, m) } } if a.Config.PrintRequest { err := a.PrintMsg(tc.Name, "Get Request:", req) if err != nil { a.logError(fmt.Errorf("target %q Get Request printing failed: %v", tc.Name, err)) } } a.Logger.Printf("sending gNMI GetRequest: prefix='%v', path='%v', type='%v', encoding='%v', models='%+v', extension='%+v' to %s", xreq.Prefix, xreq.Path, xreq.Type, xreq.Encoding, xreq.UseModels, xreq.Extension, tc.Name) response, err := a.ClientGet(ctx, tc, xreq) if err != nil { a.logError(fmt.Errorf("target %q get request failed: %v", tc.Name, err)) return } err = a.PrintMsg(tc.Name, "Get Response:", response) if err != nil { a.logError(fmt.Errorf("target %q: %v", tc.Name, err)) } // q, err := gojq.Parse(a.Config.LocalFlags.GetSetCondition) if err != nil { a.logError(err) return } code, err := gojq.Compile(q) if err != nil { a.logError(err) return } mo := formatters.MarshalOptions{Format: "json"} b, err := mo.Marshal(response, map[string]string{"address": tc.Name}) if err != nil { a.logError(fmt.Errorf("error marshaling message: %v", err)) return } var input interface{} err = json.Unmarshal(b, &input) if err != nil { a.logError(fmt.Errorf("error unmarshaling message: %v", err)) return } iter := code.Run(input) var ok bool res, ok := iter.Next() if !ok { a.logError(fmt.Errorf("unexpected jq result type: %v", res)) // iterator not done, so the final result won't be a boolean return } if err, ok = res.(error); ok { if err != nil { a.logError(fmt.Errorf("condition evaluation failed: %v", err)) return } } switch res := res.(type) { case bool: a.Logger.Printf("GetSet condition evaluated to %v", res) if res { setReq, err := a.Config.CreateGASSetRequest(input) if err != nil { a.logError(err) return } if len(setReq.Delete) == 0 && len(setReq.Replace) == 0 && len(setReq.Update) == 0 { a.Logger.Printf("empty set request") return } a.setRequest(ctx, tc, setReq) } return default: a.logError(errors.New("unexpected condition return type")) return } } // InitGetSetFlags used to init or reset getsetCmd flags for gnmic-prompt mode func (a *App) InitGetSetFlags(cmd *cobra.Command) { cmd.ResetFlags() cmd.Flags().StringVarP(&a.Config.LocalFlags.GetSetGet, "get", "", "", "get request paths") cmd.MarkFlagRequired("get") cmd.Flags().StringSliceVarP(&a.Config.LocalFlags.GetSetModel, "model", "", []string{}, "get request models") cmd.Flags().StringVarP(&a.Config.LocalFlags.GetSetPrefix, "prefix", "", "", "get request prefix") cmd.Flags().StringVarP(&a.Config.LocalFlags.GetSetType, "type", "t", "ALL", "data type requested from the target. one of: ALL, CONFIG, STATE, OPERATIONAL") cmd.Flags().StringVarP(&a.Config.LocalFlags.GetSetTarget, "target", "", "", "get request target") cmd.Flags().StringVarP(&a.Config.LocalFlags.GetSetCondition, "condition", "", "any([true])", "condition to be met in order to execute the set request") cmd.Flags().StringVarP(&a.Config.LocalFlags.GetSetUpdate, "update", "", "", "set update path template, a Go template or a jq expression") cmd.Flags().StringVarP(&a.Config.LocalFlags.GetSetReplace, "replace", "", "", "set replace path template, a Go template or a jq expression") cmd.Flags().StringVarP(&a.Config.LocalFlags.GetSetDelete, "delete", "", "", "set delete path template, a Go template or a jq expression") cmd.Flags().StringVarP(&a.Config.LocalFlags.GetSetValue, "value", "", "", "set value template, a Go template or a jq expression") cmd.LocalFlags().VisitAll(func(flag *pflag.Flag) { a.Config.FileConfig.BindPFlag(fmt.Sprintf("%s-%s", cmd.Name(), flag.Name), flag) }) } ================================================ FILE: pkg/app/gnmi_client.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "context" "fmt" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmi/proto/gnmi_ext" "github.com/openconfig/gnmic/pkg/api/types" ) func (a *App) ClientCapabilities(ctx context.Context, tc *types.TargetConfig, ext ...*gnmi_ext.Extension) (*gnmi.CapabilityResponse, error) { // acquire writer lock a.operLock.Lock() t, err := a.initTarget(tc) a.operLock.Unlock() if err != nil { return nil, err } // acquire reader lock a.operLock.RLock() err = a.CreateGNMIClient(ctx, t) a.operLock.RUnlock() if err != nil { return nil, err } ctx, cancel := context.WithTimeout(ctx, t.Config.Timeout) defer cancel() capResponse, err := t.Capabilities(ctx, ext...) if err != nil { return nil, fmt.Errorf("%q CapabilitiesRequest failed: %v", t.Config.Address, err) } return capResponse, nil } func (a *App) ClientGet(ctx context.Context, tc *types.TargetConfig, req *gnmi.GetRequest) (*gnmi.GetResponse, error) { a.operLock.Lock() t, err := a.initTarget(tc) a.operLock.Unlock() if err != nil { return nil, err } // acquire reader lock a.operLock.RLock() err = a.CreateGNMIClient(ctx, t) a.operLock.RUnlock() if err != nil { return nil, err } ctx, cancel := context.WithTimeout(ctx, t.Config.Timeout) defer cancel() getResponse, err := t.Get(ctx, req) if err != nil { return nil, fmt.Errorf("%q GetRequest failed: %v", t.Config.Address, err) } return getResponse, nil } func (a *App) ClientSet(ctx context.Context, tc *types.TargetConfig, req *gnmi.SetRequest) (*gnmi.SetResponse, error) { a.operLock.Lock() t, err := a.initTarget(tc) a.operLock.Unlock() if err != nil { return nil, err } // acquire reader lock a.operLock.RLock() err = a.CreateGNMIClient(ctx, t) a.operLock.RUnlock() if err != nil { return nil, err } ctx, cancel := context.WithTimeout(ctx, t.Config.Timeout) defer cancel() setResponse, err := t.Set(ctx, req) if err != nil { return nil, fmt.Errorf("target %q SetRequest failed: %v", t.Config.Name, err) } return setResponse, nil } ================================================ FILE: pkg/app/gnmi_client_subscribe.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "context" "errors" "fmt" "io" "os" "time" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/grpctunnel/tunnel" "google.golang.org/grpc" "github.com/openconfig/gnmic/pkg/api" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/config" "github.com/openconfig/gnmic/pkg/lockers" "github.com/openconfig/gnmic/pkg/outputs" "github.com/openconfig/gnmic/pkg/utils" ) type subscriptionRequest struct { // subscription name name string // gNMI subscription request req *gnmi.SubscribeRequest } func (a *App) TargetSubscribeStream(ctx context.Context, tc *types.TargetConfig) { lockKey := a.targetLockKey(tc.Name) START: nctx, cancel := context.WithCancel(ctx) a.operLock.Lock() if cfn, ok := a.targetsLockFn[tc.Name]; ok { cfn() } a.targetsLockFn[tc.Name] = cancel t, err := a.initTarget(tc) a.operLock.Unlock() if err != nil { a.Logger.Printf("failed to initialize target %q: %v", tc.Name, err) return } select { // check if the context was canceled before retrying case <-nctx.Done(): return default: if a.locker != nil { a.Logger.Printf("acquiring lock for target %q", tc.Name) ok, err := a.locker.Lock(nctx, lockKey, []byte(a.Config.Clustering.InstanceName)) if err == lockers.ErrCanceled { a.Logger.Printf("lock attempt for target %q canceled", tc.Name) return } if err != nil { a.Logger.Printf("failed to lock target %q: %v", tc.Name, err) time.Sleep(a.Config.LocalFlags.SubscribeLockRetry) goto START } if !ok { time.Sleep(a.Config.LocalFlags.SubscribeLockRetry) goto START } a.Logger.Printf("acquired lock for target %q", tc.Name) } a.Logger.Printf("queuing target %q", tc.Name) a.targetsChan <- t a.Logger.Printf("subscribing to target: %q", tc.Name) go func() { err := a.clientSubscribe(nctx, tc) if err != nil { a.Logger.Printf("failed to subscribe: %v", err) return } }() if a.locker != nil { doneChan, errChan := a.locker.KeepLock(nctx, lockKey) for { select { case <-nctx.Done(): a.Logger.Printf("target %q stopped: %v", tc.Name, nctx.Err()) // drain errChan err := <-errChan a.Logger.Printf("target %q keepLock returned: %v", tc.Name, err) return case <-doneChan: a.Logger.Printf("target lock %q removed", tc.Name) return case err := <-errChan: a.Logger.Printf("failed to maintain target %q lock: %v", tc.Name, err) a.stopTarget(ctx, tc.Name) if errors.Is(err, context.Canceled) { return } time.Sleep(a.Config.LocalFlags.SubscribeLockRetry) goto START } } } } } func (a *App) TargetSubscribeOnce(ctx context.Context, tc *types.TargetConfig) error { nctx, cancel := context.WithCancel(ctx) defer cancel() a.operLock.Lock() _, err := a.initTarget(tc) a.operLock.Unlock() if err != nil { a.Logger.Printf("failed to initialize target %q: %v", tc.Name, err) return err } a.Logger.Printf("subscribing to target: %q", tc.Name) err = a.clientSubscribeOnce(nctx, tc) if err != nil { a.Logger.Printf("failed to subscribe: %v", err) return err } return nil } func (a *App) TargetSubscribePoll(ctx context.Context, tc *types.TargetConfig) { nctx, cancel := context.WithCancel(ctx) a.operLock.Lock() if cfn, ok := a.targetsLockFn[tc.Name]; ok { cfn() } a.targetsLockFn[tc.Name] = cancel _, err := a.initTarget(tc) a.operLock.Unlock() if err != nil { a.Logger.Printf("failed to initialize target %q: %v", tc.Name, err) return } a.Logger.Printf("subscribing to target: %q", tc.Name) err = a.clientSubscribe(nctx, tc) if err != nil { a.Logger.Printf("failed to subscribe: %v", err) return } } func (a *App) clientSubscribe(ctx context.Context, tc *types.TargetConfig) error { a.operLock.RLock() t, ok := a.Targets[tc.Name] a.operLock.RUnlock() if !ok { return fmt.Errorf("unknown target name: %q", tc.Name) } subscriptionsConfigs := t.Subscriptions if len(subscriptionsConfigs) == 0 { subscriptionsConfigs = a.Config.Subscriptions } if len(subscriptionsConfigs) == 0 { return fmt.Errorf("target %q has no subscriptions defined", tc.Name) } subRequests := make([]subscriptionRequest, 0, len(subscriptionsConfigs)) for scName, sc := range subscriptionsConfigs { req, err := utils.CreateSubscribeRequest(sc, tc, a.Config.Encoding) if err != nil { if errors.Is(errors.Unwrap(err), config.ErrConfig) || errors.Is(errors.Unwrap(err), api.ErrInvalidValue) { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } } subRequests = append(subRequests, subscriptionRequest{name: scName, req: req}) } if t.Cfn != nil { t.Cfn() } gnmiCtx, cancel := context.WithCancel(ctx) t.Cfn = cancel CRCLIENT: select { case <-gnmiCtx.Done(): return gnmiCtx.Err() default: targetDialOpts := make([]grpc.DialOption, len(a.dialOpts)) copy(targetDialOpts, a.dialOpts) if a.Config.UseTunnelServer { a.ttm.Lock() a.tunTargetCfn[tunnel.Target{ID: tc.Name, Type: tc.TunnelTargetType}] = cancel a.ttm.Unlock() targetDialOpts = append(targetDialOpts, grpc.WithContextDialer(a.tunDialerFn(gnmiCtx, tc)), ) // overwrite target address t.Config.Address = t.Config.Name } err := t.CreateGNMIClient(ctx, targetDialOpts...) if err != nil { if errors.Is(err, context.DeadlineExceeded) { a.Logger.Printf("failed to initialize target %q timeout (%s) reached", tc.Name, t.Config.Timeout) } else { a.Logger.Printf("failed to initialize target %q: %v", tc.Name, err) } a.Logger.Printf("retrying target %q in %s", tc.Name, t.Config.RetryTimer) time.Sleep(t.Config.RetryTimer) goto CRCLIENT } } a.Logger.Printf("target %q gNMI client created", t.Config.Name) for _, sreq := range subRequests { a.Logger.Printf("sending gNMI SubscribeRequest: subscribe='%+v', mode='%+v', encoding='%+v', to %s", sreq.req, sreq.req.GetSubscribe().GetMode(), sreq.req.GetSubscribe().GetEncoding(), t.Config.Name) go t.Subscribe(gnmiCtx, sreq.req, sreq.name) } return nil } func (a *App) clientSubscribeOnce(ctx context.Context, tc *types.TargetConfig) error { a.operLock.RLock() t, ok := a.Targets[tc.Name] a.operLock.RUnlock() if !ok { return fmt.Errorf("unknown target name: %q", tc.Name) } subscriptionsConfigs := t.Subscriptions if len(subscriptionsConfigs) == 0 { subscriptionsConfigs = a.Config.Subscriptions } if len(subscriptionsConfigs) == 0 { return fmt.Errorf("target %q has no subscriptions defined", tc.Name) } subRequests := make([]subscriptionRequest, 0) for _, sc := range subscriptionsConfigs { req, err := utils.CreateSubscribeRequest(sc, tc, a.Config.Encoding) if err != nil { if errors.Is(errors.Unwrap(err), config.ErrConfig) { fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } } subRequests = append(subRequests, subscriptionRequest{name: sc.Name, req: req}) } gnmiCtx, cancel := context.WithCancel(ctx) t.Cfn = cancel CRCLIENT: targetDialOpts := a.dialOpts if a.Config.UseTunnelServer { a.ttm.Lock() a.tunTargetCfn[tunnel.Target{ID: tc.Name, Type: tc.TunnelTargetType}] = cancel a.ttm.Unlock() targetDialOpts = append(targetDialOpts, grpc.WithContextDialer(a.tunDialerFn(gnmiCtx, tc)), ) // overwrite target address t.Config.Address = t.Config.Name } if err := t.CreateGNMIClient(ctx, targetDialOpts...); err != nil { if errors.Is(err, context.DeadlineExceeded) { a.Logger.Printf("failed to initialize target %q timeout (%s) reached", tc.Name, t.Config.Timeout) } else { a.Logger.Printf("failed to initialize target %q: %v", tc.Name, err) } a.Logger.Printf("retrying target %q in %s", tc.Name, t.Config.RetryTimer) time.Sleep(t.Config.RetryTimer) goto CRCLIENT } a.Logger.Printf("target %q gNMI client created", t.Config.Name) OUTER: for _, sreq := range subRequests { a.Logger.Printf("sending gNMI SubscribeRequest: subscribe='%+v', mode='%+v', encoding='%+v', to %s", sreq.req, sreq.req.GetSubscribe().GetMode(), sreq.req.GetSubscribe().GetEncoding(), t.Config.Name) rspCh, errCh := t.SubscribeOnceChan(gnmiCtx, sreq.req) for { select { case err := <-errCh: if errors.Is(err, io.EOF) { a.Logger.Printf("target %q, subscription %q closed stream(EOF)", t.Config.Name, sreq.name) close(rspCh) // next subscription or end continue OUTER } return err case rsp := <-rspCh: m := outputs.Meta{"source": t.Config.Name, "format": a.Config.Format, "subscription-name": sreq.name} a.export(ctx, rsp, m, t.Config.Outputs...) } } } return nil } func (a *App) clientSubscribePoll(ctx context.Context, targetName, subscriptionName string) error { a.operLock.RLock() t, ok := a.Targets[targetName] a.operLock.RUnlock() if !ok { return fmt.Errorf("unknown target name %q", targetName) } return t.SubscribePoll(ctx, subscriptionName) } ================================================ FILE: pkg/app/gnmi_server.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "context" "encoding/json" "errors" "fmt" "io" "net" "strconv" "sync" "time" "github.com/hashicorp/consul/api" "github.com/openconfig/gnmi/proto/gnmi" "google.golang.org/grpc/codes" "google.golang.org/grpc/peer" "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmic/pkg/api/path" "github.com/openconfig/gnmic/pkg/api/server" "github.com/openconfig/gnmic/pkg/api/target" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/cache" ) type streamClient struct { target string req *gnmi.SubscribeRequest stream gnmi.GNMI_SubscribeServer errChan chan<- error } func (a *App) startGnmiServer() error { if a.Config.GnmiServer == nil { a.c = nil return nil } var err error a.c, err = cache.New(a.Config.GnmiServer.Cache, cache.WithLogger(a.Logger)) if err != nil { a.Logger.Printf("failed to initialize gNMI cache: %v", err) return err } s, err := server.New(server.Config{ Address: a.Config.GnmiServer.Address, MaxUnaryRPC: a.Config.GnmiServer.MaxUnaryRPC, MaxStreamingRPC: a.Config.GnmiServer.MaxSubscriptions, MaxRecvMsgSize: a.Config.GnmiServer.MaxRecvMsgSize, MaxSendMsgSize: a.Config.GnmiServer.MaxSendMsgSize, MaxConcurrentStreams: a.Config.GnmiServer.MaxConcurrentStreams, TCPKeepalive: a.Config.GnmiServer.TCPKeepalive, Keepalive: a.Config.GnmiServer.GRPCKeepalive.Convert(), RateLimit: a.Config.GnmiServer.RateLimit, Timeout: a.Config.GnmiServer.Timeout, HealthEnabled: true, TLS: a.Config.GnmiServer.TLS, }, server.WithLogger(a.Logger), server.WithGetHandler(a.serverGetHandler), server.WithSetHandler(a.serverSetHandler), server.WithSubscribeHandler(a.serverSubscribeHandler), server.WithRegistry(a.reg), ) if err != nil { return err } ctx, cancel := context.WithCancel(a.ctx) go a.registerGNMIServer(ctx) go func() { defer cancel() err := s.Start(ctx) if err != nil { a.Logger.Print(err) } }() return nil } func (a *App) registerGNMIServer(ctx context.Context, defaultTags ...string) { if a.Config.GnmiServer.ServiceRegistration == nil { return } var err error clientConfig := &api.Config{ Address: a.Config.GnmiServer.ServiceRegistration.Address, Scheme: "http", Datacenter: a.Config.GnmiServer.ServiceRegistration.Datacenter, Token: a.Config.GnmiServer.ServiceRegistration.Token, } if a.Config.GnmiServer.ServiceRegistration.Username != "" && a.Config.GnmiServer.ServiceRegistration.Password != "" { clientConfig.HttpAuth = &api.HttpBasicAuth{ Username: a.Config.GnmiServer.ServiceRegistration.Username, Password: a.Config.GnmiServer.ServiceRegistration.Password, } } INITCONSUL: consulClient, err := api.NewClient(clientConfig) if err != nil { a.Logger.Printf("failed to connect to consul: %v", err) time.Sleep(1 * time.Second) goto INITCONSUL } self, err := consulClient.Agent().Self() if err != nil { a.Logger.Printf("failed to connect to consul: %v", err) time.Sleep(1 * time.Second) goto INITCONSUL } if cfg, ok := self["Config"]; ok { b, _ := json.Marshal(cfg) a.Logger.Printf("consul agent config: %s", string(b)) } ctx, cancel := context.WithCancel(ctx) defer cancel() h, p, err := net.SplitHostPort(a.Config.GnmiServer.Address) if err != nil { a.Logger.Printf("failed to split host and port from gNMI server address %q: %v", a.Config.GnmiServer.Address, err) return } pi, _ := strconv.Atoi(p) service := &api.AgentServiceRegistration{ ID: a.Config.InstanceName, Name: a.Config.GnmiServer.ServiceRegistration.Name, Address: h, Port: pi, Tags: append(defaultTags, a.Config.GnmiServer.ServiceRegistration.Tags...), Checks: api.AgentServiceChecks{ { TTL: a.Config.GnmiServer.ServiceRegistration.CheckInterval.String(), DeregisterCriticalServiceAfter: a.Config.GnmiServer.ServiceRegistration.DeregisterAfter, }, }, } if a.Config.Clustering != nil { if a.Config.Clustering.InstanceName != "" { service.ID = a.Config.Clustering.InstanceName } service.Name = a.Config.Clustering.ClusterName + "-gnmi-server" if service.Tags == nil { service.Tags = make([]string, 0) } service.Tags = append(service.Tags, fmt.Sprintf("cluster-name=%s", a.Config.Clustering.ClusterName)) } if service.ID == "" { service.ID = service.Name } service.Tags = append(service.Tags, fmt.Sprintf("instance-name=%s", service.ID)) ttlCheckID := "service:" + service.ID b, _ := json.Marshal(service) a.Logger.Printf("registering service: %s", string(b)) err = consulClient.Agent().ServiceRegister(service) if err != nil { a.Logger.Printf("failed to register service in consul: %v", err) return } err = consulClient.Agent().UpdateTTL(ttlCheckID, "", api.HealthPassing) if err != nil { a.Logger.Printf("failed to update TTL check to Passing: %v", err) } ticker := time.NewTicker(a.Config.GnmiServer.ServiceRegistration.CheckInterval / 2) for { select { case <-ticker.C: err = consulClient.Agent().UpdateTTL(ttlCheckID, "", api.HealthPassing) if err != nil { a.Logger.Printf("failed to update TTL check to Passing: %v", err) } case <-ctx.Done(): err = consulClient.Agent().UpdateTTL(ttlCheckID, ctx.Err().Error(), api.HealthCritical) if err != nil { a.Logger.Printf("failed to update TTL check to Critical: %v", err) } ticker.Stop() goto INITCONSUL } } } func (a *App) handleONCESubscriptionRequest(sc *streamClient) { var err error a.Logger.Printf("processing subscription to target %q", sc.target) paths := make([]*gnmi.Path, 0) switch req := sc.req.GetRequest().(type) { case *gnmi.SubscribeRequest_Subscribe: pr := req.Subscribe.GetPrefix() for _, sub := range req.Subscribe.GetSubscription() { paths = append(paths, &gnmi.Path{ Origin: pr.GetOrigin(), Target: pr.GetTarget(), Elem: append(pr.GetElem(), sub.GetPath().GetElem()...), }) } } // ro := &cache.ReadOpts{ Target: sc.target, Paths: paths, Mode: "once", UpdatesOnly: sc.req.GetSubscribe().GetUpdatesOnly(), } defer func() { if err != nil { a.Logger.Printf("error processing subscription to target %q: %v", sc.target, err) sc.errChan <- err return } a.Logger.Printf("subscription request to target %q processed", sc.target) }() for n := range a.c.Subscribe(sc.stream.Context(), ro) { if n.Err != nil { err = n.Err return } err = sc.stream.Send(&gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: n.Notification, }, }) if err != nil { return } } } func (a *App) handleStreamSubscriptionRequest(sc *streamClient) { peer, _ := peer.FromContext(sc.stream.Context()) errChan := make(chan error) defer close(errChan) // this context is required to signal this goroutine and `handleSampledQuery` goroutine that error has happened in cache ctx, cancel := context.WithCancel(sc.stream.Context()) a.Logger.Printf("processing STREAM subscription from %q to target %q", peer.Addr, sc.target) go func() { defer close(sc.errChan) for err := range errChan { if err == nil { a.Logger.Printf("subscription request from %q to target %q processed", peer.Addr, sc.target) } else if errors.Is(err, context.Canceled) { a.Logger.Printf("subscription to target %q canceled", sc.target) sc.errChan <- err cancel() } else { a.Logger.Printf("error processing STREAM subscription to target %q: %v", sc.target, err) sc.errChan <- err cancel() } } }() if sc.req.GetSubscribe().GetUpdatesOnly() { err := sc.stream.Send(&gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}, }) if err != nil { errChan <- err return } } var pr *gnmi.Path switch req := sc.req.GetRequest().(type) { case *gnmi.SubscribeRequest_Subscribe: pr = req.Subscribe.GetPrefix() } subs := sc.req.GetSubscribe().GetSubscription() wg := new(sync.WaitGroup) wg.Add(len(subs)) for i, sub := range subs { a.Logger.Printf("handling subscriptionList item[%d]: target %q, %q", i, sc.target, sub.String()) go func(sub *gnmi.Subscription) { defer wg.Done() var ro *cache.ReadOpts switch sub.GetMode() { case gnmi.SubscriptionMode_ON_CHANGE, gnmi.SubscriptionMode_TARGET_DEFINED: ro = &cache.ReadOpts{ Target: sc.target, Paths: []*gnmi.Path{ { Origin: pr.GetOrigin(), Target: pr.GetTarget(), Elem: append(pr.GetElem(), sub.GetPath().GetElem()...), }, }, Mode: cache.ReadMode_StreamOnChange, HeartbeatInterval: time.Duration(sub.GetHeartbeatInterval()), SuppressRedundant: sub.GetSuppressRedundant(), UpdatesOnly: sc.req.GetSubscribe().GetUpdatesOnly(), } case gnmi.SubscriptionMode_SAMPLE: period := time.Duration(sub.GetSampleInterval()) if period == 0 { period = a.Config.GnmiServer.DefaultSampleInterval } else if period < a.Config.GnmiServer.MinSampleInterval { period = a.Config.GnmiServer.MinSampleInterval } ro = &cache.ReadOpts{ Target: sc.target, Paths: []*gnmi.Path{ { Origin: pr.GetOrigin(), Target: pr.GetTarget(), Elem: append(pr.GetElem(), sub.GetPath().GetElem()...), }}, Mode: cache.ReadMode_StreamSample, SampleInterval: period, HeartbeatInterval: time.Duration(sub.GetHeartbeatInterval()), SuppressRedundant: sub.GetSuppressRedundant(), UpdatesOnly: sc.req.GetSubscribe().GetUpdatesOnly(), } } a.Logger.Printf("cache subscribe: %+v", ro) for n := range a.c.Subscribe(ctx, ro) { // `errChan <- n.Err` should trigger the gnmi-server side cleanup // only wait would be for the cache to close the channel if n.Err != nil { errChan <- n.Err a.Logger.Printf("cache subscribe failed: %+v: %v", ro, n.Err) // reader should only stop once the channel is closed by sender or otherwise // it coould block the senders who doesn't know that error has happened continue } err := sc.stream.Send(&gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: n.Notification, }, }) if err != nil { errChan <- n.Err } } }(sub) } // wait for ctx to be done <-ctx.Done() errChan <- ctx.Err() wg.Wait() } func (a *App) handlePolledSubscription(sc *streamClient) { defer close(sc.errChan) a.handleONCESubscriptionRequest(sc) sc.errChan <- sc.stream.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{ SyncResponse: true, }}) // var err error for { req, err := sc.stream.Recv() if errors.Is(err, io.EOF) { sc.errChan <- err return } switch req := req.Request.(type) { case *gnmi.SubscribeRequest_Poll: default: err = fmt.Errorf("unexpected request type: expecting a Poll request, rcvd: %v", req) a.Logger.Print(err) sc.errChan <- err return } if err != nil { a.Logger.Printf("target %q: failed poll subscription rcv: %v", sc.target, err) sc.errChan <- err return } a.Logger.Printf("target %q: repoll", sc.target) a.handleONCESubscriptionRequest(sc) sc.errChan <- sc.stream.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{ SyncResponse: true, }}) a.Logger.Printf("target %q: repoll done", sc.target) } } //// func (a *App) handlegNMIcInternalGet(ctx context.Context, req *gnmi.GetRequest) (*gnmi.GetResponse, error) { notifications := make([]*gnmi.Notification, 0, len(req.GetPath())) a.configLock.RLock() defer a.configLock.RUnlock() for _, p := range req.GetPath() { select { case <-ctx.Done(): return nil, ctx.Err() default: elems := path.PathElems(req.GetPrefix(), p) ns, err := a.handlegNMIGetPath(elems, req.GetEncoding()) if err != nil { return nil, err } notifications = append(notifications, ns...) } } return &gnmi.GetResponse{Notification: notifications}, nil } func (a *App) handlegNMIGetPath(elems []*gnmi.PathElem, enc gnmi.Encoding) ([]*gnmi.Notification, error) { notifications := make([]*gnmi.Notification, 0, len(elems)) for _, e := range elems { switch e.Name { // case "": case "targets": if e.Key != nil { if _, ok := e.Key["name"]; ok { for _, tc := range a.Config.Targets { if tc.Name == e.Key["name"] { notifications = append(notifications, targetConfigToNotification(tc, enc)) break } } } break } // no keys for _, tc := range a.Config.Targets { notifications = append(notifications, targetConfigToNotification(tc, enc)) } case "subscriptions": if e.Key != nil { if _, ok := e.Key["name"]; ok { for _, sub := range a.Config.Subscriptions { if sub.Name == e.Key["name"] { notifications = append(notifications, subscriptionConfigToNotification(sub, enc)) break } } } break } // no keys for _, sub := range a.Config.Subscriptions { notifications = append(notifications, subscriptionConfigToNotification(sub, enc)) } // case "outputs": // case "inputs": // case "processors": // case "clustering": // case "gnmi-server": default: return nil, status.Errorf(codes.InvalidArgument, "unknown path element %q", e.Name) } } return notifications, nil } func targetConfigToNotification(tc *types.TargetConfig, e gnmi.Encoding) *gnmi.Notification { switch e { case gnmi.Encoding_JSON, gnmi.Encoding_JSON_IETF: b, _ := json.Marshal(tc) n := &gnmi.Notification{ Timestamp: time.Now().UnixNano(), Update: []*gnmi.Update{ { Path: &gnmi.Path{ Origin: "gnmic", Elem: []*gnmi.PathElem{ { Name: "target", Key: map[string]string{"name": tc.Name}, }, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{JsonVal: b}, }, }, }, } return n case gnmi.Encoding_BYTES: n := &gnmi.Notification{ Timestamp: time.Now().UnixNano(), Prefix: &gnmi.Path{ Origin: "gnmic", Elem: []*gnmi.PathElem{ { Name: "target", Key: map[string]string{"name": tc.Name}, }, }, }, Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "address"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_BytesVal{BytesVal: []byte(tc.Address)}, }, }, }, } if tc.Username != nil { n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "username"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_BytesVal{BytesVal: []byte(*tc.Username)}, }, }) } if tc.Insecure != nil { n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "insecure"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_BytesVal{BytesVal: []byte(fmt.Sprint(*tc.Insecure))}, }, }) } if tc.SkipVerify != nil { n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "skip-verify"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_BytesVal{BytesVal: []byte(fmt.Sprint(*tc.SkipVerify))}, }, }) } n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "timeout"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_BytesVal{BytesVal: []byte(tc.Timeout.String())}, }, }) if tc.TLSCA != nil && *tc.TLSCA != "" { n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "tls-ca"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_BytesVal{BytesVal: []byte((tc.TLSCAString()))}, }, }) } if tc.TLSCert != nil && *tc.TLSCert != "" { n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "tls-cert"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_BytesVal{BytesVal: []byte(tc.TLSCertString())}, }, }) } if tc.TLSKey != nil && *tc.TLSKey != "" { n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "tls-key"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_BytesVal{BytesVal: []byte(tc.TLSKeyString())}, }, }) } if len(tc.Outputs) > 0 { typedVals := make([]*gnmi.TypedValue, 0, len(tc.Subscriptions)) for _, out := range tc.Outputs { typedVals = append(typedVals, &gnmi.TypedValue{ Value: &gnmi.TypedValue_BytesVal{BytesVal: []byte(out)}, }) } n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "outputs"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_LeaflistVal{ LeaflistVal: &gnmi.ScalarArray{ Element: typedVals, }, }, }, }) } if len(tc.Subscriptions) > 0 { typedVals := make([]*gnmi.TypedValue, 0, len(tc.Subscriptions)) for _, sub := range tc.Subscriptions { typedVals = append(typedVals, &gnmi.TypedValue{ Value: &gnmi.TypedValue_BytesVal{BytesVal: []byte(sub)}, }) } n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "subscriptions"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_LeaflistVal{ LeaflistVal: &gnmi.ScalarArray{ Element: typedVals, }, }, }, }) } return n case gnmi.Encoding_ASCII: n := &gnmi.Notification{ Timestamp: time.Now().UnixNano(), Prefix: &gnmi.Path{ Origin: "gnmic", Elem: []*gnmi.PathElem{ { Name: "target", Key: map[string]string{"name": tc.Name}, }, }, }, Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "address"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: tc.Address}, }, }, }, } if tc.Username != nil { n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "username"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: *tc.Username}, }, }) } if tc.Insecure != nil { n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "insecure"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: fmt.Sprint(*tc.Insecure)}, }, }) } if tc.SkipVerify != nil { n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "skip-verify"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: fmt.Sprint(*tc.SkipVerify)}, }, }) } n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "timeout"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: tc.Timeout.String()}, }, }) if tc.TLSCA != nil && *tc.TLSCA != "" { n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "tls-ca"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: tc.TLSCAString()}, }, }) } if tc.TLSCert != nil && *tc.TLSCert != "" { n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "tls-cert"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: tc.TLSCertString()}, }, }) } if tc.TLSKey != nil && *tc.TLSKey != "" { n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "tls-key"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: tc.TLSKeyString()}, }, }) } if len(tc.Outputs) > 0 { typedVals := make([]*gnmi.TypedValue, 0, len(tc.Subscriptions)) for _, out := range tc.Outputs { typedVals = append(typedVals, &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: out}, }) } n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "outputs"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_LeaflistVal{ LeaflistVal: &gnmi.ScalarArray{ Element: typedVals, }, }, }, }) } if len(tc.Subscriptions) > 0 { typedVals := make([]*gnmi.TypedValue, 0, len(tc.Subscriptions)) for _, sub := range tc.Subscriptions { typedVals = append(typedVals, &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: sub}, }) } n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "subscriptions"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_LeaflistVal{ LeaflistVal: &gnmi.ScalarArray{ Element: typedVals, }, }, }, }) } return n } return nil } func subscriptionConfigToNotification(sub *types.SubscriptionConfig, e gnmi.Encoding) *gnmi.Notification { switch e { case gnmi.Encoding_JSON, gnmi.Encoding_JSON_IETF: b, _ := json.Marshal(sub) n := &gnmi.Notification{ Timestamp: time.Now().UnixNano(), Update: []*gnmi.Update{ { Path: &gnmi.Path{ Origin: "gnmic", Elem: []*gnmi.PathElem{ { Name: "subscriptions", Key: map[string]string{"name": sub.Name}, }, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{JsonVal: b}, }, }, }, } return n case gnmi.Encoding_BYTES: case gnmi.Encoding_ASCII: } return nil } func (a *App) serverGetHandler(ctx context.Context, req *gnmi.GetRequest) (*gnmi.GetResponse, error) { numPaths := len(req.GetPath()) if numPaths == 0 && req.GetPrefix() == nil { return nil, status.Errorf(codes.InvalidArgument, "missing path") } origins := make(map[string]struct{}) for _, p := range req.GetPath() { origins[p.GetOrigin()] = struct{}{} if p.GetOrigin() != "gnmic" { if _, ok := origins["gnmic"]; ok { return nil, status.Errorf(codes.InvalidArgument, "combining `gnmic` origin with other origin values is not supported") } } } if _, ok := origins["gnmic"]; ok { return a.handlegNMIcInternalGet(ctx, req) } targetName := req.GetPrefix().GetTarget() pr, _ := peer.FromContext(ctx) a.Logger.Printf("received Get request from %q to target %q", pr.Addr, targetName) targets, err := a.selectTargets(ctx, targetName) if err != nil { return nil, status.Errorf(codes.Internal, "could not find targets: %v", err) } numTargets := len(targets) if numTargets == 0 { return nil, status.Errorf(codes.NotFound, "unknown target %q", targetName) } results := make(chan *gnmi.Notification) errChan := make(chan error, numTargets) response := &gnmi.GetResponse{ // assume one notification per path per target Notification: make([]*gnmi.Notification, 0, numTargets*numPaths), } done := make(chan struct{}) ctx, cancel := context.WithCancel(ctx) defer cancel() go func() { for { select { case notif, ok := <-results: if !ok { close(done) return } response.Notification = append(response.Notification, notif) case <-ctx.Done(): return } } }() wg := new(sync.WaitGroup) wg.Add(numTargets) for name, t := range targets { go func(name string, t *target.Target) { defer wg.Done() creq := proto.Clone(req).(*gnmi.GetRequest) if creq.GetPrefix() == nil { creq.Prefix = new(gnmi.Path) } if creq.GetPrefix().GetTarget() == "" || creq.GetPrefix().GetTarget() == "*" { creq.Prefix.Target = name } res, err := t.Get(ctx, creq) if err != nil { a.Logger.Printf("target %q err: %v", name, err) errChan <- fmt.Errorf("target %q err: %v", name, err) return } for _, n := range res.GetNotification() { if n.GetPrefix() == nil { n.Prefix = new(gnmi.Path) } if n.GetPrefix().GetTarget() == "" { n.Prefix.Target = name } results <- n } }(name, t) } wg.Wait() close(results) close(errChan) for err := range errChan { if err != nil { return nil, status.Errorf(codes.Internal, "%v", err) } } <-done if a.Config.Debug { a.Logger.Printf("sending GetResponse to %q: %+v", pr.Addr, response) } return response, nil } func (a *App) serverSetHandler(ctx context.Context, req *gnmi.SetRequest) (*gnmi.SetResponse, error) { numUpdates := len(req.GetUpdate()) numReplaces := len(req.GetReplace()) numDeletes := len(req.GetDelete()) numUnionReplace := len(req.GetUnionReplace()) if numUpdates+numReplaces+numDeletes+numUnionReplace == 0 { return nil, status.Errorf(codes.InvalidArgument, "missing update/replace/delete path(s)") } targetName := req.GetPrefix().GetTarget() pr, _ := peer.FromContext(ctx) a.Logger.Printf("received Set request from %q to target %q", pr.Addr, targetName) targets, err := a.selectTargets(ctx, targetName) if err != nil { return nil, status.Errorf(codes.Internal, "could not find targets: %v", err) } numTargets := len(targets) if numTargets == 0 { return nil, status.Errorf(codes.NotFound, "unknown target(s) %q", targetName) } results := make(chan *gnmi.UpdateResult) errChan := make(chan error, numTargets) response := &gnmi.SetResponse{ // assume one update per target, per update/replace/delete Response: make([]*gnmi.UpdateResult, 0, numTargets*(numUpdates+numReplaces+numDeletes)), } done := make(chan struct{}) ctx, cancel := context.WithCancel(ctx) defer cancel() go func() { for { select { case upd, ok := <-results: if !ok { response.Timestamp = time.Now().UnixNano() close(done) return } response.Response = append(response.Response, upd) case <-ctx.Done(): return } } }() wg := new(sync.WaitGroup) wg.Add(numTargets) for name, t := range targets { go func(name string, t *target.Target) { defer wg.Done() creq := proto.Clone(req).(*gnmi.SetRequest) if creq.GetPrefix() == nil { creq.Prefix = new(gnmi.Path) } if creq.GetPrefix().GetTarget() == "" || creq.GetPrefix().GetTarget() == "*" { creq.Prefix.Target = name } res, err := t.Set(ctx, creq) if err != nil { a.Logger.Printf("target %q err: %v", name, err) errChan <- fmt.Errorf("target %q err: %v", name, err) return } for _, upd := range res.GetResponse() { upd.Path.Target = name results <- upd } }(name, t) } wg.Wait() close(results) close(errChan) for err := range errChan { if err != nil { return nil, status.Errorf(codes.Internal, "%v", err) } } <-done a.Logger.Printf("sending SetResponse to %q: %+v", pr.Addr, response) return response, nil } func (a *App) serverSubscribeHandler(req *gnmi.SubscribeRequest, stream gnmi.GNMI_SubscribeServer) error { pr, _ := peer.FromContext(stream.Context()) sc := &streamClient{ stream: stream, req: req, } sc.target = sc.req.GetSubscribe().GetPrefix().GetTarget() if sc.target == "" { sc.target = "*" sub := sc.req.GetSubscribe() if sub.GetPrefix() == nil { sub.Prefix = &gnmi.Path{Target: "*"} } else { sub.Prefix.Target = "*" } } a.Logger.Printf("received a subscribe request mode=%v from %q for target %q", sc.req.GetSubscribe().GetMode(), pr.Addr, sc.target) defer a.Logger.Printf("subscription from peer %q terminated", pr.Addr) // closing of this channel is handled by respective goroutines that are going to send error on this channel errChan := make(chan error, len(sc.req.GetSubscribe().GetSubscription())) sc.errChan = errChan // send-only switch sc.req.GetSubscribe().GetMode() { case gnmi.SubscriptionList_ONCE: go func() { a.handleONCESubscriptionRequest(sc) errChan <- sc.stream.Send(&gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}, }) close(errChan) }() case gnmi.SubscriptionList_POLL: go a.handlePolledSubscription(sc) case gnmi.SubscriptionList_STREAM: go a.handleStreamSubscriptionRequest(sc) default: return status.Errorf(codes.InvalidArgument, "unrecognized subscription mode: %v", sc.req.GetSubscribe().GetMode()) } // flushing the errChan defer func() { a.Logger.Printf("flushing subscription errChan") for range errChan { } }() // returning first non-nil error and flushing rest in defer for err := range errChan { if err != nil { return status.Errorf(codes.Internal, "%v", err) } } return nil } ================================================ FILE: pkg/app/inputs.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "context" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/inputs" ) func (a *App) InitInput(ctx context.Context, name string, tcs map[string]*types.TargetConfig) { a.configLock.Lock() defer a.configLock.Unlock() if _, ok := a.Inputs[name]; ok { return } if cfg, ok := a.Config.Inputs[name]; ok { if inputType, ok := cfg["type"]; ok { a.Logger.Printf("starting input type %s", inputType) if initializer, ok := inputs.Inputs[inputType.(string)]; ok { in := initializer() go func() { err := in.Start(ctx, name, cfg, inputs.WithLogger(a.Logger), inputs.WithName(a.Config.InstanceName), inputs.WithOutputs(a.Outputs), inputs.WithConfigStore(a.Store), ) if err != nil { a.Logger.Printf("failed to init input type %q: %v", inputType, err) } }() a.operLock.Lock() a.Inputs[name] = in a.operLock.Unlock() } } } } func (a *App) InitInputs(ctx context.Context) { for name := range a.Config.Inputs { a.InitInput(ctx, name, a.Config.Targets) } } ================================================ FILE: pkg/app/loaders.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "context" "time" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/loaders" ) func (a *App) startLoader(ctx context.Context) { if len(a.Config.Loader) == 0 { return } if a.inCluster() { ticker := time.NewTicker(time.Second) // wait for instance to become the leader for range ticker.C { if a.isLeader { ticker.Stop() break } } } ldTypeS := a.Config.Loader["type"].(string) START: a.Logger.Printf("initializing loader type %q", ldTypeS) var fnTargetsDefaults func(tc *types.TargetConfig) error if expandEnv, ok := a.Config.Loader["expand-env"].(bool); ok && expandEnv { fnTargetsDefaults = a.Config.SetTargetConfigDefaultsExpandEnv } else { fnTargetsDefaults = a.Config.SetTargetConfigDefaults } ld := loaders.Loaders[ldTypeS]() err := ld.Init(ctx, a.Config.Loader, a.Logger, loaders.WithRegistry(a.reg), loaders.WithActions(a.Config.Actions), loaders.WithTargetsDefaults(fnTargetsDefaults), ) if err != nil { a.Logger.Printf("failed to init loader type %q: %v", ldTypeS, err) return } a.Logger.Printf("starting loader type %q", ldTypeS) for targetOp := range ld.Start(ctx) { // do deletes first, since target change equates to delete+add for _, del := range targetOp.Del { // not clustered, delete local target if !a.inCluster() { err = a.DeleteTarget(ctx, del) if err != nil { a.Logger.Printf("failed deleting target %q: %v", del, err) } continue } // clustered, delete target in all instances of the cluster err = a.deleteTarget(ctx, del) if err != nil { a.Logger.Printf("failed to delete target %q: %v", del, err) } } var limiter *time.Ticker if a.Config.LocalFlags.SubscribeBackoff > 0 { limiter = time.NewTicker(a.Config.LocalFlags.SubscribeBackoff) } for _, add := range targetOp.Add { err = fnTargetsDefaults(add) if err != nil { a.Logger.Printf("failed parsing new target configuration %s: %v", add, err) continue } // not clustered, add target and subscribe if !a.inCluster() { a.Config.Targets[add.Name] = add a.AddTargetConfig(add) a.wg.Add(1) go a.TargetSubscribeStream(ctx, add) if limiter != nil { <-limiter.C } continue } // clustered, dispatch a.configLock.Lock() a.Config.Targets[add.Name] = add err = a.dispatchTarget(ctx, add) if err != nil { a.Logger.Printf("failed dispatching target %q: %v", add.Name, err) } a.configLock.Unlock() } if limiter != nil { limiter.Stop() } } a.Logger.Printf("target loader stopped") select { case <-ctx.Done(): return default: goto START } } func (a *App) startLoaderProxy(ctx context.Context) { if len(a.Config.Loader) == 0 { return } ldTypeS := a.Config.Loader["type"].(string) START: a.Logger.Printf("initializing loader type %q", ldTypeS) var fnTargetsDefaults func(tc *types.TargetConfig) error if expandEnv, ok := a.Config.Loader["expand-env"].(bool); ok && expandEnv { fnTargetsDefaults = a.Config.SetTargetConfigDefaultsExpandEnv } else { fnTargetsDefaults = a.Config.SetTargetConfigDefaults } ld := loaders.Loaders[ldTypeS]() err := ld.Init(ctx, a.Config.Loader, a.Logger, loaders.WithRegistry(a.reg), loaders.WithActions(a.Config.Actions), loaders.WithTargetsDefaults(fnTargetsDefaults), ) if err != nil { a.Logger.Printf("failed to init loader type %q: %v", ldTypeS, err) return } a.Logger.Printf("starting loader type %q", ldTypeS) for targetOp := range ld.Start(ctx) { // do deletes first since target change is delete+add for _, del := range targetOp.Del { // clustered, delete target in all instances of the cluster a.operLock.Lock() t, ok := a.Targets[del] if ok { err = t.Close() if err != nil { a.Logger.Printf("failed to stop target %s: %v", del, err) } delete(a.Targets, del) } a.operLock.Unlock() } for _, add := range targetOp.Add { err = fnTargetsDefaults(add) if err != nil { a.Logger.Printf("failed parsing new target configuration %s: %v", add, err) continue } a.configLock.Lock() a.Config.Targets[add.Name] = add a.configLock.Unlock() } } a.Logger.Printf("target loader stopped") select { case <-ctx.Done(): return default: goto START } } ================================================ FILE: pkg/app/locker.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import "fmt" func (a *App) targetLockKey(s string) string { if a.Config.Clustering == nil { return s } if s == "" { return s } return fmt.Sprintf("gnmic/%s/targets/%s", a.Config.Clustering.ClusterName, s) } ================================================ FILE: pkg/app/logging.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "errors" "fmt" "os" ) func (a *App) logError(err error) { if err == nil { return } a.Logger.Print(err) if !a.Config.Log { fmt.Fprintln(os.Stderr, err) } if a.errCh == nil { return } a.errCh <- err } func (a *App) checkErrors() error { if a.errCh == nil { return nil } close(a.errCh) errs := make([]error, 0) for err := range a.errCh { errs = append(errs, err) } if len(errs) == 0 { return nil } if a.Config.Log { for _, err := range errs { fmt.Fprintln(os.Stderr, err) } } return errors.New("one or more requests failed") } ================================================ FILE: pkg/app/metrics.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "context" "fmt" "strings" "time" "github.com/prometheus/client_golang/prometheus" ) const ( clusterMetricsUpdatePeriod = 10 * time.Second ) // subscribe var subscribeResponseReceivedCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "subscribe", Name: "number_of_received_subscribe_response_messages_total", Help: "Total number of received subscribe response messages", }, []string{"source", "subscription"}) var subscribeResponseFailedCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "subscribe", Name: "number_of_failed_subscribe_request_messages_total", Help: "Total number of failed subscribe requests", }, []string{"source", "subscription"}) // target var targetUPMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "target", Name: "up", Help: "Has value 1 if the gNMI connection to the target is established; otherwise, 0.", }, []string{"name"}) var targetConnStateMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "target", Name: "connection_state", Help: "The current gRPC connection state to the target. The value can be one of the following: 0(UNKNOWN), 1 (IDLE), 2 (CONNECTING), 3 (READY), 4 (TRANSIENT_FAILURE), or 5 (SHUTDOWN).", }, []string{"name"}) // cluster var clusterNumberOfLockedTargets = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "cluster", Name: "number_of_locked_targets", Help: "number of locked targets", }) var clusterIsLeader = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "cluster", Name: "is_leader", Help: "Has value 1 if this gnmic instance is the cluster leader, 0 otherwise", }) func (a *App) registerTargetMetrics() { err := a.reg.Register(targetUPMetric) if err != nil { a.Logger.Printf("failed to register target metric: %v", err) } err = a.reg.Register(targetConnStateMetric) if err != nil { a.Logger.Printf("failed to register target connection state metric: %v", err) } a.configLock.RLock() for _, t := range a.Config.Targets { targetUPMetric.WithLabelValues(t.Name).Set(0) targetConnStateMetric.WithLabelValues(t.Name).Set(0) } a.configLock.RUnlock() go func() { ticker := time.NewTicker(clusterMetricsUpdatePeriod) defer ticker.Stop() for { select { case <-a.ctx.Done(): return case <-ticker.C: ownTargets := make(map[string]string) if a.isLeader { lockedNodesPrefix := fmt.Sprintf("gnmic/%s/targets", a.Config.ClusterName) ctx, cancel := context.WithTimeout(a.ctx, clusterMetricsUpdatePeriod/2) lockedNodes, err := a.locker.List(ctx, lockedNodesPrefix) cancel() if err != nil { a.Logger.Printf("failed to get locked nodes key: %v", err) } for k, v := range lockedNodes { ownTargets[strings.TrimPrefix(k, lockedNodesPrefix+"/")] = v } } targetUPMetric.Reset() targetConnStateMetric.Reset() a.configLock.RLock() for _, tc := range a.Config.Targets { a.operLock.RLock() t, ok := a.Targets[tc.Name] a.operLock.RUnlock() if ok { switch t.ConnState() { case "IDLE": targetUPMetric.WithLabelValues(tc.Name).Set(1) targetConnStateMetric.WithLabelValues(tc.Name).Set(1) case "CONNECTING": targetUPMetric.WithLabelValues(tc.Name).Set(0) targetConnStateMetric.WithLabelValues(tc.Name).Set(2) case "READY": targetUPMetric.WithLabelValues(tc.Name).Set(1) targetConnStateMetric.WithLabelValues(tc.Name).Set(3) case "TRANSIENT_FAILURE": targetUPMetric.WithLabelValues(tc.Name).Set(0) targetConnStateMetric.WithLabelValues(tc.Name).Set(4) case "SHUTDOWN": targetUPMetric.WithLabelValues(tc.Name).Set(0) targetConnStateMetric.WithLabelValues(tc.Name).Set(5) default: targetUPMetric.WithLabelValues(tc.Name).Set(0) targetConnStateMetric.WithLabelValues(tc.Name).Set(0) } } else { if a.isLeader { if ownTargets[tc.Name] == a.Config.Clustering.InstanceName { targetUPMetric.WithLabelValues(tc.Name).Set(0) targetConnStateMetric.WithLabelValues(tc.Name).Set(0) } } else { targetUPMetric.WithLabelValues(tc.Name).Set(0) targetConnStateMetric.WithLabelValues(tc.Name).Set(0) } } } a.configLock.RUnlock() } } }() } func (a *App) startClusterMetrics() { if a.Config.APIServer == nil || !a.Config.APIServer.EnableMetrics || a.Config.Clustering == nil { return } var err error err = a.reg.Register(clusterNumberOfLockedTargets) if err != nil { a.Logger.Printf("failed to register metric: %v", err) } err = a.reg.Register(clusterIsLeader) if err != nil { a.Logger.Printf("failed to register metric: %v", err) } ticker := time.NewTicker(clusterMetricsUpdatePeriod) defer ticker.Stop() for { select { case <-a.ctx.Done(): return case <-ticker.C: ctx, cancel := context.WithTimeout(a.ctx, clusterMetricsUpdatePeriod/2) leaderKey := fmt.Sprintf("gnmic/%s/leader", a.Config.ClusterName) leader, err := a.locker.List(ctx, leaderKey) cancel() if err != nil { a.Logger.Printf("failed to get leader key: %v", err) } if leader[leaderKey] == a.Config.Clustering.InstanceName { clusterIsLeader.Set(1) } else { clusterIsLeader.Set(0) } lockedNodesPrefix := fmt.Sprintf("gnmic/%s/targets", a.Config.ClusterName) ctx, cancel = context.WithTimeout(a.ctx, clusterMetricsUpdatePeriod/2) lockedNodes, err := a.locker.List(ctx, lockedNodesPrefix) cancel() if err != nil { a.Logger.Printf("failed to get locked nodes key: %v", err) } numLockedNodes := 0 for _, v := range lockedNodes { if v == a.Config.Clustering.InstanceName { numLockedNodes++ } } clusterNumberOfLockedTargets.Set(float64(numLockedNodes)) } } } ================================================ FILE: pkg/app/outputs.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "context" "fmt" "sync" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/outputs" ) func (a *App) InitOutput(ctx context.Context, name string, tcs map[string]*types.TargetConfig) { a.configLock.Lock() defer a.configLock.Unlock() if _, ok := a.Outputs[name]; ok { return } wg := new(sync.WaitGroup) if cfg, ok := a.Config.Outputs[name]; ok { if outType, ok := cfg["type"]; ok { a.Logger.Printf("starting output type %s", outType) if initializer, ok := outputs.Outputs[outType.(string)]; ok { out := initializer() wg.Add(1) go func() { defer wg.Done() err := out.Init(ctx, name, cfg, outputs.WithLogger(a.Logger), outputs.WithRegistry(a.reg), outputs.WithName(a.Config.InstanceName), outputs.WithClusterName(a.Config.ClusterName), outputs.WithConfigStore(a.Store), ) if err != nil { a.Logger.Printf("failed to init output type %q: %v", outType, err) } }() a.operLock.Lock() a.Outputs[name] = out a.operLock.Unlock() } } } wg.Wait() } func (a *App) InitOutputs(ctx context.Context) { for name := range a.Config.Outputs { a.InitOutput(ctx, name, a.Config.Targets) } } // AddOutputConfig adds an output called name, with config cfg if it does not already exist func (a *App) AddOutputConfig(name string, cfg map[string]interface{}) error { // if a.Outputs == nil { // a.Outputs = make(map[string]outputs.Output) // } if a.Config.Outputs == nil { a.Config.Outputs = make(map[string]map[string]interface{}) } if _, ok := a.Outputs[name]; ok { return fmt.Errorf("output %q already exists", name) } a.configLock.Lock() defer a.configLock.Unlock() a.Config.Outputs[name] = cfg return nil } func (a *App) DeleteOutput(name string) error { if a.Outputs == nil { return nil } a.operLock.Lock() defer a.operLock.Unlock() if _, ok := a.Outputs[name]; !ok { return fmt.Errorf("output %q does not exist", name) } o := a.Outputs[name] err := o.Close() if err != nil { a.Logger.Printf("failed to close output %q: %v", name, err) } delete(a.Outputs, name) return nil } ================================================ FILE: pkg/app/path.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "encoding/json" "errors" "fmt" "os" "sort" "strings" "github.com/manifoldco/promptui" "github.com/openconfig/goyang/pkg/yang" "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/openconfig/gnmic/pkg/api/path" ) type pathGenOpts struct { search bool withDescr bool withTypes bool withPrefix bool pathType string stateOnly bool configOnly bool json bool withNonLeaves bool } type generatedPath struct { Path string `json:"path,omitempty"` PathWithPrefix string `json:"path-with-prefix,omitempty"` Type string `json:"type,omitempty"` EnumValues []string `json:"enum-values,omitempty"` Description string `json:"description,omitempty"` Default string `json:"default,omitempty"` IsState bool `json:"is-state,omitempty"` Namespace string `json:"namespace,omitempty"` FeatureList []string `json:"if-features,omitempty"` } func (a *App) PathCmdRun(d, f, e []string, pgo pathGenOpts) error { err := a.generateYangSchema(f, e) if err != nil { return err } gpaths := make([]*generatedPath, 0, 256) collected := make([]*yang.Entry, 0, 256) for _, entry := range a.SchemaTree.Dir { collected = append(collected, collectSchemaNodes(entry, !pgo.withNonLeaves)...) } for _, entry := range collected { // don't produce such paths in case of non-leaves if entry.IsCase() || entry.IsChoice() { continue } if !pgo.stateOnly && !pgo.configOnly || pgo.stateOnly && pgo.configOnly { gpaths = append(gpaths, a.generatePath(entry, pgo.pathType)) continue } state := isState(entry) if state && pgo.stateOnly { gpaths = append(gpaths, a.generatePath(entry, pgo.pathType)) continue } if !state && pgo.configOnly { gpaths = append(gpaths, a.generatePath(entry, pgo.pathType)) continue } } sort.Slice(gpaths, func(i, j int) bool { return gpaths[i].Path < gpaths[j].Path }) for _, gp := range gpaths { gp.PathWithPrefix = collapsePrefixes(gp.PathWithPrefix) } if pgo.json { b, err := json.MarshalIndent(gpaths, "", " ") if err != nil { return err } fmt.Fprintln(os.Stdout, string(b)) return nil } if len(gpaths) == 0 { return errors.New("no results found") } // regular print if !pgo.search { sb := new(strings.Builder) for _, gp := range gpaths { sb.Reset() if pgo.withPrefix { sb.WriteString(gp.PathWithPrefix) } else { sb.WriteString(gp.Path) } if pgo.withTypes { sb.WriteString("\t(type=") sb.WriteString(gp.Type) sb.WriteString(")") } if pgo.withDescr { sb.WriteString("\n") sb.WriteString(indent("\t", gp.Description)) } fmt.Fprintln(os.Stdout, sb.String()) } return nil } // search paths := make([]string, 0, len(gpaths)) for _, gp := range gpaths { paths = append(paths, gp.Path) } p := promptui.Select{ Label: "select path", Items: paths, Size: 10, Stdout: os.Stdout, HideSelected: true, Searcher: func(input string, index int) bool { kws := strings.Split(input, " ") result := true for _, kw := range kws { if strings.HasPrefix(kw, "!") { kw = strings.TrimLeft(kw, "!") if kw == "" { continue } result = result && !strings.Contains(paths[index], kw) } else { result = result && strings.Contains(paths[index], kw) } } return result }, Keys: &promptui.SelectKeys{ Prev: promptui.Key{Code: promptui.KeyPrev, Display: promptui.KeyPrevDisplay}, Next: promptui.Key{Code: promptui.KeyNext, Display: promptui.KeyNextDisplay}, PageUp: promptui.Key{Code: promptui.KeyBackward, Display: promptui.KeyBackwardDisplay}, PageDown: promptui.Key{Code: promptui.KeyForward, Display: promptui.KeyForwardDisplay}, Search: promptui.Key{Code: ':', Display: ":"}, }, } index, selected, err := p.Run() if err != nil { return err } fmt.Println(selected) fmt.Println(a.generateTypeInfo(collected[index])) return nil } func (a *App) PathPreRunE(cmd *cobra.Command, args []string) error { a.Config.SetLocalFlagsFromFile(cmd) if a.Config.PathSearch && a.Config.PathWithDescr { return errors.New("flags --search and --descr cannot be used together") } if a.Config.LocalFlags.PathPathType != "xpath" && a.Config.LocalFlags.PathPathType != "gnmi" { return errors.New("path-type must be one of 'xpath' or 'gnmi'") } return a.yangFilesPreProcessing() } func (a *App) PathRunE(cmd *cobra.Command, args []string) error { return a.PathCmdRun( a.Config.GlobalFlags.Dir, a.Config.GlobalFlags.File, a.Config.GlobalFlags.Exclude, pathGenOpts{ search: a.Config.LocalFlags.PathSearch, withDescr: a.Config.LocalFlags.PathWithDescr, withTypes: a.Config.LocalFlags.PathWithTypes, withPrefix: a.Config.LocalFlags.PathWithPrefix, pathType: a.Config.LocalFlags.PathPathType, stateOnly: a.Config.LocalFlags.PathState, configOnly: a.Config.LocalFlags.PathConfig, }, ) } func (a *App) InitPathFlags(cmd *cobra.Command) { cmd.Flags().StringVarP(&a.Config.LocalFlags.PathPathType, "path-type", "", "xpath", "path type xpath or gnmi") cmd.Flags().BoolVarP(&a.Config.LocalFlags.PathWithDescr, "descr", "", false, "print leaf description") cmd.Flags().BoolVarP(&a.Config.LocalFlags.PathWithPrefix, "with-prefix", "", false, "include module/submodule prefix in path elements") cmd.Flags().BoolVarP(&a.Config.LocalFlags.PathWithTypes, "types", "", false, "print leaf type") cmd.Flags().BoolVarP(&a.Config.LocalFlags.PathSearch, "search", "", false, "search through path list") cmd.Flags().BoolVarP(&a.Config.LocalFlags.PathState, "state-only", "", false, "generate paths only for YANG leafs representing state data") cmd.Flags().BoolVarP(&a.Config.LocalFlags.PathConfig, "config-only", "", false, "generate paths only for YANG leafs representing config data") cmd.LocalFlags().VisitAll(func(flag *pflag.Flag) { a.Config.FileConfig.BindPFlag(fmt.Sprintf("%s-%s", cmd.Name(), flag.Name), flag) }) } func collectSchemaNodes(e *yang.Entry, leafOnly bool) []*yang.Entry { if e == nil { return []*yang.Entry{} } collected := make([]*yang.Entry, 0, 128) for _, child := range e.Dir { collected = append(collected, collectSchemaNodes(child, leafOnly)...) } if e.Parent != nil { switch { case e.Dir == nil && e.ListAttr != nil: // leaf-list fallthrough case e.Dir == nil: // leaf f := &yang.Entry{ Parent: e.Parent, Node: e.Node, Name: e.Name, Description: e.Description, Default: e.Default, Units: e.Units, Kind: e.Kind, Config: e.Config, Prefix: e.Prefix, Mandatory: e.Mandatory, Dir: e.Dir, Key: e.Key, Type: e.Type, Exts: e.Exts, ListAttr: e.ListAttr, Extra: make(map[string][]any), } for k, v := range e.Extra { f.Extra[k] = v } collected = append(collected, f) case e.ListAttr != nil: // list fallthrough default: // container if !leafOnly { collected = append(collected, e) } if len(e.Extra["if-feature"]) > 0 { for _, myleaf := range collected { if myleaf.Extra["if-feature"] == nil { myleaf.Extra["if-feature"] = e.Extra["if-feature"] continue } LOOP: for _, f := range e.Extra["if-feature"] { for _, mlf := range myleaf.Extra["if-feature"] { if ff, ok := f.(*yang.Value); ok && ff != nil { if mlff, ok := mlf.(*yang.Value); ok && mlff != nil { if ff.Source == nil || mlff.Source == nil { continue LOOP } if ff.Source.Argument == mlff.Source.Argument { continue LOOP } myleaf.Extra["if-feature"] = append(myleaf.Extra["if-feature"], f) } } } } } } } } return collected } func (a *App) generatePath(entry *yang.Entry, pType string) *generatedPath { gp := new(generatedPath) for e := entry; e != nil && e.Parent != nil; e = e.Parent { if e.IsCase() || e.IsChoice() { continue } elementName := e.Name prefixedElementName := e.Name if e.Prefix != nil { if e.Prefix.Parent != nil { prefixedElementName = fmt.Sprintf("%s:%s", e.Prefix.Parent.NName(), prefixedElementName) } else { prefixedElementName = fmt.Sprintf("%s:%s", e.Prefix.NName(), prefixedElementName) } } if e.Key != "" { for _, k := range strings.Fields(e.Key) { elementName = fmt.Sprintf("%s[%s=*]", elementName, k) prefixedElementName = fmt.Sprintf("%s[%s=*]", prefixedElementName, k) } } gp.Path = fmt.Sprintf("/%s%s", elementName, gp.Path) if e.Prefix != nil { gp.PathWithPrefix = fmt.Sprintf("/%s%s", prefixedElementName, gp.PathWithPrefix) } } if ifFeature, ok := entry.Extra["if-feature"]; ok && ifFeature != nil { APPEND: for _, feature := range ifFeature { f, ok := feature.(*yang.Value) if !ok { continue } for _, ef := range gp.FeatureList { if ef == f.Source.Argument { continue APPEND } } gp.FeatureList = append(gp.FeatureList, strings.Split(f.Source.Argument, " and ")...) } } gp.Description = entry.Description if entry.Type != nil { gp.Type = entry.Type.Name if gp.Type == "enumeration" { gp.EnumValues = entry.Type.Enum.Names() } } else if entry.IsList() { gp.Type = "[list]" } else { gp.Type = "[container]" } if entry.IsLeafList() { gp.Default = strings.Join(entry.DefaultValues(), ", ") } else { gp.Default, _ = entry.SingleDefaultValue() } gp.IsState = isState(entry) gp.Namespace = entry.Namespace().NName() if pType == "gnmi" { gnmiPath, err := path.ParsePath(gp.Path) if err != nil { fmt.Fprintf(os.Stderr, "path: %s could not be changed to gnmi format: %v\n", gp.Path, err) } gp.Path = gnmiPath.String() } return gp } func (a *App) generateTypeInfo(e *yang.Entry) string { if e == nil || e.Type == nil { return "unknown type" } t := e.Type rstr := fmt.Sprintf("- type: %s", t.Kind) switch t.Kind { case yang.Ybits: data := getAnnotation(e, "bits") if data != nil { rstr += fmt.Sprintf(" %v", data) } case yang.Yenum: data := getAnnotation(e, "enum") if data != nil { rstr += fmt.Sprintf(" %v", data) } case yang.Yleafref: rstr += fmt.Sprintf(" %q", t.Path) case yang.Yidentityref: rstr += fmt.Sprintf(" %q", t.IdentityBase.Name) if a.Config.LocalFlags.PathWithPrefix { data := getAnnotation(e, "prefix-qualified-identities") if data != nil { rstr += fmt.Sprintf(" %v", data) } } else { identities := make([]string, 0, 64) for i := range t.IdentityBase.Values { identities = append(identities, t.IdentityBase.Values[i].Name) } rstr += fmt.Sprintf(" %v", identities) } case yang.Yunion: unionlist := make([]string, 0, len(t.Type)) for i := range t.Type { unionlist = append(unionlist, t.Type[i].Name) } rstr += fmt.Sprintf(" %v", unionlist) default: } rstr += "\n" if t.Root != nil { data := getAnnotation(e, "root.type") if data != nil && t.Kind.String() != data.(string) { rstr += fmt.Sprintf("- root.type: %v\n", data) } } if t.Units != "" { rstr += fmt.Sprintf("- units: %s\n", t.Units) } if t.Default != "" { rstr += fmt.Sprintf("- default: %q\n", t.Default) } if t.FractionDigits != 0 { rstr += fmt.Sprintf("- fraction-digits: %d\n", t.FractionDigits) } if len(t.Length) > 0 { rstr += fmt.Sprintf("- length: %s\n", t.Length) } if t.Kind == yang.YinstanceIdentifier && !t.OptionalInstance { rstr += "- required\n" } if len(t.Pattern) > 0 { rstr += fmt.Sprintf("- pattern: %s\n", strings.Join(t.Pattern, "|")) } b := yang.BaseTypedefs[t.Kind.String()].YangType if len(t.Range) > 0 && !t.Range.Equal(b.Range) { rstr += fmt.Sprintf("- range: %s\n", t.Range) } return rstr } func getAnnotation(entry *yang.Entry, name string) any { if entry.Annotation != nil { data, ok := entry.Annotation[name] if ok { return data } } return nil } func isState(e *yang.Entry) bool { if e.Config == yang.TSFalse { return true } if e.Parent != nil { return isState(e.Parent) } return false } // collapsePrefixes removes prefixes from path element names and keys func collapsePrefixes(p string) string { gp, err := path.ParsePath(p) if err != nil { return p } parentPrefix := "" for _, pe := range gp.Elem { currentPrefix, name := getPrefixElem(pe.Name) if parentPrefix == "" || parentPrefix != currentPrefix { // first elem or updating parent prefix parentPrefix = currentPrefix } else if currentPrefix == parentPrefix { pe.Name = name } } return fmt.Sprintf("/%s", path.GnmiPathToXPath(gp, false)) } // takes a path element name or a key name // and returns the prefix and name func getPrefixElem(pe string) (string, string) { if pe == "" { return "", "" } pes := strings.SplitN(pe, ":", 2) if len(pes) > 1 { return pes[0], pes[1] } return "", pes[0] } ================================================ FILE: pkg/app/path_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import "testing" var collapseTestSet = map[string][]string{ "1": { "", "/", }, "2": { "/prefix1:elem1[key1=*]/prefix1:elem2/prefix2:elem3/prefix2:elem4", "/prefix1:elem1[key1=*]/elem2/prefix2:elem3/elem4", }, "3": { "/prefix1:elem1[key1=*]/prefix1:elem2/prefix2:elem3/prefix2:elem4", "/prefix1:elem1[key1=*]/elem2/prefix2:elem3/elem4", }, "4": { "/fake_prefix:", "/fake_prefix:", }, "5": { "/:fake_prefix", "/:fake_prefix", }, "6": { "/elem1/prefix1:elem2/prefix1:elem3", "/elem1/prefix1:elem2/elem3", }, } func TestCollapsePrefixes(t *testing.T) { for name, item := range collapseTestSet { t.Run(name, func(t *testing.T) { r := collapsePrefixes(item[0]) if r != item[1] { t.Logf("failed at item %q", name) t.Logf("expected: %q", item[1]) t.Logf(" got: %q", r) t.Fail() } }) } } ================================================ FILE: pkg/app/plugins.go ================================================ package app import "github.com/openconfig/gnmic/pkg/formatters/plugin_manager" func (a *App) initPluginManager() error { pc, err := a.Config.GetPluginsConfig() if err != nil { return err } if pc == nil { return nil } a.pm = plugin_manager.New(pc, a.Logger.Writer()) return a.pm.Load() } func (a *App) CleanupPlugins() { if a.pm == nil { return } a.pm.Cleanup() } ================================================ FILE: pkg/app/pprof.go ================================================ package app import ( "net/http" _ "net/http/pprof" //nolint:gosec // Import for pprof, only enabled via CLI flag "time" ) type pprofServer struct { err chan error } func newPprofServer() *pprofServer { return &pprofServer{ err: make(chan error, 1), } } func (p *pprofServer) Start(address string) { go func() { server := &http.Server{ Addr: address, ReadHeaderTimeout: 10 * time.Second, } if err := server.ListenAndServe(); err != nil { p.err <- err } close(p.err) }() } func (p *pprofServer) ErrChan() <-chan error { return p.err } ================================================ FILE: pkg/app/processor.go ================================================ package app import ( "bytes" "encoding/json" "errors" "fmt" "time" "github.com/AlekSi/pointer" "github.com/openconfig/gnmic/pkg/config" "github.com/openconfig/gnmic/pkg/file" "github.com/openconfig/gnmic/pkg/formatters" promcom "github.com/openconfig/gnmic/pkg/outputs/prometheus_output" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" "github.com/spf13/cobra" ) func (a *App) ProcessorPreRunE(cmd *cobra.Command, args []string) error { a.Config.SetLocalFlagsFromFile(cmd) err := a.initPluginManager() if err != nil { return err } return nil } func (a *App) ProcessorRunE(cmd *cobra.Command, args []string) error { actionsConfig, err := a.Config.GetActions() if err != nil { return fmt.Errorf("failed reading actions config: %v", err) } pConfig, err := a.Config.GetEventProcessors() if err != nil { return fmt.Errorf("failed reading event processors config: %v", err) } tcs, err := a.Config.GetTargets() if err != nil { if !errors.Is(err, config.ErrNoTargetsFound) { return err } } // initialize processors evps, err := formatters.MakeEventProcessors( a.Logger, a.Config.LocalFlags.ProcessorName, pConfig, tcs, actionsConfig, ) if err != nil { return err } // read input file inputBytes, err := file.ReadFile(cmd.Context(), a.Config.LocalFlags.ProcessorInput) if err != nil { return err } evInput := make([][]*formatters.EventMsg, 0) msgs := bytes.Split(inputBytes, []byte(a.Config.LocalFlags.ProcessorInputDelimiter)) for i, bg := range msgs { if len(bg) == 0 { continue } mevs := make([]map[string]any, 0) err = json.Unmarshal(bg, &mevs) if err != nil { return fmt.Errorf("failed json Unmarshal at msg index %d: %s: %v", i, bg, err) } evs := make([]*formatters.EventMsg, 0, len(mevs)) for _, mev := range mevs { ev, err := formatters.EventFromMap(mev) if err != nil { return err } evs = append(evs, ev) } evInput = append(evInput, evs) } rrevs := make([][]*formatters.EventMsg, 0, len(evInput)) for _, evs := range evInput { revs := evs for _, p := range evps { revs = p.Apply(revs...) } rrevs = append(rrevs, revs) } if len(a.Config.LocalFlags.ProcessorOutput) != 0 { b, err := a.promFormat(rrevs, a.Config.LocalFlags.ProcessorOutput) if err != nil { return err } fmt.Println(string(b)) return nil } numEvOut := len(rrevs) for i, rev := range rrevs { b, err := json.MarshalIndent(rev, "", " ") if err != nil { return err } fmt.Println(string(b)) if i == numEvOut-1 { break } } return nil } func (a *App) InitProcessorFlags(cmd *cobra.Command) { cmd.ResetFlags() cmd.Flags().StringVarP(&a.Config.LocalFlags.ProcessorInput, "input", "", "", "processors input") cmd.MarkFlagRequired("input") cmd.Flags().StringVarP(&a.Config.LocalFlags.ProcessorInputDelimiter, "delimiter", "", "\n", "processors input delimiter") cmd.Flags().StringSliceVarP(&a.Config.LocalFlags.ProcessorName, "name", "", nil, "list of processors to apply to the input") cmd.MarkFlagRequired("name") cmd.Flags().StringVarP(&a.Config.LocalFlags.ProcessorOutput, "output", "", "", "output name") } func (a *App) promFormat(rrevs [][]*formatters.EventMsg, outName string) ([]byte, error) { // read output config outputPath := "outputs/" + outName outputConfig := a.Config.FileConfig.GetStringMap(outputPath) if outputConfig == nil { return nil, fmt.Errorf("unknown output name: %s", outName) } outType := a.Config.FileConfig.GetString(outputPath + "/type") if outType != "prometheus" && outType != "remote_write" { return nil, fmt.Errorf("output %q must be of type 'prometheus' or 'remote_write'", outName) } mb := &promcom.MetricBuilder{ Prefix: a.Config.FileConfig.GetString(outputPath + "/metric-prefix"), AppendSubscriptionName: a.Config.FileConfig.GetBool(outputPath + "/append-subscription-name"), StringsAsLabels: a.Config.FileConfig.GetBool(outputPath + "/strings-as-labels"), OverrideTimestamps: a.Config.FileConfig.GetBool(outputPath + "/override-timestamps"), ExportTimestamps: a.Config.FileConfig.GetBool(outputPath + "/export-timestamps"), } b := new(bytes.Buffer) now := time.Now() for _, revs := range rrevs { for _, ev := range revs { pms := mb.MetricsFromEvent(ev, now) for _, pm := range pms { m := &dto.Metric{} err := pm.Write(m) if err != nil { return nil, err } _, err = expfmt.MetricFamilyToText(b, &dto.MetricFamily{ Name: pointer.ToString(pm.Name), Help: pointer.ToString("gNMIc generated metric"), Type: dto.MetricType_UNTYPED.Enum(), Metric: []*dto.Metric{m}, }) if err != nil { return nil, err } } } } return b.Bytes(), nil } ================================================ FILE: pkg/app/prompt.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "fmt" "os" "path/filepath" "strings" "github.com/mitchellh/go-homedir" "github.com/nsf/termbox-go" "github.com/spf13/cobra" "github.com/spf13/pflag" ) func (a *App) PromptRunE(cmd *cobra.Command, args []string) error { err := a.generateYangSchema(a.Config.GlobalFlags.File, a.Config.GlobalFlags.Exclude) if err != nil { a.Logger.Printf("failed to load paths from yang: %v", err) if !a.Config.Log { fmt.Fprintf(os.Stderr, "ERR: failed to load paths from yang: %v\n", err) } } a.PromptMode = true // load history a.PromptHistory = make([]string, 0, 256) home, err := homedir.Dir() if err != nil { if a.Config.Debug { a.Logger.Printf("failed to get home directory: %v", err) } return nil } content, err := os.ReadFile(filepath.Join(home, ".gnmic.history")) if err != nil { if a.Config.Debug { a.Logger.Printf("failed to read history file: %v", err) } return nil } history := strings.Split(string(content), "\n") for i := range history { if history[i] != "" { a.PromptHistory = append(a.PromptHistory, history[i]) } } return nil } // PreRun resolve the glob patterns and checks if --max-suggestions is bigger that the terminal height and lowers it if needed. func (a *App) PromptPreRunE(cmd *cobra.Command, args []string) error { a.Config.SetLocalFlagsFromFile(cmd) err := a.yangFilesPreProcessing() if err != nil { return err } err = termbox.Init() if err != nil { return fmt.Errorf("could not initialize a terminal box: %v", err) } _, h := termbox.Size() termbox.Close() // set max suggestions to terminal height-1 if the supplied value is greater if uint(a.Config.LocalFlags.PromptMaxSuggestions) > uint(h) { if h > 1 { a.Config.LocalFlags.PromptMaxSuggestions = uint16(h - 2) } else { a.Config.LocalFlags.PromptMaxSuggestions = 0 } } return nil } func (a *App) InitPromptFlags(cmd *cobra.Command) { cmd.Flags().Uint16Var(&a.Config.LocalFlags.PromptMaxSuggestions, "max-suggestions", 10, "terminal suggestion max list size") cmd.Flags().StringVar(&a.Config.LocalFlags.PromptPrefixColor, "prefix-color", "dark_blue", "terminal prefix color") cmd.Flags().StringVar(&a.Config.LocalFlags.PromptSuggestionsBGColor, "suggestions-bg-color", "dark_blue", "suggestion box background color") cmd.Flags().StringVar(&a.Config.LocalFlags.PromptDescriptionBGColor, "description-bg-color", "dark_gray", "description box background color") cmd.Flags().BoolVar(&a.Config.LocalFlags.PromptSuggestAllFlags, "suggest-all-flags", false, "suggest local as well as inherited flags of subcommands") cmd.Flags().BoolVar(&a.Config.LocalFlags.PromptDescriptionWithPrefix, "description-with-prefix", false, "show YANG module prefix in XPATH suggestion description") cmd.Flags().BoolVar(&a.Config.LocalFlags.PromptDescriptionWithTypes, "description-with-types", false, "show YANG types in XPATH suggestion description") cmd.Flags().BoolVar(&a.Config.LocalFlags.PromptSuggestWithOrigin, "suggest-with-origin", false, "suggest XPATHs with origin prepended ") cmd.LocalFlags().VisitAll(func(flag *pflag.Flag) { a.Config.FileConfig.BindPFlag(fmt.Sprintf("%s-%s", cmd.Name(), flag.Name), flag) }) } ================================================ FILE: pkg/app/proxy.go ================================================ // © 2024 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "context" "errors" "fmt" "io" "strconv" "strings" "sync" "time" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/api/server" "github.com/openconfig/gnmic/pkg/api/target" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/config" "github.com/openconfig/grpctunnel/tunnel" "github.com/spf13/cobra" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/peer" "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" ) type targetSubscribeResponse struct { name string rsp *gnmi.SubscribeResponse } func (a *App) ProxyPreRunE(cmd *cobra.Command, args []string) error { a.Config.SetLocalFlagsFromFile(cmd) a.createCollectorDialOpts() return nil } func (a *App) ProxyRunE(cmd *cobra.Command, args []string) error { err := a.Config.GetGNMIServer() if err != nil { return err } err = a.Config.GetAPIServer() if err != nil { return err } err = a.Config.GetLoader() if err != nil { return err } err = a.initTunnelServer(tunnel.ServerConfig{ AddTargetHandler: a.tunServerAddTargetSubscribeHandler, DeleteTargetHandler: a.tunServerDeleteTargetHandler, RegisterHandler: a.tunServerRegisterHandler, Handler: a.tunServerHandler, }) if err != nil { return err } _, err = a.Config.GetTargets() if errors.Is(err, config.ErrNoTargetsFound) { if len(a.Config.FileConfig.GetStringMap("loader")) == 0 && !a.Config.UseTunnelServer { return fmt.Errorf("failed reading targets config: %v", err) } } else if err != nil { return fmt.Errorf("failed reading targets config: %v", err) } a.startAPIServer() go a.startLoaderProxy(cmd.Context()) go a.registerGNMIServer(cmd.Context(), "isProxy=true") return a.startGNMIProxyServer(cmd.Context()) } func (a *App) startGNMIProxyServer(ctx context.Context) error { s, err := server.New(server.Config{ Address: a.Config.GnmiServer.Address, MaxUnaryRPC: a.Config.GnmiServer.MaxUnaryRPC, MaxStreamingRPC: a.Config.GnmiServer.MaxSubscriptions, MaxRecvMsgSize: a.Config.GnmiServer.MaxRecvMsgSize, MaxSendMsgSize: a.Config.GnmiServer.MaxSendMsgSize, MaxConcurrentStreams: a.Config.GnmiServer.MaxConcurrentStreams, TCPKeepalive: a.Config.GnmiServer.TCPKeepalive, Keepalive: a.Config.GnmiServer.GRPCKeepalive.Convert(), HealthEnabled: true, RateLimit: a.Config.GnmiServer.RateLimit, Timeout: a.Config.GnmiServer.Timeout, TLS: a.Config.GnmiServer.TLS, }, server.WithLogger(a.Logger), server.WithRegistry(a.reg), server.WithGetHandler(a.proxyGetHandler), server.WithSetHandler(a.proxySetHandler), server.WithSubscribeHandler(a.proxySubscribeHandler)) if err != nil { return err } return s.Start(ctx) } func (a *App) proxyGetHandler(ctx context.Context, req *gnmi.GetRequest) (*gnmi.GetResponse, error) { targetName := req.GetPrefix().GetTarget() pr, _ := peer.FromContext(ctx) a.Logger.Printf("received Get request from %q to target %q", pr.Addr, targetName) targets, err := a.selectTargets(ctx, targetName) if err != nil { return nil, status.Errorf(codes.Internal, "could not find targets: %v", err) } numTargets := len(targets) if numTargets == 0 { return nil, status.Errorf(codes.NotFound, "unknown target %q", targetName) } results := make(chan *gnmi.Notification) errChan := make(chan error, numTargets) response := &gnmi.GetResponse{ // assume one notification target Notification: make([]*gnmi.Notification, 0, numTargets), } done := make(chan struct{}) ctx, cancel := context.WithCancel(ctx) defer cancel() go func() { for { select { case notif, ok := <-results: if !ok { close(done) return } response.Notification = append(response.Notification, notif) case <-ctx.Done(): return } } }() wg := new(sync.WaitGroup) wg.Add(numTargets) for name, t := range targets { go func(name string, t *target.Target) { defer wg.Done() creq := proto.Clone(req).(*gnmi.GetRequest) if creq.GetPrefix() == nil { creq.Prefix = new(gnmi.Path) } if creq.GetPrefix().GetTarget() == "" || creq.GetPrefix().GetTarget() == "*" { creq.Prefix.Target = name } res, err := t.Get(ctx, creq) if err != nil { a.Logger.Printf("target %q err: %v", name, err) errChan <- fmt.Errorf("target %q err: %v", name, err) return } for _, n := range res.GetNotification() { if n.GetPrefix() == nil { n.Prefix = new(gnmi.Path) } if n.GetPrefix().GetTarget() == "" { n.Prefix.Target = name } results <- n } }(name, t) } wg.Wait() close(results) close(errChan) for err := range errChan { if err != nil { return nil, status.Errorf(codes.Internal, "%v", err) } } <-done if a.Config.Debug { a.Logger.Printf("sending GetResponse to %q: %+v", pr.Addr, response) } return response, nil } func (a *App) proxySetHandler(ctx context.Context, req *gnmi.SetRequest) (*gnmi.SetResponse, error) { numUpdates := len(req.GetUpdate()) numReplaces := len(req.GetReplace()) numDeletes := len(req.GetDelete()) numUnionReplace := len(req.GetUnionReplace()) if numUpdates+numReplaces+numDeletes+numUnionReplace == 0 { return nil, status.Errorf(codes.InvalidArgument, "missing update/replace/delete path(s)") } targetName := req.GetPrefix().GetTarget() pr, _ := peer.FromContext(ctx) a.Logger.Printf("received Set request from %q to target %q", pr.Addr, targetName) targets, err := a.selectTargets(ctx, targetName) if err != nil { return nil, status.Errorf(codes.Internal, "could not find targets: %v", err) } numTargets := len(targets) if numTargets == 0 { return nil, status.Errorf(codes.NotFound, "unknown target(s) %q", targetName) } results := make(chan *gnmi.UpdateResult) errChan := make(chan error, numTargets) response := &gnmi.SetResponse{ // assume one update per target, per update/replace/delete Response: make([]*gnmi.UpdateResult, 0, numTargets*(numUpdates+numReplaces+numDeletes+numUnionReplace)), } done := make(chan struct{}) ctx, cancel := context.WithCancel(ctx) defer cancel() go func() { for { select { case upd, ok := <-results: if !ok { response.Timestamp = time.Now().UnixNano() close(done) return } response.Response = append(response.Response, upd) case <-ctx.Done(): return } } }() wg := new(sync.WaitGroup) wg.Add(numTargets) for name, t := range targets { go func(name string, t *target.Target) { defer wg.Done() creq := proto.Clone(req).(*gnmi.SetRequest) if creq.GetPrefix() == nil { creq.Prefix = new(gnmi.Path) } if creq.GetPrefix().GetTarget() == "" || creq.GetPrefix().GetTarget() == "*" { creq.Prefix.Target = name } res, err := t.Set(ctx, creq) if err != nil { a.Logger.Printf("target %q err: %v", name, err) errChan <- fmt.Errorf("target %q err: %v", name, err) return } for _, upd := range res.GetResponse() { upd.Path.Target = name results <- upd } }(name, t) } wg.Wait() close(results) close(errChan) for err := range errChan { if err != nil { return nil, status.Errorf(codes.Internal, "%v", err) } } <-done a.Logger.Printf("sending SetResponse to %q: %+v", pr.Addr, response) return response, nil } func (a *App) proxySubscribeHandler(req *gnmi.SubscribeRequest, stream gnmi.GNMI_SubscribeServer) error { switch req.GetRequest().(type) { case *gnmi.SubscribeRequest_Poll: return status.Errorf(codes.InvalidArgument, "invalid request type: %T", req.GetRequest()) case *gnmi.SubscribeRequest_Subscribe: } switch req.GetSubscribe().GetMode() { case gnmi.SubscriptionList_ONCE: case gnmi.SubscriptionList_STREAM: case gnmi.SubscriptionList_POLL: return status.Errorf(codes.Unimplemented, "subscribe mode POLL not implemented by the proxy") default: return status.Errorf(codes.InvalidArgument, "unknown subscribe request mode: %v", req.GetSubscribe().GetMode()) } ctx := stream.Context() targetName := getTargetFromSubscribeRequest(req) targets, err := a.selectTargets(ctx, targetName) if err != nil { return status.Errorf(codes.Internal, "could not find target(s): %v", err) } numTargets := len(targets) if numTargets == 0 { return status.Errorf(codes.NotFound, "unknown target(s) %q", targetName) } switch req.GetSubscribe().GetMode() { case gnmi.SubscriptionList_ONCE: return a.proxySubscribeONCEHandler(req, stream, targets) case gnmi.SubscriptionList_STREAM: return a.proxySubscribeSTREAMHandler(req, stream, targets) } return nil } func (a *App) proxySubscribeONCEHandler(req *gnmi.SubscribeRequest, stream gnmi.GNMI_SubscribeServer, targets map[string]*target.Target) error { ctx := stream.Context() numTargets := len(targets) results := make(chan *targetSubscribeResponse) errChan := make(chan error, numTargets) done := make(chan struct{}) stop := make(chan struct{}) go func() { defer close(done) syncs := make(map[string]struct{}) for { select { case <-ctx.Done(): return case r, ok := <-results: if !ok { return } switch r.rsp.Response.(type) { case *gnmi.SubscribeResponse_Update: if r.rsp.GetUpdate().GetPrefix() == nil { r.rsp.GetUpdate().Prefix = new(gnmi.Path) } if r.rsp.GetUpdate().GetPrefix().GetTarget() == "" { r.rsp.GetUpdate().GetPrefix().Target = r.name } err := stream.Send(r.rsp) if err != nil { close(stop) a.Logger.Printf("proxy stream send failed: %v", err) return } case *gnmi.SubscribeResponse_SyncResponse: syncs[r.name] = struct{}{} if len(syncs) >= numTargets { // send a single sync and stop err := stream.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}) if err != nil { a.Logger.Printf("proxy stream send Sync response failed: %v", err) } return } } } } }() wg := new(sync.WaitGroup) wg.Add(numTargets) for name, t := range targets { go func(name string, t *target.Target) { defer wg.Done() ctx, cancel := context.WithCancel(ctx) defer cancel() creq := proto.Clone(req).(*gnmi.SubscribeRequest) if creq.GetSubscribe().GetPrefix() == nil { creq.GetSubscribe().Prefix = new(gnmi.Path) } if creq.GetSubscribe().GetPrefix().GetTarget() == "" || creq.GetSubscribe().GetPrefix().GetTarget() == "*" { creq.GetSubscribe().Prefix.Target = name } resCh, errCh := t.SubscribeOnceChan(ctx, creq) for { select { case <-ctx.Done(): return case <-stop: cancel() return case r, ok := <-resCh: if !ok { return } results <- &targetSubscribeResponse{ name: name, rsp: r, } case err := <-errCh: if errors.Is(err, io.EOF) { a.Logger.Printf("target %q: closed stream(EOF)", t.Config.Name) } else { errChan <- err } return } } }(name, t) } wg.Wait() close(results) close(errChan) for err := range errChan { if err != nil { return status.Errorf(codes.Internal, "%v", err) } } <-done return nil } func (a *App) proxySubscribeSTREAMHandler(req *gnmi.SubscribeRequest, stream gnmi.GNMI_SubscribeServer, targets map[string]*target.Target) error { ctx := stream.Context() numTargets := len(targets) results := make(chan *targetSubscribeResponse) errChan := make(chan error, numTargets) done := make(chan struct{}) // used to stop target subscriptions if // the northbound subscription stops. stop := make(chan struct{}) go func() { defer close(done) for { select { case <-ctx.Done(): return case r, ok := <-results: if !ok { return } switch r.rsp.Response.(type) { case *gnmi.SubscribeResponse_Update: if r.rsp.GetUpdate().GetPrefix() == nil { r.rsp.GetUpdate().Prefix = new(gnmi.Path) } if r.rsp.GetUpdate().GetPrefix().GetTarget() == "" { r.rsp.GetUpdate().GetPrefix().Target = r.name } } err := stream.Send(r.rsp) if err != nil { close(stop) a.Logger.Printf("proxy stream send failed: %v", err) return } } } }() pr, _ := peer.FromContext(ctx) wg := new(sync.WaitGroup) wg.Add(numTargets) for name, t := range targets { go func(name string, t *target.Target) { defer wg.Done() ctx, cancel := context.WithCancel(ctx) defer cancel() creq := proto.Clone(req).(*gnmi.SubscribeRequest) if creq.GetSubscribe().GetPrefix() == nil { creq.GetSubscribe().Prefix = new(gnmi.Path) } if creq.GetSubscribe().GetPrefix().GetTarget() == "" || creq.GetSubscribe().GetPrefix().GetTarget() == "*" { creq.GetSubscribe().Prefix.Target = name } subName := pr.Addr.String() + "-" + name + "-" + strconv.Itoa(time.Now().Nanosecond()) rspCh, errCh := t.SubscribeStreamChan(ctx, creq, subName) defer t.StopSubscription(subName) for { select { case <-ctx.Done(): return case <-stop: cancel() return case r, ok := <-rspCh: if !ok { return } results <- &targetSubscribeResponse{ name: name, rsp: r, } case err, ok := <-errCh: if !ok { return } errChan <- err return } } }(name, t) } wg.Wait() close(results) close(errChan) for err := range errChan { if err != nil { return status.Errorf(codes.Internal, "%v", err) } } <-done return nil } func getTargetFromSubscribeRequest(req *gnmi.SubscribeRequest) string { switch req.GetRequest().(type) { case *gnmi.SubscribeRequest_Poll: case *gnmi.SubscribeRequest_Subscribe: return req.GetSubscribe().GetPrefix().GetTarget() } return "" } func (a *App) selectTargets(ctx context.Context, tn string) (map[string]*target.Target, error) { targets := make(map[string]*target.Target) a.operLock.Lock() defer a.operLock.Unlock() a.configLock.Lock() defer a.configLock.Unlock() if tn == "" || tn == "*" { for n, tc := range a.Config.Targets { targetName := utils.GetHost(n) if t, ok := a.Targets[targetName]; ok { targets[targetName] = t continue } t, err := a.createTarget(ctx, tc) if err != nil { return nil, err } a.Targets[targetName] = t targets[n] = t } return targets, nil } targetsNames := strings.Split(tn, ",") for i := range targetsNames { for n, t := range a.Targets { if utils.GetHost(n) == targetsNames[i] { targets[n] = t } } } if len(targets) == len(targetsNames) { return targets, nil } OUTER: for i := range targetsNames { for n, tc := range a.Config.Targets { targetName := utils.GetHost(n) if _, ok := targets[targetName]; !ok && targetName == targetsNames[i] { t, err := a.createTarget(ctx, tc) if err != nil { return nil, err } a.Targets[targetName] = t targets[n] = t continue OUTER } } return nil, status.Errorf(codes.NotFound, "target %q is not known", targetsNames[i]) } return targets, nil } func (a *App) createTarget(ctx context.Context, tc *types.TargetConfig) (*target.Target, error) { t := target.NewTarget(tc) targetDialOpts := a.dialOpts if a.Config.UseTunnelServer { targetDialOpts = append(targetDialOpts, grpc.WithContextDialer(a.tunDialerFn(ctx, tc)), ) t.Config.Address = t.Config.Name } err := t.CreateGNMIClient(ctx, targetDialOpts...) if err != nil { return nil, err } return t, nil } ================================================ FILE: pkg/app/routes.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "net/http" "github.com/gorilla/mux" ) func (a *App) routes() { apiV1 := a.router.PathPrefix("/api/v1").Subrouter() a.clusterRoutes(apiV1) a.configRoutes(apiV1) a.targetRoutes(apiV1) a.healthRoutes(apiV1) a.adminRoutes(apiV1) } func (a *App) clusterRoutes(r *mux.Router) { r.HandleFunc("/cluster", a.handleClusteringGet).Methods(http.MethodGet) r.HandleFunc("/cluster/rebalance", a.handleClusterRebalance).Methods(http.MethodPost) r.HandleFunc("/cluster/leader", a.handleClusteringLeaderGet).Methods(http.MethodGet) r.HandleFunc("/cluster/leader", a.handleClusteringLeaderDelete).Methods(http.MethodDelete) r.HandleFunc("/cluster/members", a.handleClusteringMembersGet).Methods(http.MethodGet) r.HandleFunc("/cluster/members/{id}/drain", a.handleClusteringDrainInstance).Methods(http.MethodPost) } func (a *App) configRoutes(r *mux.Router) { // config r.HandleFunc("/config", a.handleConfig).Methods(http.MethodGet) // config/targets r.HandleFunc("/config/targets", a.handleConfigTargetsGet).Methods(http.MethodGet) r.HandleFunc("/config/targets/{id}", a.handleConfigTargetsGet).Methods(http.MethodGet) r.HandleFunc("/config/targets", a.handleConfigTargetsPost).Methods(http.MethodPost) r.HandleFunc("/config/targets/{id}", a.handleConfigTargetsDelete).Methods(http.MethodDelete) r.HandleFunc("/config/targets/{id}/subscriptions", a.handleConfigTargetsSubscriptions).Methods(http.MethodPatch) // config/subscriptions r.HandleFunc("/config/subscriptions", a.handleConfigSubscriptions).Methods(http.MethodGet) // config/outputs r.HandleFunc("/config/outputs", a.handleConfigOutputs).Methods(http.MethodGet) // config/inputs r.HandleFunc("/config/inputs", a.handleConfigInputs).Methods(http.MethodGet) // config/processors r.HandleFunc("/config/processors", a.handleConfigProcessors).Methods(http.MethodGet) // config/clustering r.HandleFunc("/config/clustering", a.handleConfigClustering).Methods(http.MethodGet) // config/api-server r.HandleFunc("/config/api-server", a.handleConfigAPIServer).Methods(http.MethodGet) // config/gnmi-server r.HandleFunc("/config/gnmi-server", a.handleConfigGNMIServer).Methods(http.MethodGet) } func (a *App) targetRoutes(r *mux.Router) { // targets r.HandleFunc("/targets", a.handleTargetsGet).Methods(http.MethodGet) r.HandleFunc("/targets/{id}", a.handleTargetsGet).Methods(http.MethodGet) r.HandleFunc("/targets/{id}", a.handleTargetsPost).Methods(http.MethodPost) r.HandleFunc("/targets/{id}", a.handleTargetsDelete).Methods(http.MethodDelete) } func (a *App) healthRoutes(r *mux.Router) { r.HandleFunc("/healthz", a.handleHealthzGet).Methods(http.MethodGet) } func (a *App) adminRoutes(r *mux.Router) { r.HandleFunc("/admin/shutdown", a.handleAdminShutdown).Methods(http.MethodPost) } ================================================ FILE: pkg/app/set-to-notifs.go ================================================ // Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package app import ( "fmt" "os" "github.com/openconfig/ygot/gnmidiff" "github.com/openconfig/ygot/gnmidiff/gnmiparse" "github.com/spf13/cobra" "github.com/spf13/pflag" ) // InitDiffSetToNotifsFlags used to init or reset newDiffSetRequestCmd // flags for gnmic-prompt mode func (a *App) InitDiffSetToNotifsFlags(cmd *cobra.Command) { cmd.ResetFlags() cmd.Flags().StringVarP(&a.Config.LocalFlags.DiffSetToNotifsSet, "setrequest", "", "", "reference gNMI SetRequest textproto file for comparing against stored notifications from a device") cmd.Flags().StringVarP(&a.Config.LocalFlags.DiffSetToNotifsResponse, "response", "", "", "gNMI Notifications textproto file (can be GetResponse or SubscribeResponse stream) for comparing against the reference SetRequest") cmd.MarkFlagRequired("setrequest") cmd.MarkFlagRequired("response") cmd.Flags().BoolVarP(&a.Config.LocalFlags.DiffSetToNotifsFull, "full", "f", false, "show common values") cmd.LocalFlags().VisitAll(func(flag *pflag.Flag) { a.Config.FileConfig.BindPFlag(fmt.Sprintf("%s-%s", "diff-set-to-notifs", flag.Name), flag) }) } func (a *App) DiffSetToNotifsRunE(cmd *cobra.Command, args []string) error { defer a.InitDiffSetRequestFlags(cmd) format := gnmidiff.Format{ Full: a.Config.LocalFlags.DiffSetToNotifsFull, } setreq, err := gnmiparse.SetRequestFromFile(a.Config.LocalFlags.DiffSetToNotifsSet) if err != nil { return err } notifs, err := gnmiparse.NotifsFromFile(a.Config.LocalFlags.DiffSetToNotifsResponse) if err != nil { return err } diff, err := gnmidiff.DiffSetRequestToNotifications(setreq, notifs, nil) if err != nil { return err } fmt.Fprint(os.Stderr, diff.Format(format)) return nil } ================================================ FILE: pkg/app/set.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "context" "fmt" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/grpctunnel/tunnel" "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/config" ) func (a *App) SetPreRunE(cmd *cobra.Command, args []string) error { a.Config.SetLocalFlagsFromFile(cmd) err := a.Config.ValidateSetInput() if err != nil { return err } a.createCollectorDialOpts() return a.initTunnelServer(tunnel.ServerConfig{ AddTargetHandler: a.tunServerAddTargetHandler, DeleteTargetHandler: a.tunServerDeleteTargetHandler, RegisterHandler: a.tunServerRegisterHandler, Handler: a.tunServerHandler, }) } func (a *App) SetRunE(cmd *cobra.Command, args []string) error { defer a.InitSetFlags(cmd) if a.Config.Format == formatEvent { return fmt.Errorf("format event not supported for Set RPC") } ctx, cancel := context.WithCancel(context.Background()) defer cancel() // setupCloseHandler(cancel) targetsConfig, err := a.GetTargets() if err != nil { return fmt.Errorf("failed getting targets config: %v", err) } if !a.PromptMode { for _, tc := range targetsConfig { a.AddTargetConfig(tc) } } err = a.Config.ReadSetRequestTemplate() if err != nil { return fmt.Errorf("failed reading set request files: %v", err) } numTargets := len(a.Config.Targets) a.errCh = make(chan error, numTargets*2) a.wg.Add(numTargets) for _, tc := range a.Config.Targets { go a.SetRequest(ctx, tc) } a.wg.Wait() return a.checkErrors() } func (a *App) SetRequest(ctx context.Context, tc *types.TargetConfig) { defer a.wg.Done() reqs, err := a.Config.CreateSetRequest(tc.Name) if err != nil { a.logError(fmt.Errorf("target %q: failed to create set request: %v", tc.Name, err)) return } for _, req := range reqs { a.setRequest(ctx, tc, req) } } func (a *App) setRequest(ctx context.Context, tc *types.TargetConfig, req *gnmi.SetRequest) { a.Logger.Printf("sending gNMI SetRequest: prefix='%v', delete='%v', replace='%v', update='%v', extension='%v' to %s", req.Prefix, req.Delete, req.Replace, req.Update, req.Extension, tc.Name) if a.Config.PrintRequest || a.Config.SetDryRun { err := a.PrintMsg(tc.Name, "Set Request:", req) if err != nil { a.logError(fmt.Errorf("target %q: %v", tc.Name, err)) } } if a.Config.SetDryRun { return } response, err := a.ClientSet(ctx, tc, req) if err != nil { a.logError(fmt.Errorf("target %q set request failed: %v", tc.Name, err)) return } err = a.PrintMsg(tc.Name, "Set Response:", response) if err != nil { a.logError(fmt.Errorf("target %q: %v", tc.Name, err)) } } // InitSetFlags used to init or reset setCmd flags for gnmic-prompt mode func (a *App) InitSetFlags(cmd *cobra.Command) { cmd.ResetFlags() cmd.Flags().StringVarP(&a.Config.LocalFlags.SetPrefix, "prefix", "", "", "set request prefix") cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetDelete, "delete", "", []string{}, "set request path to be deleted") cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetReplace, "replace", "", []string{}, fmt.Sprintf("set request path:::type:::value to be replaced, type must be one of %v", config.ValueTypes)) cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetUpdate, "update", "", []string{}, fmt.Sprintf("set request path:::type:::value to be updated, type must be one of %v", config.ValueTypes)) cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetUnionReplace, "union-replace", "", []string{}, fmt.Sprintf("set request path:::type:::value to be union-replaced, type must be one of %v", config.ValueTypes)) cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetReplacePath, "replace-path", "", []string{}, "set request path to be replaced") cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetReplaceValue, "replace-value", "", []string{}, "set replace request value") cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetReplaceFile, "replace-file", "", []string{}, "set replace request value in a json/yaml file") cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetUpdatePath, "update-path", "", []string{}, "set request path to be updated") cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetUpdateFile, "update-file", "", []string{}, "set update request value in a json/yaml file") cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetUpdateValue, "update-value", "", []string{}, "set update request value") cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetUnionReplacePath, "union-replace-path", "", []string{}, "set request path for a union_replace") cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetUnionReplaceValue, "union-replace-value", "", []string{}, "set request union_replace value") cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetUnionReplaceFile, "union-replace-file", "", []string{}, "set request union_replace value in a json/yaml file") cmd.Flags().StringVarP(&a.Config.LocalFlags.SetDelimiter, "delimiter", "", ":::", "set update/replace delimiter between path, type, value") cmd.Flags().StringVarP(&a.Config.LocalFlags.SetTarget, "target", "", "", "set request target") cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetRequestFile, "request-file", "", []string{}, "set request template file(s)") cmd.Flags().StringVarP(&a.Config.LocalFlags.SetRequestVars, "request-vars", "", "", "set request variables file") cmd.Flags().BoolVarP(&a.Config.LocalFlags.SetDryRun, "dry-run", "", false, "prints the set request without initiating a gRPC connection") cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetRequestProtoFile, "set-proto-request-file", "", []string{}, "set request from prototext file") cmd.Flags().BoolVarP(&a.Config.LocalFlags.SetNoTrim, "no-trim", "", false, "won't trim the input files") // cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetReplaceCli, "replace-cli", "", []string{}, "a cli command to be sent as a set replace request") cmd.Flags().StringVarP(&a.Config.LocalFlags.SetReplaceCliFile, "replace-cli-file", "", "", "path to a file containing a list of commands that will be sent as a set replace request") cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SetUpdateCli, "update-cli", "", []string{}, "a cli command to be sent as a set update request") cmd.Flags().StringVarP(&a.Config.LocalFlags.SetUpdateCliFile, "update-cli-file", "", "", "path to a file containing a list of commands that will be sent as a set update request") cmd.Flags().StringVarP(&a.Config.LocalFlags.SetCommitId, "commit-id", "", "", "commit ID value") cmd.Flags().BoolVarP(&a.Config.LocalFlags.SetCommitRequest, "commit-request", "", false, "start a commit confirmed transaction") cmd.Flags().BoolVarP(&a.Config.LocalFlags.SetCommitConfirm, "commit-confirm", "", false, "confirm the commit ID") cmd.Flags().BoolVarP(&a.Config.LocalFlags.SetCommitCancel, "commit-cancel", "", false, "cancel the commit") cmd.Flags().DurationVarP(&a.Config.LocalFlags.SetCommitRollbackDuration, "rollback-duration", "", 0, "set the commit rollback duration") cmd.LocalFlags().VisitAll(func(flag *pflag.Flag) { a.Config.FileConfig.BindPFlag(fmt.Sprintf("%s-%s", cmd.Name(), flag.Name), flag) }) } ================================================ FILE: pkg/app/setrequest.go ================================================ // Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package app import ( "fmt" "os" "github.com/openconfig/ygot/gnmidiff" "github.com/openconfig/ygot/gnmidiff/gnmiparse" "github.com/spf13/cobra" "github.com/spf13/pflag" ) // InitDiffSetRequestFlags used to init or reset diffSetRequestCmd // flags for gnmic-prompt mode func (a *App) InitDiffSetRequestFlags(cmd *cobra.Command) { cmd.ResetFlags() cmd.Flags().StringVarP(&a.Config.LocalFlags.DiffSetRequestRef, "ref", "", "", "reference gNMI SetRequest textproto file for comparing against the new SetRequest") cmd.Flags().StringVarP(&a.Config.LocalFlags.DiffSetRequestNew, "new", "", "", "new gNMI SetRequest textproto file for comparing against the reference SetRequest") cmd.MarkFlagRequired("ref") cmd.MarkFlagRequired("new") cmd.Flags().BoolVarP(&a.Config.LocalFlags.DiffSetRequestFull, "full", "f", false, "show common values between the two SetRequests") cmd.LocalFlags().VisitAll(func(flag *pflag.Flag) { a.Config.FileConfig.BindPFlag(fmt.Sprintf("%s-%s", "diff-setrequest", flag.Name), flag) }) } func (a *App) DiffSetRequestRunE(cmd *cobra.Command, args []string) error { defer a.InitDiffSetRequestFlags(cmd) format := gnmidiff.Format{ Full: a.Config.LocalFlags.DiffSetRequestFull, } srA, err := gnmiparse.SetRequestFromFile(a.Config.LocalFlags.DiffSetRequestRef) if err != nil { return err } srB, err := gnmiparse.SetRequestFromFile(a.Config.LocalFlags.DiffSetRequestNew) if err != nil { return err } diff, err := gnmidiff.DiffSetRequest(srA, srB, nil) if err != nil { return err } fmt.Fprint(os.Stdout, diff.Format(format)) return nil } ================================================ FILE: pkg/app/subscribe.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "context" "errors" "fmt" "io" "sort" "strings" "sync" "time" "github.com/manifoldco/promptui" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/grpctunnel/tunnel" "github.com/spf13/cobra" "github.com/spf13/pflag" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmic/pkg/api/target" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/config" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/outputs" ) const ( initLockerRetryTimer = 1 * time.Second ) func (a *App) SubscribePreRunE(cmd *cobra.Command, args []string) error { a.Config.SetLocalFlagsFromFile(cmd) err := a.initPluginManager() if err != nil { return err } a.createCollectorDialOpts() return nil } func (a *App) SubscribeRunE(cmd *cobra.Command, args []string) error { defer a.InitSubscribeFlags(cmd) // prompt mode if a.PromptMode { return a.SubscribeRunPrompt(cmd, args) } // subCfg, err := a.Config.GetSubscriptions(cmd) if err != nil { return fmt.Errorf("failed reading subscriptions config: %v", err) } err = a.readConfigs() if err != nil { return err } err = a.Config.GetClustering() if err != nil { return err } err = a.Config.GetGNMIServer() if err != nil { return err } err = a.Config.GetAPIServer() if err != nil { return err } err = a.Config.GetLoader() if err != nil { return err } numInputs := len(a.Config.Inputs) if len(subCfg) == 0 && numInputs == 0 { return errors.New("no subscriptions or inputs configuration found") } // only once mode subscriptions requested if allSubscriptionsModeOnce(subCfg) { return a.SubscribeRunONCE(cmd, args) } // only poll mode subscriptions requested if allSubscriptionsModePoll(subCfg) { return a.SubscribeRunPoll(cmd, args) } // stream subscriptions err = a.initTunnelServer(tunnel.ServerConfig{ AddTargetHandler: a.tunServerAddTargetSubscribeHandler, DeleteTargetHandler: a.tunServerDeleteTargetHandler, RegisterHandler: a.tunServerRegisterHandler, Handler: a.tunServerHandler, }) if err != nil { return err } _, err = a.Config.GetTargets() if errors.Is(err, config.ErrNoTargetsFound) { if !a.Config.LocalFlags.SubscribeWatchConfig && len(a.Config.FileConfig.GetStringMap("loader")) == 0 && !a.Config.UseTunnelServer && numInputs == 0 { return fmt.Errorf("failed reading targets config: %v", err) } } else if err != nil { return fmt.Errorf("failed reading targets config: %v", err) } // for { err := a.InitLocker() if err != nil { a.Logger.Printf("failed to init locker: %v", err) time.Sleep(initLockerRetryTimer) continue } break } a.startAPIServer() a.startGnmiServer() go a.startCluster() a.startIO() if a.Config.LocalFlags.SubscribeWatchConfig { go a.watchConfig() } for range a.ctx.Done() { return a.ctx.Err() } return nil } func (a *App) subscribeStream(ctx context.Context, tc *types.TargetConfig) { defer a.wg.Done() a.TargetSubscribeStream(ctx, tc) } func (a *App) subscribeOnce(ctx context.Context, tc *types.TargetConfig) { defer a.wg.Done() err := a.TargetSubscribeOnce(ctx, tc) if err != nil { a.logError(err) } } func (a *App) subscribePoll(ctx context.Context, tc *types.TargetConfig) { defer a.wg.Done() a.TargetSubscribePoll(ctx, tc) } // InitSubscribeFlags used to init or reset subscribeCmd flags for gnmic-prompt mode func (a *App) InitSubscribeFlags(cmd *cobra.Command) { cmd.ResetFlags() cmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribePrefix, "prefix", "", "", "subscribe request prefix") cmd.Flags().StringArrayVarP(&a.Config.LocalFlags.SubscribePath, "path", "", []string{}, "subscribe request paths") //cmd.MarkFlagRequired("path") cmd.Flags().Uint32VarP(&a.Config.LocalFlags.SubscribeQos, "qos", "q", 0, "qos marking") cmd.Flags().BoolVarP(&a.Config.LocalFlags.SubscribeUpdatesOnly, "updates-only", "", false, "only updates to current state should be sent") cmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribeMode, "mode", "", "stream", "one of: once, stream, poll") cmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribeStreamMode, "stream-mode", "", "target-defined", "one of: on-change, sample, target-defined") cmd.Flags().DurationVarP(&a.Config.LocalFlags.SubscribeSampleInterval, "sample-interval", "i", 0, "sample interval as a decimal number and a suffix unit, such as \"10s\" or \"1m30s\"") cmd.Flags().BoolVarP(&a.Config.LocalFlags.SubscribeSuppressRedundant, "suppress-redundant", "", false, "suppress redundant update if the subscribed value didn't not change") cmd.Flags().DurationVarP(&a.Config.LocalFlags.SubscribeHeartbeatInterval, "heartbeat-interval", "", 0, "heartbeat interval in case suppress-redundant is enabled") cmd.Flags().StringSliceVarP(&a.Config.LocalFlags.SubscribeModel, "model", "", []string{}, "subscribe request used model(s)") cmd.Flags().BoolVar(&a.Config.LocalFlags.SubscribeQuiet, "quiet", false, "suppress stdout printing") cmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribeTarget, "target", "", "", "subscribe request target") cmd.Flags().BoolVarP(&a.Config.LocalFlags.SubscribeSetTarget, "set-target", "", false, "set target name in gNMI Path prefix") cmd.Flags().StringSliceVarP(&a.Config.LocalFlags.SubscribeName, "name", "n", []string{}, "reference subscriptions by name, must be defined in gnmic config file") cmd.Flags().StringSliceVarP(&a.Config.LocalFlags.SubscribeOutput, "output", "", []string{}, "reference to output groups by name, must be defined in gnmic config file") cmd.Flags().BoolVarP(&a.Config.LocalFlags.SubscribeWatchConfig, "watch-config", "", false, "watch configuration changes, add or delete subscribe targets accordingly") cmd.Flags().DurationVarP(&a.Config.LocalFlags.SubscribeBackoff, "backoff", "", 0, "backoff time between subscribe requests") cmd.Flags().DurationVarP(&a.Config.LocalFlags.SubscribeLockRetry, "lock-retry", "", 5*time.Second, "time to wait between target lock attempts") cmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribeHistorySnapshot, "history-snapshot", "", "", "sets the snapshot time in a historical subscription, nanoseconds since Unix epoch or RFC3339 format") cmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribeHistoryStart, "history-start", "", "", "sets the start time in a historical range subscription, nanoseconds since Unix epoch or RFC3339 format") cmd.Flags().StringVarP(&a.Config.LocalFlags.SubscribeHistoryEnd, "history-end", "", "", "sets the end time in a historical range subscription, nanoseconds since Unix epoch or RFC3339 format") cmd.Flags().Uint32VarP(&a.Config.LocalFlags.SubscribeDepth, "depth", "", 0, "depth extension value") // cmd.LocalFlags().VisitAll(func(flag *pflag.Flag) { a.Config.FileConfig.BindPFlag(fmt.Sprintf("%s-%s", cmd.Name(), flag.Name), flag) }) } func (a *App) readConfigs() error { var err error _, err = a.Config.GetOutputs() if err != nil { return fmt.Errorf("failed reading outputs config: %v", err) } _, err = a.Config.GetInputs() if err != nil { return fmt.Errorf("failed reading inputs config: %v", err) } _, err = a.Config.GetActions() if err != nil { return fmt.Errorf("failed reading actions config: %v", err) } _, err = a.Config.GetEventProcessors() if err != nil { return fmt.Errorf("failed reading event processors config: %v", err) } _, err = a.LoadProtoFiles() if err != nil { return fmt.Errorf("failed loading proto files: %v", err) } return nil } const ( subscriptionModeONCE = "ONCE" subscriptionModePOLL = "POLL" ) func (a *App) StartTargetsManager(ctx context.Context) { defer func() { for _, o := range a.Outputs { o.Close() } }() for t := range a.targetsChan { if a.Config.Debug { a.Logger.Printf("starting target %+v", t) } if t == nil { continue } a.operLock.RLock() _, ok := a.activeTargets[t.Config.Name] a.operLock.RUnlock() if ok { if a.Config.Debug { a.Logger.Printf("target %q listener already active", t.Config.Name) } continue } a.operLock.Lock() a.activeTargets[t.Config.Name] = struct{}{} a.operLock.Unlock() a.Logger.Printf("starting target %q listener", t.Config.Name) go func(t *target.Target) { numOnceSubscriptions := t.NumberOfOnceSubscriptions() remainingOnceSubscriptions := numOnceSubscriptions numSubscriptions := len(t.Subscriptions) rspChan, errChan := t.ReadSubscriptions() for { select { case rsp := <-rspChan: subscribeResponseReceivedCounter.WithLabelValues(t.Config.Name, rsp.SubscriptionConfig.Name).Add(1) if a.Config.Debug { a.Logger.Printf("target %q: gNMI Subscribe Response: %+v", t.Config.Name, rsp) } err := t.DecodeProtoBytes(rsp.Response) if err != nil { a.Logger.Printf("target %q: failed to decode proto bytes: %v", t.Config.Name, err) continue } m := outputs.Meta{ "source": t.Config.Name, "format": a.Config.Format, "subscription-name": rsp.SubscriptionName, } if rsp.SubscriptionConfig.Target != "" { m["subscription-target"] = rsp.SubscriptionConfig.Target } for k, v := range t.Config.EventTags { m[k] = v } // Allow overridden outputs per subscription // If both target and subscription have a specified Output, the subscription's Output will be used var outs []string if len(rsp.SubscriptionConfig.Outputs) > 0 { outs = rsp.SubscriptionConfig.Outputs } else { outs = t.Config.Outputs } a.export(ctx, rsp.Response, m, outs...) if remainingOnceSubscriptions > 0 { if a.subscriptionMode(rsp.SubscriptionName) == subscriptionModeONCE { switch rsp.Response.Response.(type) { case *gnmi.SubscribeResponse_SyncResponse: remainingOnceSubscriptions-- } } } if remainingOnceSubscriptions == 0 && numSubscriptions == numOnceSubscriptions { a.operLock.Lock() delete(a.activeTargets, t.Config.Name) a.operLock.Unlock() return } case tErr := <-errChan: if errors.Is(tErr.Err, io.EOF) { a.Logger.Printf("target %q: subscription %s closed stream(EOF)", t.Config.Name, tErr.SubscriptionName) } else { subscribeResponseFailedCounter.WithLabelValues(t.Config.Name, tErr.SubscriptionName).Inc() a.Logger.Printf("target %q: subscription %s rcv error: %v", t.Config.Name, tErr.SubscriptionName, tErr.Err) } if remainingOnceSubscriptions > 0 { if a.subscriptionMode(tErr.SubscriptionName) == subscriptionModeONCE { remainingOnceSubscriptions-- } } if remainingOnceSubscriptions == 0 && numSubscriptions == numOnceSubscriptions { a.operLock.Lock() delete(a.activeTargets, t.Config.Name) a.operLock.Unlock() return } case <-t.StopChan: a.operLock.Lock() delete(a.activeTargets, t.Config.Name) a.operLock.Unlock() a.Logger.Printf("target %q: listener stopped", t.Config.Name) return case <-ctx.Done(): a.operLock.Lock() delete(a.activeTargets, t.Config.Name) a.operLock.Unlock() return } } }(t) } for range ctx.Done() { return } } func (a *App) export(ctx context.Context, rsp *gnmi.SubscribeResponse, m outputs.Meta, outs ...string) { if rsp == nil { return } go a.updateCache(ctx, rsp, m) wg := new(sync.WaitGroup) // target has no explicitly defined outputs if len(outs) == 0 { wg.Add(len(a.Outputs)) for _, o := range a.Outputs { go func(o outputs.Output) { defer wg.Done() defer a.operLock.RUnlock() a.operLock.RLock() o.Write(ctx, rsp, m) }(o) } wg.Wait() return } // write to the outputs defined under the target for _, name := range outs { a.operLock.RLock() if o, ok := a.Outputs[name]; ok { wg.Add(1) go func(o outputs.Output) { defer wg.Done() o.Write(ctx, rsp, m) }(o) } a.operLock.RUnlock() } wg.Wait() } func (a *App) updateCache(ctx context.Context, rsp *gnmi.SubscribeResponse, m outputs.Meta) { if a.c == nil { return } r := proto.Clone(rsp).(*gnmi.SubscribeResponse) switch r := r.Response.(type) { case *gnmi.SubscribeResponse_Update: if r.Update.GetPrefix() == nil { r.Update.Prefix = new(gnmi.Path) } if r.Update.GetPrefix().GetTarget() == "" { r.Update.Prefix.Target = utils.GetHost(m["source"]) } target := r.Update.GetPrefix().GetTarget() if target == "" { a.Logger.Printf("response missing target") return } if a.Config.Debug { a.Logger.Printf("updating target %q cache", target) } sub := m["subscription-name"] a.c.Write(ctx, sub, &gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: r.Update}}) } } func (a *App) subscriptionMode(name string) string { if sub, ok := a.Config.Subscriptions[name]; ok { return strings.ToUpper(sub.Mode) } return "" } // polledSubscriptionsTargets returns a map of target name to a list of subscription names that have Mode == POLL func (a *App) polledSubscriptionsTargets() map[string][]string { result := make(map[string][]string) for tn, target := range a.Targets { for _, sub := range target.Subscriptions { if strings.ToUpper(sub.Mode) == subscriptionModePOLL { if result[tn] == nil { result[tn] = make([]string, 0) } result[tn] = append(result[tn], sub.Name) } } } return result } func (a *App) handlePolledSubscriptions() error { polledTargetsSubscriptions := a.polledSubscriptionsTargets() if len(polledTargetsSubscriptions) == 0 { return nil } mo := &formatters.MarshalOptions{ Multiline: true, Indent: " ", Format: a.Config.Format, CalculateLatency: a.Config.GlobalFlags.CalculateLatency, } // handle initial responses if updates-only is not set if !a.Config.SubscribeUpdatesOnly { for targetName := range polledTargetsSubscriptions { a.operLock.RLock() t, ok := a.Targets[targetName] a.operLock.RUnlock() if !ok { return fmt.Errorf("unknown target name %q", targetName) } rspCh, errCh := t.ReadSubscriptions() SUBS: for { select { case rsp := <-rspCh: b, err := mo.Marshal(rsp.Response, nil) if err != nil { return fmt.Errorf("target '%s', subscription '%s': poll response formatting error: %v", targetName, rsp.SubscriptionName, err) } fmt.Println(string(b)) switch rsp := rsp.Response.Response.(type) { case *gnmi.SubscribeResponse_SyncResponse: fmt.Printf("received sync response '%t' from '%s'\n", rsp.SyncResponse, targetName) break SUBS // current target done sending initial updates } case tErr := <-errCh: if tErr.Err != nil { return fmt.Errorf("target '%s', subscription '%s': poll response error: %v", targetName, tErr.SubscriptionName, tErr.Err) } case <-a.ctx.Done(): return a.ctx.Err() } } } } pollTargets := make([]string, 0, len(polledTargetsSubscriptions)) for t := range polledTargetsSubscriptions { pollTargets = append(pollTargets, t) } sort.Slice(pollTargets, func(i, j int) bool { return pollTargets[i] < pollTargets[j] }) s := promptui.Select{ Label: "select target to poll", Items: pollTargets, HideSelected: true, } waitChan := make(chan struct{}, 1) waitChan <- struct{}{} OUTER: for { select { case <-waitChan: _, name, err := s.Run() if err != nil { fmt.Printf("failed selecting target to poll: %v\n", err) continue } ss := promptui.Select{ Label: "select subscription to poll", Items: polledTargetsSubscriptions[name], HideSelected: true, } _, subName, err := ss.Run() if err != nil { fmt.Printf("failed selecting subscription to poll: %v\n", err) continue } err = a.clientSubscribePoll(a.Context(), name, subName) if err != nil && err != io.EOF { fmt.Printf("target '%s', subscription '%s': poll response error:%v\n", name, subName, err) continue } a.operLock.RLock() t, ok := a.Targets[name] a.operLock.RUnlock() if !ok { return fmt.Errorf("unknown target name %q", name) } rspCh, errCh := t.ReadSubscriptions() for { select { case <-a.Context().Done(): return a.Context().Err() case tErr := <-errCh: if tErr.Err != nil { fmt.Printf("received error from target '%s': %v\n", name, err) waitChan <- struct{}{} continue OUTER } case rsp, ok := <-rspCh: if !ok { waitChan <- struct{}{} continue OUTER } if rsp == nil { fmt.Printf("received empty response from target '%s'\n", name) continue } switch rsp := rsp.Response.Response.(type) { case *gnmi.SubscribeResponse_SyncResponse: fmt.Printf("received sync response '%t' from '%s'\n", rsp.SyncResponse, name) waitChan <- struct{}{} continue OUTER } b, err := mo.Marshal(rsp.Response, nil) if err != nil { fmt.Printf("target '%s', subscription '%s': poll response formatting error:%v\n", name, subName, err) fmt.Println(rsp.Response) continue } fmt.Println(string(b)) } } case <-a.ctx.Done(): return a.Context().Err() } } } func (a *App) startIO() { go a.StartTargetsManager(a.ctx) a.InitOutputs(a.ctx) a.InitInputs(a.ctx) if !a.inCluster() { go a.startLoader(a.ctx) var limiter *time.Ticker if a.Config.LocalFlags.SubscribeBackoff > 0 { limiter = time.NewTicker(a.Config.LocalFlags.SubscribeBackoff) } if !a.Config.UseTunnelServer { for _, tc := range a.Config.Targets { a.wg.Add(1) go a.subscribeStream(a.ctx, tc) if limiter != nil { <-limiter.C } } } if limiter != nil { limiter.Stop() } a.wg.Wait() } } func allSubscriptionsModeOnce(subs map[string]*types.SubscriptionConfig) bool { if len(subs) == 0 { return false } for _, sub := range subs { if strings.ToUpper(sub.Mode) != "ONCE" { return false } } return true } func allSubscriptionsModePoll(subs map[string]*types.SubscriptionConfig) bool { if len(subs) == 0 { return false } for _, sub := range subs { if strings.ToUpper(sub.Mode) != "POLL" { return false } } return true } ================================================ FILE: pkg/app/subscribe_once.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "fmt" "time" "github.com/openconfig/grpctunnel/tunnel" "github.com/spf13/cobra" ) func (a *App) SubscribeRunONCE(_ *cobra.Command, _ []string) error { a.c = nil // todo: err := a.initTunnelServer(tunnel.ServerConfig{ AddTargetHandler: a.tunServerAddTargetHandler, DeleteTargetHandler: a.tunServerDeleteTargetHandler, RegisterHandler: a.tunServerRegisterHandler, Handler: a.tunServerHandler, }) if err != nil { return fmt.Errorf("failed to init tunnel server: %v", err) } _, err = a.GetTargets() if err != nil { return fmt.Errorf("failed reading targets config: %v", err) } err = a.readConfigs() if err != nil { return err } // a.InitOutputs(a.ctx) var limiter *time.Ticker if a.Config.LocalFlags.SubscribeBackoff > 0 { limiter = time.NewTicker(a.Config.LocalFlags.SubscribeBackoff) } numTargets := len(a.Config.Targets) a.errCh = make(chan error, numTargets) a.wg.Add(numTargets) for _, tc := range a.Config.Targets { go a.subscribeOnce(a.ctx, tc) if limiter != nil { <-limiter.C } } if limiter != nil { limiter.Stop() } a.wg.Wait() return a.checkErrors() } ================================================ FILE: pkg/app/subscribe_poll.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "fmt" "github.com/openconfig/grpctunnel/tunnel" "github.com/spf13/cobra" ) func (a *App) SubscribeRunPoll(cmd *cobra.Command, args []string) error { err := a.initTunnelServer(tunnel.ServerConfig{ AddTargetHandler: a.tunServerAddTargetHandler, DeleteTargetHandler: a.tunServerDeleteTargetHandler, RegisterHandler: a.tunServerRegisterHandler, Handler: a.tunServerHandler, }) if err != nil { return fmt.Errorf("failed to init tunnel server: %v", err) } _, err = a.GetTargets() if err != nil { return fmt.Errorf("failed reading targets config: %v", err) } err = a.readConfigs() if err != nil { return err } go a.StartTargetsManager(a.ctx) a.wg.Add(len(a.Config.Targets)) for _, tc := range a.Config.Targets { go a.subscribePoll(a.ctx, tc) } a.wg.Wait() return a.handlePolledSubscriptions() } ================================================ FILE: pkg/app/subscribe_prompt.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "fmt" "time" "github.com/openconfig/gnmic/pkg/api/types" "github.com/spf13/cobra" ) func (a *App) SubscribeRunPrompt(cmd *cobra.Command, args []string) error { // stop running subscriptions for _, t := range a.Targets { t.StopSubscriptions() } // reset subscriptions config map a.Config.Subscriptions = make(map[string]*types.SubscriptionConfig) // read targets _, err := a.Config.GetTargets() if err != nil { return fmt.Errorf("failed reading targets config: %v", err) } subCfg, err := a.Config.GetSubscriptions(cmd) if err != nil { return fmt.Errorf("failed reading subscriptions config: %v", err) } // only once mode subscriptions requested if allSubscriptionsModeOnce(subCfg) { return a.SubscribeRunONCE(cmd, args) } // only poll mode subscriptions requested if allSubscriptionsModePoll(subCfg) { return a.SubscribeRunPoll(cmd, args) } // stream+once mode subscriptions err = a.readConfigs() if err != nil { return err } go a.StartTargetsManager(a.ctx) a.InitOutputs(a.ctx) var limiter *time.Ticker if a.Config.LocalFlags.SubscribeBackoff > 0 { limiter = time.NewTicker(a.Config.LocalFlags.SubscribeBackoff) } a.wg.Add(len(a.Config.Targets)) for _, tc := range a.Config.Targets { go a.subscribeStream(a.ctx, tc) if limiter != nil { <-limiter.C } } if limiter != nil { limiter.Stop() } a.wg.Wait() return nil } ================================================ FILE: pkg/app/target.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "context" "fmt" "github.com/fullstorydev/grpcurl" "github.com/openconfig/gnmic/pkg/api/target" "github.com/openconfig/gnmic/pkg/api/types" ) // initTarget initializes a new target given its name. // it assumes that the configLock as well as the operLock // are acquired. func (a *App) initTarget(tc *types.TargetConfig) (*target.Target, error) { t, ok := a.Targets[tc.Name] if !ok { t := target.NewTarget(tc) for _, subName := range tc.Subscriptions { if sub, ok := a.Config.Subscriptions[subName]; ok { t.Subscriptions[subName] = sub } } if len(t.Subscriptions) == 0 { for n, sub := range a.Config.Subscriptions { t.Subscriptions[n] = sub } } err := a.parseProtoFiles(t) if err != nil { return nil, err } a.Targets[t.Config.Name] = t return t, nil } return t, nil } func (a *App) stopTarget(ctx context.Context, name string) error { if a.Targets == nil { return nil } a.operLock.Lock() defer a.operLock.Unlock() if _, ok := a.Targets[name]; !ok { return fmt.Errorf("target %q does not exist", name) } a.Logger.Printf("stopping target %q", name) t := a.Targets[name] t.StopSubscriptions() delete(a.Targets, name) if a.locker == nil { return nil } return a.locker.Unlock(ctx, a.targetLockKey(name)) } func (a *App) DeleteTarget(ctx context.Context, name string) error { if a.Targets == nil { return nil } if !a.targetConfigExists(name) { return fmt.Errorf("target %q does not exist", name) } a.configLock.Lock() delete(a.Config.Targets, name) a.configLock.Unlock() a.Logger.Printf("target %q deleted from config", name) // delete from oper map a.operLock.Lock() defer a.operLock.Unlock() if cfn, ok := a.targetsLockFn[name]; ok { cfn() } if a.c != nil { a.c.DeleteTarget(name) } if t, ok := a.Targets[name]; ok { delete(a.Targets, name) t.Close() if a.locker != nil { return a.locker.Unlock(ctx, a.targetLockKey(name)) } } return nil } // UpdateTargetConfig updates the subscriptions for an existing target func (a *App) UpdateTargetSubscription(ctx context.Context, name string, subs []string) error { a.configLock.Lock() for _, subName := range subs { if _, ok := a.Config.Subscriptions[subName]; !ok { a.configLock.Unlock() return fmt.Errorf("subscription %q does not exist", subName) } } targetConfig := a.Config.Targets[name] targetConfig.Subscriptions = subs a.configLock.Unlock() if err := a.stopTarget(ctx, name); err != nil { return err } go a.TargetSubscribeStream(ctx, targetConfig) return nil } // AddTargetConfig adds a *TargetConfig to the configuration map func (a *App) AddTargetConfig(tc *types.TargetConfig) { a.Logger.Printf("adding target %s", tc) _, ok := a.Config.Targets[tc.Name] if ok { return } if tc.BufferSize <= 0 { tc.BufferSize = a.Config.TargetBufferSize } if tc.RetryTimer <= 0 { tc.RetryTimer = a.Config.Retry } a.configLock.Lock() defer a.configLock.Unlock() a.Config.Targets[tc.Name] = tc } func (a *App) parseProtoFiles(t *target.Target) error { if len(t.Config.ProtoFiles) == 0 { t.RootDesc = a.rootDesc return nil } a.Logger.Printf("target %q loading proto files...", t.Config.Name) descSource, err := grpcurl.DescriptorSourceFromProtoFiles(t.Config.ProtoDirs, t.Config.ProtoFiles...) if err != nil { a.Logger.Printf("failed to load proto files: %v", err) return err } t.RootDesc, err = descSource.FindSymbol("Nokia.SROS.root") if err != nil { a.Logger.Printf("target %q could not get symbol 'Nokia.SROS.root': %v", t.Config.Name, err) return err } a.Logger.Printf("target %q loaded proto files", t.Config.Name) return nil } func (a *App) targetConfigExists(name string) bool { a.configLock.RLock() _, ok := a.Config.Targets[name] a.configLock.RUnlock() return ok } ================================================ FILE: pkg/app/tree.go ================================================ package app import ( "fmt" "strings" "github.com/spf13/cobra" "github.com/spf13/pflag" ) func (a *App) InitTreeFlags(cmd *cobra.Command) { cmd.ResetFlags() // cmd.Flags().BoolVar(&a.Config.TreeFlat, "flat", false, "print flat commands tree") cmd.Flags().BoolVar(&a.Config.TreeDetails, "details", false, "print commands flags") // cmd.Flags().VisitAll(func(flag *pflag.Flag) { a.Config.FileConfig.BindPFlag(fmt.Sprintf("%s-%s", cmd.Name(), flag.Name), flag) }) } func (a *App) RunETree(cmd *cobra.Command, args []string) error { if a.Config.TreeFlat { treeFlat(a.RootCmd, "") return nil } a.tree(a.RootCmd, "") return nil } func (a *App) tree(c *cobra.Command, indent string) error { fmt.Printf("%s", c.Use) if !c.HasSubCommands() { if c.HasLocalFlags() && a.Config.TreeDetails { sections := make([]string, 0) c.LocalFlags().VisitAll(func(flag *pflag.Flag) { flagSection := "" if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { flagSection = fmt.Sprintf("[-%s | --%s]", flag.Shorthand, flag.Name) } else { flagSection = fmt.Sprintf("[--%s]", flag.Name) } sections = append(sections, flagSection) }) fmt.Printf(" %s", strings.Join(sections, " ")) } } fmt.Printf("\n") subCmds := c.Commands() numSubCommands := len(subCmds) for i, subC := range subCmds { add := " │ " if i == numSubCommands-1 { fmt.Print(indent + " └─── ") add = " " } else { fmt.Print(indent + " ├─── ") } err := a.tree(subC, indent+add) if err != nil { return err } } return nil } func treeFlat(c *cobra.Command, prefix string) { prefix += " " + c.Use fmt.Println(prefix) for _, subC := range c.Commands() { treeFlat(subC, prefix) } } ================================================ FILE: pkg/app/tunnel.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "context" "fmt" "io" "net" "regexp" "strings" "time" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" tpb "github.com/openconfig/grpctunnel/proto/tunnel" "github.com/openconfig/grpctunnel/tunnel" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" ) func (a *App) initTunnelServer(tsc tunnel.ServerConfig) error { if !a.Config.UseTunnelServer { return nil } err := a.Config.GetTunnelServer() if err != nil { return err } go func() { err = a.startTunnelServer(tsc) if err != nil { a.Logger.Printf("failed to start tunnel server: %v", err) } }() return nil } func (a *App) startTunnelServer(tsc tunnel.ServerConfig) error { if a.Config.TunnelServer == nil { return nil } var err error a.tunServer, err = tunnel.NewServer(tsc) if err != nil { a.Logger.Printf("failed to create a tunnel server: %v", err) return err } // create tunnel server options opts, err := a.gRPCTunnelServerOpts() if err != nil { a.Logger.Printf("failed to build gRPC tunnel server options: %v", err) return err } a.grpcTunnelSrv = grpc.NewServer(opts...) // register the tunnel service with the grpc server tpb.RegisterTunnelServer(a.grpcTunnelSrv, a.tunServer) // var l net.Listener network := "tcp" addr := a.Config.TunnelServer.Address if strings.HasPrefix(a.Config.TunnelServer.Address, "unix://") { network = "unix" addr = strings.TrimPrefix(addr, "unix://") } ctx, cancel := context.WithCancel(a.ctx) for { l, err = net.Listen(network, addr) if err != nil { a.Logger.Printf("failed to start gRPC tunnel server listener: %v", err) time.Sleep(time.Second) continue } break } go func() { err = a.grpcTunnelSrv.Serve(l) if err != nil { a.Logger.Printf("gRPC tunnel server shutdown: %v", err) } cancel() }() defer a.grpcTunnelSrv.Stop() for range ctx.Done() { } return ctx.Err() } func (a *App) gRPCTunnelServerOpts() ([]grpc.ServerOption, error) { opts := make([]grpc.ServerOption, 0) if a.Config.TunnelServer.EnableMetrics && a.reg != nil { grpcMetrics := grpc_prometheus.NewServerMetrics() opts = append(opts, grpc.StreamInterceptor(grpcMetrics.StreamServerInterceptor()), grpc.UnaryInterceptor(grpcMetrics.UnaryServerInterceptor()), ) a.reg.MustRegister(grpcMetrics) } if a.Config.TunnelServer.TLS == nil { return opts, nil } tlscfg, err := utils.NewTLSConfig( a.Config.TunnelServer.TLS.CaFile, a.Config.TunnelServer.TLS.CertFile, a.Config.TunnelServer.TLS.KeyFile, a.Config.TunnelServer.TLS.ClientAuth, false, true, ) if err != nil { return nil, err } if tlscfg != nil { opts = append(opts, grpc.Creds(credentials.NewTLS(tlscfg))) } return opts, nil } func (a *App) tunServerAddTargetHandler(tt tunnel.Target) error { a.Logger.Printf("tunnel server discovered target %+v", tt) tc := a.getTunnelTargetMatch(tt) if tc == nil { a.Logger.Printf("target %+v ignored", tt) return nil } a.ttm.Lock() a.tunTargets[tt] = struct{}{} a.ttm.Unlock() return nil } func (a *App) tunServerAddTargetSubscribeHandler(tt tunnel.Target) error { a.Logger.Printf("tunnel server discovered target %+v", tt) tc := a.getTunnelTargetMatch(tt) if tc == nil { a.Logger.Printf("target %+v ignored", tt) return nil } a.ttm.Lock() a.tunTargets[tt] = struct{}{} a.AddTargetConfig(tc) a.ttm.Unlock() a.operLock.Lock() t, err := a.initTarget(tc) a.operLock.Unlock() if err != nil { return err } a.targetsChan <- t a.wg.Add(1) go a.subscribeStream(a.ctx, tc) return nil } func (a *App) tunServerDeleteTargetHandler(tt tunnel.Target) error { a.Logger.Printf("tunnel server target %+v deregister request", tt) a.ttm.Lock() defer a.ttm.Unlock() if cfn, ok := a.tunTargetCfn[tt]; ok { cfn() delete(a.tunTargetCfn, tt) delete(a.tunTargets, tt) if err := a.DeleteTarget(a.ctx, tt.ID); err != nil { a.Logger.Printf("failed deleting tunnel target %q: %v", tt.ID, err) } } return nil } func (a *App) tunServerRegisterHandler(ss tunnel.ServerSession) error { return nil } func (a *App) tunServerHandler(ss tunnel.ServerSession, rwc io.ReadWriteCloser) error { return nil } // tunDialerFn is used to build a grpc Option that sets a custom dialer for tunnel targets. func (a *App) tunDialerFn(ctx context.Context, tc *types.TargetConfig) func(context.Context, string) (net.Conn, error) { return func(_ context.Context, _ string) (net.Conn, error) { tt := tunnel.Target{ID: tc.Name, Type: tc.TunnelTargetType} a.ttm.RLock() _, ok := a.tunTargets[tt] a.ttm.RUnlock() if !ok { return nil, fmt.Errorf("unknown tunnel target %+v", tt) } a.Logger.Printf("dialing tunnel connection for tunnel target %q", tc.Name) conn, err := tunnel.ServerConn(ctx, a.tunServer, &tt) if err != nil { a.Logger.Printf("failed dialing tunnel connection for target %q: %v", tc.Name, err) } return conn, err } } func (a *App) getTunnelTargetMatch(tt tunnel.Target) *types.TargetConfig { if len(a.Config.TunnelServer.Targets) == 0 { // no target matches defined, accept only GNMI_GNOI type if tt.Type == "GNMI_GNOI" { // create a default target config tc := &types.TargetConfig{Name: tt.ID, TunnelTargetType: tt.Type} err := a.Config.SetTargetConfigDefaults(tc) if err != nil { a.Logger.Printf("failed to set target %q config defaults: %v", tt.ID, err) return nil } tc.Address = tc.Name return tc } return nil } for _, tm := range a.Config.TunnelServer.Targets { // check if the discovered target matches one of the configured types ok, err := regexp.MatchString(tm.Type, tt.Type) if err != nil { a.Logger.Printf("regex %q eval failed with string %q: %v", tm.Type, tt.Type, err) continue } if !ok { continue } // check if the discovered target matches one of the configured IDs ok, err = regexp.MatchString(tm.ID, tt.ID) if err != nil { a.Logger.Printf("regex %q eval failed with string %q: %v", tm.ID, tt.ID, err) continue } if !ok { continue } // target has a match if a.Config.Debug { a.Logger.Printf("target %+v matches %+v", tt, tm) } tc := new(types.TargetConfig) *tc = tm.Config tc.Name = tt.ID tc.TunnelTargetType = tt.Type err = a.Config.SetTargetConfigDefaults(tc) if err != nil { a.Logger.Printf("failed to set target %q config defaults: %v", tt.ID, err) continue } tc.Address = tc.Name return tc } return nil } ================================================ FILE: pkg/app/utils.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "fmt" "strings" "github.com/openconfig/gnmi/proto/gnmi" ) func (a *App) printCapResponse(printPrefix string, msg *gnmi.CapabilityResponse) { sb := strings.Builder{} sb.WriteString("gNMI version: ") sb.WriteString(msg.GNMIVersion) sb.WriteString("\n") if a.Config.LocalFlags.CapabilitiesVersion { return } sb.WriteString("supported models:\n") for _, sm := range msg.SupportedModels { sb.WriteString(" - ") sb.WriteString(sm.GetName()) sb.WriteString(", ") sb.WriteString(sm.GetOrganization()) sb.WriteString(", ") sb.WriteString(sm.GetVersion()) sb.WriteString("\n") } sb.WriteString("supported encodings:\n") for _, se := range msg.SupportedEncodings { sb.WriteString(" - ") sb.WriteString(se.String()) sb.WriteString("\n") } fmt.Fprintf(a.out, "%s\n", indent(printPrefix, sb.String())) } func indent(prefix, s string) string { if prefix == "" { return s } prefix = "\n" + strings.TrimRight(prefix, "\n") lines := strings.Split(s, "\n") return strings.TrimLeft(fmt.Sprintf("%s%s", prefix, strings.Join(lines, prefix)), "\n") } ================================================ FILE: pkg/app/version.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package app import ( "context" "encoding/json" "fmt" "io" "net/http" "os" "os/exec" "time" "github.com/openconfig/gnmic/pkg/version" "github.com/spf13/cobra" ) var downloadURL = "https://github.com/openconfig/gnmic/raw/main/install.sh" func (a *App) VersionRun(cmd *cobra.Command, args []string) { if a.Config.Format != "json" { fmt.Printf("version : %s\n", version.Version) fmt.Printf(" commit : %s\n", version.Commit) fmt.Printf(" date : %s\n", version.Date) fmt.Printf(" gitURL : %s\n", version.GitURL) fmt.Printf(" docs : https://gnmic.openconfig.net\n") return } b, err := json.Marshal(map[string]string{ "version": version.Version, "commit": version.Commit, "date": version.Date, "gitURL": version.GitURL, "docs": "https://gnmic.openconfig.net", }) // need indent? use jq if err != nil { a.Logger.Printf("failed: %v", err) if !a.Config.Log { fmt.Printf("failed: %v\n", err) } return } fmt.Println(string(b)) } func (a *App) VersionUpgradeRun(cmd *cobra.Command, args []string) error { f, err := os.CreateTemp("", "gnmic") defer os.Remove(f.Name()) if err != nil { return err } err = downloadFile(downloadURL, f) if err != nil { return err } var c *exec.Cmd switch a.Config.LocalFlags.UpgradeUsePkg { case true: c = exec.Command("bash", f.Name(), "--use-pkg") case false: c = exec.Command("bash", f.Name()) } c.Stdout = os.Stdout c.Stderr = os.Stderr err = c.Run() if err != nil { return err } return nil } // downloadFile will download a file from a URL and write its content to a file func downloadFile(url string, file *os.File) error { client := http.Client{Timeout: 30 * time.Second} // Get the data req, err := http.NewRequest(http.MethodGet, url, nil) if err != nil { return err } ctx, cancel := context.WithCancel(context.Background()) defer cancel() resp, err := client.Do(req.WithContext(ctx)) if err != nil { return err } defer resp.Body.Close() // Write the body to file _, err = io.Copy(file, resp.Body) if err != nil { return err } return nil } ================================================ FILE: pkg/cache/cache.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package cache import ( "context" "fmt" "log" "sync" "time" "github.com/openconfig/gnmi/proto/gnmi" "google.golang.org/protobuf/proto" ) type CacheType string const ( cacheType_OC CacheType = "oc" cacheType_Redis CacheType = "redis" cacheType_NATS CacheType = "nats" cacheType_JS CacheType = "jetstream" ) const ( ReadMode_Once = "once" ReadMode_StreamOnChange = "stream_on_change" ReadMode_StreamSample = "stream_sample" ) type Cache interface { // Write inserts the proto.Message (SubscribeResponse) into the cache under a subscription called `sub` Write(ctx context.Context, sub string, m proto.Message) // ReadAll, reads entries from the local cache, return the entries grouped by subscription name. ReadAll() (map[string][]*gnmi.Notification, error) // Read, reads a single path value from the cache filtering by subscription and target name Read(sub, target string, p *gnmi.Path) (map[string][]*gnmi.Notification, error) // Subscribes to the local cache and returns the notification over a channel Subscribe(ctx context.Context, so *ReadOpts) chan *Notification // Stops the cache Stop() // DeleteTarget deletes the target from the cache by name DeleteTarget(name string) // SetLogger sets a logger for the cache SetLogger(l *log.Logger) } type Config struct { Type CacheType `mapstructure:"type,omitempty" json:"type,omitempty"` Address string `mapstructure:"address,omitempty" json:"address,omitempty"` Timeout time.Duration `mapstructure:"timeout,omitempty" json:"timeout,omitempty"` Expiration time.Duration `mapstructure:"expiration,omitempty" json:"expiration,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` // NATS, JS and Redis cfg options Username string `mapstructure:"username,omitempty" json:"username,omitempty"` Password string `mapstructure:"password,omitempty" json:"password,omitempty"` // JS cfg options MaxBytes int64 `mapstructure:"max-bytes,omitempty" json:"max-bytes,omitempty"` MaxMsgsPerSubscription int64 `mapstructure:"max-msgs-per-subscription,omitempty" json:"max-msgs-per-subscription,omitempty"` FetchBatchSize int `mapstructure:"fetch-batch-size,omitempty" json:"fetch-batch-size,omitempty"` FetchWaitTime time.Duration `mapstructure:"fetch-wait-time,omitempty" json:"fetch-wait-time,omitempty"` } func (c *Config) setDefaults() { if c.Address == "" { switch c.Type { case cacheType_Redis: c.Address = defaultRedisAddress case cacheType_JS, cacheType_NATS: c.Address = defaultNATSAddress } } if c.Timeout == 0 { c.Timeout = defaultTimeout } if c.Expiration == 0 { c.Expiration = defaultExpiration } if c.Type != cacheType_JS { return } if c.MaxMsgsPerSubscription <= 0 { c.MaxMsgsPerSubscription = defaultMaxMsgs } if c.MaxBytes <= 0 { c.MaxBytes = defaultMaxBytes } if c.FetchBatchSize <= 0 { c.FetchBatchSize = defaultFetchBatchSize } if c.FetchWaitTime <= 0 { c.FetchWaitTime = defaultFetchWaitTime } } func New(c *Config, opts ...Option) (Cache, error) { if c == nil { c = &Config{Type: cacheType_OC} } if c.Type == "" { c.Type = cacheType_OC } switch c.Type { case cacheType_OC: return newGNMICache(c, "", opts...), nil case cacheType_NATS: return newNATSCache(c, opts...) case cacheType_JS: return newJetStreamCache(c, opts...) case cacheType_Redis: return newRedisCache(c, opts...) default: return nil, fmt.Errorf("unknown cache type: %q", c.Type) } } type ReadOpts struct { Subscription string Target string Paths []*gnmi.Path Mode string SampleInterval time.Duration HeartbeatInterval time.Duration SuppressRedundant bool UpdatesOnly bool OverrideTS bool m *sync.RWMutex lastSent map[string]*gnmi.TypedValue } func (ro *ReadOpts) setDefaults() { if ro.Target == "" { ro.Target = "*" } if ro.Mode == "" { ro.Mode = ReadMode_StreamOnChange } if len(ro.Paths) == 0 { ro.Paths = []*gnmi.Path{{}} } if ro.Mode == ReadMode_StreamSample && ro.SampleInterval <= 0 { ro.SampleInterval = 10 * time.Second } if ro.SuppressRedundant { ro.m = new(sync.RWMutex) ro.lastSent = make(map[string]*gnmi.TypedValue) } } type Notification struct { Name string Notification *gnmi.Notification Err error } ================================================ FILE: pkg/cache/go.mod ================================================ module github.com/openconfig/gnmic/pkg/cache go 1.24.12 require ( github.com/go-redis/redis/v8 v8.11.5 github.com/nats-io/nats-server/v2 v2.12.4 github.com/nats-io/nats.go v1.49.0 github.com/openconfig/gnmi v0.14.1 github.com/openconfig/gnmic/pkg/api v0.1.10 google.golang.org/protobuf v1.36.11 ) require ( bitbucket.org/creachadair/stringset v0.0.14 // indirect github.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/golang/glog v1.2.5 // indirect github.com/google/go-tpm v0.9.8 // indirect github.com/klauspost/compress v1.18.3 // indirect github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76 // indirect github.com/nats-io/jwt/v2 v2.8.0 // indirect github.com/nats-io/nkeys v0.4.15 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/onsi/gomega v1.27.10 // indirect golang.org/x/crypto v0.48.0 // indirect golang.org/x/net v0.50.0 // indirect golang.org/x/sys v0.41.0 // indirect golang.org/x/text v0.34.0 // indirect golang.org/x/time v0.14.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda // indirect google.golang.org/grpc v1.78.0 // indirect ) ================================================ FILE: pkg/cache/go.sum ================================================ bitbucket.org/creachadair/stringset v0.0.14 h1:t1ejQyf8utS4GZV/4fM+1gvYucggZkfhb+tMobDxYOE= bitbucket.org/creachadair/stringset v0.0.14/go.mod h1:Ej8fsr6rQvmeMDf6CCWMWGb14H9mz8kmDgPPTdiVT0w= github.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op h1:Ucf+QxEKMbPogRO5guBNe5cgd9uZgfoJLOYs8WWhtjM= github.com/antithesishq/antithesis-sdk-go v0.5.0-default-no-op/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-tpm v0.9.8 h1:slArAR9Ft+1ybZu0lBwpSmpwhRXaa85hWtMinMyRAWo= github.com/google/go-tpm v0.9.8/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw= github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76 h1:KGuD/pM2JpL9FAYvBrnBBeENKZNh6eNtjqytV6TYjnk= github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= github.com/nats-io/jwt/v2 v2.8.0 h1:K7uzyz50+yGZDO5o772eRE7atlcSEENpL7P+b74JV1g= github.com/nats-io/jwt/v2 v2.8.0/go.mod h1:me11pOkwObtcBNR8AiMrUbtVOUGkqYjMQZ6jnSdVUIA= github.com/nats-io/nats-server/v2 v2.12.4 h1:ZnT10v2LU2Xcoiy8ek9X6Se4YG8EuMfIfvAEuFVx1Ts= github.com/nats-io/nats-server/v2 v2.12.4/go.mod h1:5MCp/pqm5SEfsvVZ31ll1088ZTwEUdvRX1Hmh/mTTDg= github.com/nats-io/nats.go v1.49.0 h1:yh/WvY59gXqYpgl33ZI+XoVPKyut/IcEaqtsiuTJpoE= github.com/nats-io/nats.go v1.49.0/go.mod h1:fDCn3mN5cY8HooHwE2ukiLb4p4G4ImmzvXyJt+tGwdw= github.com/nats-io/nkeys v0.4.15 h1:JACV5jRVO9V856KOapQ7x+EY8Jo3qw1vJt/9Jpwzkk4= github.com/nats-io/nkeys v0.4.15/go.mod h1:CpMchTXC9fxA5zrMo4KpySxNjiDVvr8ANOSZdiNfUrs= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/openconfig/gnmi v0.14.1 h1:qKMuFvhIRR2/xxCOsStPQ25aKpbMDdWr3kI+nP9bhMs= github.com/openconfig/gnmi v0.14.1/go.mod h1:whr6zVq9PCU8mV1D0K9v7Ajd3+swoN6Yam9n8OH3eT0= github.com/openconfig/gnmic/pkg/api v0.1.10 h1:zU57bogHrnraDFCYDnxHZB8Hcd53bWx1fDkRTPw/R2w= github.com/openconfig/gnmic/pkg/api v0.1.10/go.mod h1:6PntONfjCMq3XzsDfWMkLeoVuBRbkm2foQO5m6PeYo0= github.com/openconfig/goyang v1.6.0 h1:JjnPbLY1/y28VyTO67LsEV0TaLWNiZyDcsppGq4F4is= github.com/openconfig/goyang v1.6.0/go.mod h1:sdNZi/wdTZyLNBNfgLzmmbi7kISm7FskMDKKzMY+x1M= github.com/openconfig/grpctunnel v0.1.0 h1:EN99qtlExZczgQgp5ANnHRC/Rs62cAG+Tz2BQ5m/maM= github.com/openconfig/grpctunnel v0.1.0/go.mod h1:G04Pdu0pml98tdvXrvLaU+EBo3PxYfI9MYqpvdaEHLo= github.com/openconfig/ygot v0.29.20 h1:XHLpwCN91QuKc2LAvnEqtCmH8OuxgLlErDhrdl2mJw8= github.com/openconfig/ygot v0.29.20/go.mod h1:K8HbrPm/v8/emtGQ9+RsJXx6UPKC5JzS/FqK7pN+tMo= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60= golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda h1:i/Q+bfisr7gq6feoJnS/DlpdwEL4ihp41fvRiM3Ork0= google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= ================================================ FILE: pkg/cache/jetstream_cache.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package cache import ( "context" "errors" "fmt" "log" "os" "regexp" "sort" "strings" "sync" "time" "google.golang.org/protobuf/proto" "github.com/nats-io/nats-server/v2/server" "github.com/nats-io/nats.go" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/api/utils" ) const ( loggingPrefixJetStream = "[cache:jetstream] " reconnectTimer = 5 * time.Second defaultFetchBatchSize = 100 defaultFetchWaitTime = 100 * time.Millisecond defaultExpiration = time.Minute defaultMaxMsgs = 1024 * 1024 defaultMaxBytes = 1024 * 1024 * 1024 defaultNATSAddress = "127.0.0.1" jetStreamSyncName = "gnmic-jetstream-cache" ) type jetStreamCache struct { cfg *Config ns *server.Server nc *nats.Conn js nats.JetStreamContext cfn context.CancelFunc streamChan chan string // configured remote address or locally started server address addr string oc *gnmiCache m *sync.RWMutex streams map[string]struct{} logger *log.Logger } func newJetStreamCache(cfg *Config, opts ...Option) (*jetStreamCache, error) { if cfg == nil { cfg = new(Config) } cfg.setDefaults() var err error c := &jetStreamCache{ cfg: cfg, oc: newGNMICache(cfg, "jetstream", opts...), streamChan: make(chan string), m: new(sync.RWMutex), streams: make(map[string]struct{}), } for _, opt := range opts { opt(c) } if c.cfg.Address == defaultNATSAddress { sopts := &server.Options{ Host: cfg.Address, Port: -1, JetStream: true, NoSigs: true, } c.ns, err = server.NewServer(sopts) if err != nil { return nil, err } } if c.logger == nil { c.logger = log.New(os.Stderr, loggingPrefixJetStream, utils.DefaultLoggingFlags) } c.start() ctx, cancel := context.WithCancel(context.Background()) c.cfn = cancel go c.sync(ctx) return c, nil } func (c *jetStreamCache) SetLogger(logger *log.Logger) { if logger != nil && c.logger != nil { c.logger.SetOutput(logger.Writer()) c.logger.SetFlags(logger.Flags()) c.logger.SetPrefix(loggingPrefixJetStream) } } func (c *jetStreamCache) start() { START: if c.ns != nil { go c.ns.Start() if !c.ns.ReadyForConnections(reconnectTimer) { c.ns.Shutdown() c.logger.Printf("failed to start cache, retrying") goto START } } c.addr = c.cfg.Address if c.ns != nil { c.addr = c.ns.ClientURL() } var err error opts := []nats.Option{ nats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) { c.logger.Printf("NATS error: %v", err) }), nats.DisconnectHandler(func(_ *nats.Conn) { c.logger.Println("Disconnected from NATS") }), nats.ClosedHandler(func(_ *nats.Conn) { c.logger.Println("NATS connection is closed") }), } if c.cfg.Username != "" && c.cfg.Password != "" { opts = append(opts, nats.UserInfo(c.cfg.Username, c.cfg.Password)) } CONNECT: if c.nc != nil { c.nc.Close() } c.nc, err = nats.Connect(c.addr, opts...) if err != nil { c.logger.Printf("failed to connect: %v", err) time.Sleep(reconnectTimer) goto CONNECT } c.js, err = c.nc.JetStream() if err != nil { c.logger.Printf("failed to create stream: %v", err) time.Sleep(reconnectTimer) goto CONNECT } } func (c *jetStreamCache) createStream(streamName string, subjects []string) error { stream, err := c.js.StreamInfo(streamName) if err != nil { if !errors.Is(err, nats.ErrStreamNotFound) { return err } } if c.cfg.Debug { c.logger.Printf("found stream %q: %v", streamName, stream != nil) } if stream == nil { c.logger.Printf("creating stream %q and subjects %q", streamName, subjects) _, err = c.js.AddStream( &nats.StreamConfig{ Name: streamName, Subjects: subjects, MaxMsgs: c.cfg.MaxMsgsPerSubscription, MaxBytes: c.cfg.MaxBytes, Discard: nats.DiscardOld, MaxAge: c.cfg.Expiration, Storage: nats.MemoryStorage, }) return err } return nil } func (c *jetStreamCache) Write(ctx context.Context, subscriptionName string, m proto.Message) { c.writeRemoteJS(ctx, subscriptionName, m) // publish the subscription name to nats for other gnmic instances var ok bool c.m.RLock() defer func() { c.m.RUnlock() if !ok { c.m.Lock() c.streams[subscriptionName] = struct{}{} c.m.Unlock() _ = c.nc.Publish(cacheSubjects, []byte(subscriptionName)) } }() _, ok = c.streams[subscriptionName] } func (c *jetStreamCache) writeRemoteJS(ctx context.Context, subscriptionName string, m proto.Message) { switch m := m.ProtoReflect().Interface().(type) { case *gnmi.SubscribeResponse: switch rsp := m.GetResponse().(type) { case *gnmi.SubscribeResponse_Update: targetName := rsp.Update.GetPrefix().GetTarget() if targetName == "" { c.logger.Printf("subscription=%q: response missing target: %v", subscriptionName, rsp) return } // check if a stream with the same name as the subscription is being created or has been created c.m.RLock() _, ok := c.streams[subscriptionName] c.m.RUnlock() if !ok { // add the subscription as a stream and create it in NATS if it doesn't exist c.m.Lock() c.streams[subscriptionName] = struct{}{} err := c.createStream(subscriptionName, []string{fmt.Sprintf("%s.>", subscriptionName)}) if err != nil { delete(c.streams, subscriptionName) c.m.Unlock() c.logger.Printf("failed to create stream: %v", err) return } c.m.Unlock() c.streamChan <- subscriptionName } // wait in case the stream is being created c.m.RLock() defer c.m.RUnlock() err := c.publishNotificationJS(ctx, subscriptionName, targetName, m) if err != nil { c.logger.Print(err) } } } } func (c *jetStreamCache) publishNotificationJS(ctx context.Context, subscriptionName, targetName string, r *gnmi.SubscribeResponse) error { ctx, cancel := context.WithTimeout(ctx, c.cfg.Timeout) defer cancel() subjectName, err := subjectName(subscriptionName, targetName, r) if err != nil { return fmt.Errorf("failed to build a subject name: %w", err) } b, err := proto.Marshal(r) if err != nil { return fmt.Errorf("failed to marshal proto message: %w", err) } _, err = c.js.Publish(subjectName, b, nats.Context(ctx)) if err != nil { return fmt.Errorf("failed to publish to JetStream cache: %w", err) } return nil } func (c *jetStreamCache) sync(ctx context.Context) { c.logger.Printf("start JetStream sync") // this map keeps track of streams already queued streams := make(map[string]struct{}) go func() { START: subjectSub, err := c.nc.Subscribe(cacheSubjects, func(m *nats.Msg) { subj := string(m.Data) c.streamChan <- subj }) if err != nil { time.Sleep(time.Second) goto START } defer subjectSub.Unsubscribe() for range ctx.Done() { } }() for { select { case <-ctx.Done(): return case cc := <-c.streamChan: if _, ok := streams[cc]; !ok { c.logger.Printf("start JetStream stream %q sync", cc) streams[cc] = struct{}{} go c.syncStream(ctx, cc) } } } } func (c *jetStreamCache) syncStream(ctx context.Context, subject string) { START: sub, err := c.js.Subscribe(fmt.Sprintf("%s.>", subject), func(msg *nats.Msg) { m := new(gnmi.SubscribeResponse) err := proto.Unmarshal([]byte(msg.Data), m) if err != nil { c.logger.Printf("failed to unmarshal proto msg: %v", err) return } _ = msg.Ack() c.oc.Write(ctx, subject, m) }, nats.DeliverNew(), nats.Durable(jetStreamSyncName), ) if err != nil { time.Sleep(time.Second) goto START } defer sub.Unsubscribe() for range ctx.Done() { } } // Read // func (c *jetStreamCache) ReadAll() (map[string][]*gnmi.Notification, error) { return c.oc.ReadAll() } func (c *jetStreamCache) Read(sub, target string, p *gnmi.Path) (map[string][]*gnmi.Notification, error) { return c.oc.read(sub, target, p), nil } func (c *jetStreamCache) Subscribe(ctx context.Context, ro *ReadOpts) chan *Notification { return c.oc.Subscribe(ctx, ro) } func (c *jetStreamCache) Stop() { c.cfn() if c.nc != nil { c.nc.Close() } if c.ns != nil { c.ns.Shutdown() } } func (c *jetStreamCache) DeleteTarget(name string) { c.oc.DeleteTarget(name) } var stringBuilderPool = sync.Pool{ New: func() any { return new(strings.Builder) }, } func subjectName(streamName, target string, m proto.Message) (string, error) { sb := stringBuilderPool.Get().(*strings.Builder) defer func() { sb.Reset() stringBuilderPool.Put(sb) }() sb.WriteString(streamName) sb.WriteString(".") if target != "" { sb.WriteString(target) sb.WriteString(".") } switch rsp := m.(type) { case *gnmi.SubscribeResponse: switch rsp := rsp.Response.(type) { case *gnmi.SubscribeResponse_Update: var prefixSubject string if rsp.Update.GetPrefix() != nil { prefixSubject = gNMIPathToSubject(rsp.Update.GetPrefix(), subjectOpts{WithKeys: true, WithWildcard: false})[0] } var pathSubject string if len(rsp.Update.GetUpdate()) > 0 { pathSubject = gNMIPathToSubject(rsp.Update.GetUpdate()[0].GetPath(), subjectOpts{WithKeys: true, WithWildcard: false})[0] } if prefixSubject != "" { sb.WriteString(prefixSubject) sb.WriteString(".") } if pathSubject != "" { sb.WriteString(pathSubject) } } } return sb.String(), nil } type subjectOpts struct { WithKeys bool WithWildcard bool } func gNMIPathToSubject(p *gnmi.Path, opts subjectOpts) []string { if p == nil { return []string{""} } sb := stringBuilderPool.Get().(*strings.Builder) defer func() { sb.Reset() stringBuilderPool.Put(sb) }() if p.GetOrigin() != "" { fmt.Fprintf(sb, "%s.", p.GetOrigin()) } for i, e := range p.GetElem() { if i > 0 { sb.WriteString(".") } sb.WriteString(e.Name) if opts.WithKeys { if len(e.Key) > 0 { // sort keys by name kNames := make([]string, 0, len(e.Key)) for k := range e.Key { kNames = append(kNames, k) } sort.Strings(kNames) for _, k := range kNames { sk := sanitizeKey(e.GetKey()[k]) fmt.Fprintf(sb, ".{%s=%s}", k, sk) } } } } subj := sb.String() if subj == "" && opts.WithWildcard { return []string{".>"} } result := []string{subj} if opts.WithWildcard { result = append(result, subj+".>") } return result } const ( dotReplChar = "^" spaceReplChar = "~" ) var regDot = regexp.MustCompile(`\.`) var regSpace = regexp.MustCompile(`\s`) func sanitizeKey(k string) string { s := regDot.ReplaceAllString(k, dotReplChar) return regSpace.ReplaceAllString(s, spaceReplChar) } ================================================ FILE: pkg/cache/jetstream_cache_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package cache import ( "context" "log" "testing" "time" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmi/proto/gnmi" ) func Test_natsCache_Write(t *testing.T) { type fields struct { cfg *Config } type args struct { ctx context.Context subscriptionName string m proto.Message } tests := []struct { name string fields fields args args }{ { name: "test1", fields: fields{ cfg: &Config{ Type: cacheType_JS, }, }, args: args{ ctx: context.TODO(), subscriptionName: "sub1", m: &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: &gnmi.Notification{ Prefix: &gnmi.Path{ Target: "router1", }, Timestamp: time.Now().UnixNano(), Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "interface", Key: map[string]string{ "name": "ethernet-1/1", }, }, { Name: "description", }, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{ AsciiVal: "interface_description", }, }, }, }, }, }, }, }, }, { name: "test2", fields: fields{ cfg: &Config{ Type: cacheType_JS, }, }, args: args{ ctx: context.TODO(), subscriptionName: "sub1", m: &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: &gnmi.Notification{ Prefix: &gnmi.Path{ Target: "router1", }, Timestamp: time.Now().UnixNano(), Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "interface", Key: map[string]string{ "name": "ethernet-1/1", }, }, { Name: "description", }, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{ AsciiVal: "interface_description", }, }, }, { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "interface", Key: map[string]string{ "name": "ethernet-1/1", }, }, { Name: "statistics", }, { Name: "in-octets", }, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{ AsciiVal: "42", }, }, }, }, }, }, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c, err := New(tt.fields.cfg, WithLogger(log.Default())) if err != nil { t.Fatal(err) } c.Write(tt.args.ctx, tt.args.subscriptionName, tt.args.m) rs, err := c.ReadAll() if err != nil { t.Fatal(err) } for s, ns := range rs { t.Logf("sub %s, read %d msgs: %+v", s, len(ns), ns) } }) } } ================================================ FILE: pkg/cache/nats_cache.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package cache import ( "context" "fmt" "log" "os" "sync" "time" "google.golang.org/protobuf/proto" "github.com/nats-io/nats-server/v2/server" "github.com/nats-io/nats.go" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/api/utils" ) const ( loggingPrefixNATS = "[cache:nats] " cacheSubjects = "gnmic.cache.subjects" subjectCacheResetPeriod = 30 * time.Second ) type natsCache struct { cfg *Config oc *gnmiCache ns *server.Server nc *nats.Conn cfn context.CancelFunc subjectChan chan string // configured remote address or locally started server address addr string m *sync.RWMutex subjects map[string]struct{} logger *log.Logger } func newNATSCache(cfg *Config, opts ...Option) (*natsCache, error) { if cfg == nil { cfg = new(Config) } cfg.setDefaults() var err error c := &natsCache{ cfg: cfg, oc: newGNMICache(cfg, "nats", opts...), subjectChan: make(chan string), m: new(sync.RWMutex), subjects: make(map[string]struct{}), } for _, opt := range opts { opt(c) } if c.cfg.Address == defaultNATSAddress { sopts := &server.Options{ Host: cfg.Address, Port: -1, NoSigs: true, } c.ns, err = server.NewServer(sopts) if err != nil { return nil, err } } if c.logger == nil { c.logger = log.New(os.Stderr, loggingPrefixNATS, utils.DefaultLoggingFlags) } c.start() ctx, cancel := context.WithCancel(context.Background()) c.cfn = cancel go c.sync(ctx) return c, nil } func (c *natsCache) start() { START: if c.ns != nil { go c.ns.Start() if !c.ns.ReadyForConnections(reconnectTimer) { c.ns.Shutdown() c.logger.Printf("failed to start cache, retrying") goto START } } c.addr = c.cfg.Address if c.ns != nil { c.addr = c.ns.ClientURL() } var err error opts := []nats.Option{ nats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) { c.logger.Printf("NATS error: %v", err) }), nats.DisconnectHandler(func(_ *nats.Conn) { c.logger.Println("Disconnected from NATS") }), nats.ClosedHandler(func(_ *nats.Conn) { c.logger.Println("NATS connection is closed") }), nats.Timeout(c.cfg.Timeout), } if c.cfg.Username != "" && c.cfg.Password != "" { opts = append(opts, nats.UserInfo(c.cfg.Username, c.cfg.Password)) } CONNECT: if c.nc != nil { c.nc.Close() } c.nc, err = nats.Connect(c.addr, opts...) if err != nil { c.logger.Printf("failed to connect: %v", err) time.Sleep(reconnectTimer) goto CONNECT } } func (c *natsCache) sync(ctx context.Context) { c.logger.Printf("start NATS sync") // this map keeps track of subjects already queued subjects := make(map[string]struct{}) go func() { ticker := time.NewTicker(subjectCacheResetPeriod) START: subjectSub, err := c.nc.Subscribe(cacheSubjects, func(m *nats.Msg) { subj := string(m.Data) c.subjectChan <- subj }) if err != nil { time.Sleep(time.Second) goto START } defer subjectSub.Unsubscribe() for { select { case <-ctx.Done(): return case <-ticker.C: c.m.Lock() c.subjects = make(map[string]struct{}) c.m.Unlock() } } }() for { select { case <-ctx.Done(): return case cc := <-c.subjectChan: if _, ok := subjects[cc]; !ok { c.logger.Printf("start NATS topic %q sync", cc) subjects[cc] = struct{}{} go c.syncSubject(ctx, cc) } } } } func (c *natsCache) syncSubject(ctx context.Context, subject string) { START: sub, err := c.nc.Subscribe(fmt.Sprintf("%s.>", subject), func(msg *nats.Msg) { m := new(gnmi.SubscribeResponse) err := proto.Unmarshal([]byte(msg.Data), m) if err != nil { c.logger.Printf("failed to unmarshal proto msg: %v", err) return } c.oc.Write(ctx, subject, m) }) if err != nil { time.Sleep(time.Second) goto START } defer sub.Unsubscribe() for range ctx.Done() { } } func (c *natsCache) Write(ctx context.Context, subscriptionName string, m proto.Message) { // write the msg to nats c.writeRemoteNATS(ctx, subscriptionName, m) // publish the subscription name to nats for other gnmic instances var ok bool c.m.RLock() defer func() { c.m.RUnlock() if !ok { c.m.Lock() c.subjects[subscriptionName] = struct{}{} c.m.Unlock() _ = c.nc.Publish(cacheSubjects, []byte(subscriptionName)) } }() _, ok = c.subjects[subscriptionName] } func (c *natsCache) writeRemoteNATS(ctx context.Context, subscriptionName string, m proto.Message) { switch m := m.ProtoReflect().Interface().(type) { case *gnmi.SubscribeResponse: switch rsp := m.GetResponse().(type) { case *gnmi.SubscribeResponse_Update: targetName := rsp.Update.GetPrefix().GetTarget() if targetName == "" { c.logger.Printf("subscription=%q: response missing target: %v", subscriptionName, rsp) return } c.subjectChan <- subscriptionName var err error err = c.publishNotificationNATS(ctx, subscriptionName, targetName, m) if err != nil { c.logger.Print(err) } } } } func (c *natsCache) publishNotificationNATS(_ context.Context, subscriptionName, targetName string, r *gnmi.SubscribeResponse) error { b, err := proto.Marshal(r) if err != nil { return fmt.Errorf("failed to marshal proto message: %w", err) } err = c.nc.Publish(fmt.Sprintf("%s.%s", subscriptionName, targetName), b) if err != nil { return fmt.Errorf("failed to publish to NATS cache: %w", err) } return nil } func (c *natsCache) ReadAll() (map[string][]*gnmi.Notification, error) { return c.oc.ReadAll() } func (c *natsCache) Read(sub, target string, p *gnmi.Path) (map[string][]*gnmi.Notification, error) { return c.oc.read(sub, target, p), nil } func (c *natsCache) Subscribe(ctx context.Context, ro *ReadOpts) chan *Notification { return c.oc.Subscribe(ctx, ro) } func (c *natsCache) Stop() { c.cfn() if c.nc != nil { c.nc.Close() } if c.ns != nil { c.ns.Shutdown() } } func (c *natsCache) SetLogger(logger *log.Logger) { if logger != nil && c.logger != nil { c.logger.SetOutput(logger.Writer()) c.logger.SetFlags(logger.Flags()) c.logger.SetPrefix(loggingPrefixNATS) } } func (c *natsCache) DeleteTarget(name string) { c.oc.DeleteTarget(name) } ================================================ FILE: pkg/cache/oc_cache.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package cache import ( "context" "io" "log" "strings" "sync" "time" ocCache "github.com/openconfig/gnmi/cache" "github.com/openconfig/gnmi/ctree" "github.com/openconfig/gnmi/match" "github.com/openconfig/gnmi/path" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmi/subscribe" gpath "github.com/openconfig/gnmic/pkg/api/path" "github.com/openconfig/gnmic/pkg/api/utils" "google.golang.org/protobuf/proto" ) const ( loggingPrefixOC = "[cache:oc] " defaultTimeout = 10 * time.Second ) type gnmiCache struct { m *sync.Mutex caches map[string]*subCache // match *match.Match logger *log.Logger expiration time.Duration debug bool } type subCache struct { c *ocCache.Cache match *match.Match } func (gc *gnmiCache) loadConfig(gcc *Config) { gc.expiration = gcc.Expiration gc.logger = log.New(io.Discard, loggingPrefixOC, utils.DefaultLoggingFlags) gc.debug = gcc.Debug } func newGNMICache(cfg *Config, loggingPrefix string, opts ...Option) *gnmiCache { if cfg == nil { cfg = new(Config) } gc := &gnmiCache{ m: new(sync.Mutex), // match: match.New(), caches: make(map[string]*subCache), } cfg.setDefaults() gc.loadConfig(cfg) for _, opt := range opts { opt(gc) } if gc.logger != nil { if loggingPrefix == "" { loggingPrefix = "oc" } gc.logger.SetPrefix(loggingPrefixOC) } return gc } func (gc *subCache) update(n *ctree.Leaf) { switch v := n.Value().(type) { case *gnmi.Notification: pathElems := path.ToStrings(v.GetPrefix(), true) subscribe.UpdateNotification(gc.match, n, v, pathElems) default: // gc.logger.Printf("unexpected update type: %T", v) } } func (gc *gnmiCache) SetLogger(logger *log.Logger) { if logger != nil && gc.logger != nil { gc.logger.SetOutput(logger.Writer()) gc.logger.SetFlags(logger.Flags()) } } func (gc *gnmiCache) Write(ctx context.Context, measName string, m proto.Message) { var err error switch rsp := m.ProtoReflect().Interface().(type) { case *gnmi.SubscribeResponse: switch rsp := rsp.GetResponse().(type) { case *gnmi.SubscribeResponse_Update: target := rsp.Update.GetPrefix().GetTarget() if target == "" { gc.logger.Printf("subscription=%q: response missing target: %v", measName, rsp) return } // if the update does not have a prefix path, // check that each update has a path. if len(rsp.Update.GetPrefix().GetElem()) == 0 { for _, upd := range rsp.Update.GetUpdate() { if len(upd.GetPath().GetElem()) == 0 { gc.logger.Printf("write fail: received an update with en empty path: %v", upd) return } } } gc.m.Lock() sCache, ok := gc.caches[measName] if !ok { sCache = &subCache{ c: ocCache.New(nil), match: match.New(), } sCache.c.SetClient(sCache.update) sCache.c.Add(target) gc.logger.Printf("target %q added to local cache %q", target, measName) gc.caches[measName] = sCache } if !sCache.c.HasTarget(target) { sCache.c.Add(target) gc.logger.Printf("target %q added to local cache %q", target, measName) } gc.m.Unlock() // do not write updates with nil values to cache. notif := &gnmi.Notification{ Timestamp: rsp.Update.GetTimestamp(), Prefix: rsp.Update.GetPrefix(), Update: make([]*gnmi.Update, 0, len(rsp.Update.GetUpdate())), Delete: rsp.Update.GetDelete(), Atomic: rsp.Update.GetAtomic(), } for _, upd := range rsp.Update.GetUpdate() { if upd.Val == nil { continue } notif.Update = append(notif.Update, upd) } if len(notif.Update) == 0 && len(notif.Delete) == 0 { return } err = sCache.c.GnmiUpdate(notif) if err != nil { gc.logger.Printf("failed to update gNMI cache: %v", err) return } return } } } func (gc *gnmiCache) ReadAll() (map[string][]*gnmi.Notification, error) { return gc.read("", "*", nil), nil } func (gc *gnmiCache) Read(sub, target string, p *gnmi.Path) (map[string][]*gnmi.Notification, error) { return gc.read(sub, target, p), nil } func (gc *gnmiCache) Subscribe(ctx context.Context, ro *ReadOpts) chan *Notification { if ro == nil { ro = new(ReadOpts) } ro.setDefaults() ch := make(chan *Notification) go gc.subscribe(ctx, ro, ch) return ch } func (gc *gnmiCache) subscribe(ctx context.Context, ro *ReadOpts, ch chan *Notification) { defer close(ch) switch ro.Mode { case ReadMode_Once: gc.handleSingleQuery(ctx, ro, ch) case ReadMode_StreamOnChange: // default: ro.SuppressRedundant = false gc.handleOnChangeQuery(ctx, ro, ch) case ReadMode_StreamSample: gc.handleSampledQuery(ctx, ro, ch) } } func (gc *gnmiCache) handleSingleQuery(ctx context.Context, ro *ReadOpts, ch chan *Notification) { if gc.debug { gc.logger.Printf("running single query for target %q", ro.Target) } caches := gc.getCaches(ro.Subscription) if gc.debug { gc.logger.Printf("single query got %d caches", len(caches)) } wg := new(sync.WaitGroup) wg.Add(len(caches)) for name, c := range caches { go func(name string, c *subCache) { defer wg.Done() if !c.c.HasTarget(ro.Target) { if gc.debug { gc.logger.Printf("subscription-cache %q doesn't have target: %q", name, ro.Target) } return } for _, p := range ro.Paths { fp, err := path.CompletePath(p, nil) if err != nil { gc.logger.Printf("failed to generate CompletePath from %v", p) ch <- &Notification{Name: name, Err: err} return } err = c.c.Query(ro.Target, fp, func(_ []string, l *ctree.Leaf, _ interface{}) error { if err != nil { return err } switch gl := l.Value().(type) { case *gnmi.Notification: if ro.OverrideTS { // override timestamp gl = proto.Clone(gl).(*gnmi.Notification) gl.Timestamp = time.Now().UnixNano() } //no suppress redundant, send to channel and return if !ro.SuppressRedundant { ch <- &Notification{Name: name, Notification: gl} return nil } // suppress redundant part if ro.lastSent == nil { ro.lastSent = make(map[string]*gnmi.TypedValue) ro.m = new(sync.RWMutex) } prefix := gpath.GnmiPathToXPath(gl.GetPrefix(), true) target := gl.GetPrefix().GetTarget() for _, upd := range gl.GetUpdate() { p := gpath.GnmiPathToXPath(upd.GetPath(), true) valXPath := strings.Join([]string{target, prefix, p}, "/") ro.m.RLock() sv, ok := ro.lastSent[valXPath] ro.m.RUnlock() if !ok || !proto.Equal(sv, upd.Val) { ch <- &Notification{ Name: name, Notification: &gnmi.Notification{ Timestamp: gl.GetTimestamp(), Prefix: gl.GetPrefix(), Update: []*gnmi.Update{upd}, }, } ro.m.Lock() ro.lastSent[valXPath] = upd.Val ro.m.Unlock() } } if gl.GetDelete() != nil { ch <- &Notification{ Name: name, Notification: &gnmi.Notification{ Timestamp: gl.GetTimestamp(), Prefix: gl.GetPrefix(), Delete: gl.GetDelete(), }, } } return nil } return nil }) if err != nil { gc.logger.Printf("target %q failed internal cache query: %v", ro.Target, err) ch <- &Notification{Name: name, Err: err} return } } }(name, c) } wg.Wait() } func (gc *gnmiCache) handleSampledQuery(ctx context.Context, ro *ReadOpts, ch chan *Notification) { if !ro.UpdatesOnly { gc.handleSingleQuery(ctx, ro, ch) } ticker := time.NewTicker(ro.SampleInterval) defer ticker.Stop() for { select { case <-ctx.Done(): gc.logger.Printf("periodic query to target %q stopped: %v", ro.Target, ctx.Err()) return case <-ticker.C: gc.handleSingleQuery(ctx, ro, ch) } } } func (gc *gnmiCache) handleOnChangeQuery(ctx context.Context, ro *ReadOpts, ch chan *Notification) { caches := gc.getCaches(ro.Subscription) numCaches := len(caches) gc.logger.Printf("on-change query got %d cache(s)", numCaches) wg := new(sync.WaitGroup) wg.Add(numCaches) for name, c := range caches { go func(name string, c *subCache) { defer wg.Done() if !c.c.HasTarget(ro.Target) { if gc.debug { gc.logger.Printf("subscription-cache %q doesn't have target: %q", name, ro.Target) } return } for _, p := range ro.Paths { cp, err := path.CompletePath(p, nil) if err != nil { gc.logger.Printf("failed to generate CompletePath from %v", p) ch <- &Notification{Name: name, Err: err} return } // handle updates only if !ro.UpdatesOnly { err = c.c.Query(ro.Target, cp, func(_ []string, l *ctree.Leaf, _ interface{}) error { switch gl := l.Value().(type) { case *gnmi.Notification: ch <- &Notification{Name: name, Notification: gl} } return nil }) if err != nil { gc.logger.Printf("failed to run cache query for target %q and path %q: %v", ro.Target, cp, err) ch <- &Notification{Name: name, Err: err} return } } // main on-change subscription fp := make([]string, 0, len(cp)+1) fp = append(fp, ro.Target) fp = append(fp, cp...) // set callback mc := &matchClient{name: name, ch: ch} remove := c.match.AddQuery(fp, mc) defer remove() // handle on-change heartbeat if ro.HeartbeatInterval > 0 { // run a sampled query using heartbeat interval as sample interval gc.handleSampledQuery(ctx, &ReadOpts{ Subscription: ro.Subscription, Target: ro.Target, Paths: ro.Paths, Mode: ReadMode_StreamSample, SampleInterval: ro.HeartbeatInterval, OverrideTS: ro.OverrideTS, }, ch) } } for range ctx.Done() { } }(name, c) } wg.Wait() } func (gc *gnmiCache) Stop() {} func (gc *gnmiCache) read(sub, target string, p *gnmi.Path) map[string][]*gnmi.Notification { notificationChan := make(chan *Notification) notifications := make(map[string][]*gnmi.Notification, 0) doneCh := make(chan struct{}) // this go routine will collect all the notifications // from the cache queries go func() { for nn := range notificationChan { if _, ok := notifications[nn.Name]; !ok { notifications[nn.Name] = make([]*gnmi.Notification, 0) } notifications[nn.Name] = append(notifications[nn.Name], nn.Notification) } close(doneCh) }() if sub == "*" { sub = "" } now := time.Now() wg := new(sync.WaitGroup) caches := gc.getCaches(sub) wg.Add(len(caches)) for name, c := range caches { go func(c *subCache, name string) { defer wg.Done() cp, err := path.CompletePath(p, nil) if err != nil { gc.logger.Printf("failed to generate CompletePath from %v", p) return } err = c.c.Query(target, cp, func(_ []string, _ *ctree.Leaf, v interface{}) error { if err != nil { return err } switch notif := v.(type) { case *gnmi.Notification: if gc.expiration > 0 && time.Unix(0, notif.Timestamp).Before(now.Add(time.Duration(-gc.expiration))) { return nil } notificationChan <- &Notification{ Name: name, Notification: notif, } } return nil }) if err != nil { gc.logger.Printf("failed cache query:%v", err) return } }(c, name) } wg.Wait() close(notificationChan) // wait for notifications to be appended to the array <-doneCh return notifications } func (gc *gnmiCache) getCaches(names ...string) map[string]*subCache { gc.m.Lock() defer gc.m.Unlock() caches := make(map[string]*subCache) numCaches := len(names) if numCaches == 0 || (numCaches == 1 && names[0] == "") { for n, c := range gc.caches { caches[n] = c } return caches } for _, n := range names { if c, ok := gc.caches[n]; ok { caches[n] = c } } return caches } func (gc *gnmiCache) DeleteTarget(name string) { caches := gc.getCaches() for _, c := range caches { c.c.Remove(name) } } // match client type matchClient struct { name string ch chan *Notification } func (m *matchClient) Update(n interface{}) { switch n := n.(type) { case *ctree.Leaf: switch v := n.Value().(type) { case *gnmi.Notification: m.ch <- &Notification{ Name: m.name, Notification: v, } } } } ================================================ FILE: pkg/cache/oc_cache_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package cache import ( "context" "log" "testing" "time" "github.com/openconfig/gnmi/proto/gnmi" ) func Test_gnmiCache_read(t *testing.T) { type input struct { measName string target string m *gnmi.SubscribeResponse } type fields struct { inputs []input } type args struct { sub string target string p *gnmi.Path } tests := []struct { name string fields fields args args want map[string][]*gnmi.Notification expectedRespCount int }{ { name: "test1", fields: fields{ inputs: []input{ { measName: "sub1", target: "t1", m: &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: &gnmi.Notification{ Timestamp: time.Now().UnixNano(), Prefix: &gnmi.Path{Target: "t1"}, Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, {Name: "host-name"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: "srl1"}, }, }, }, }, }, }, }, { measName: "sub1", target: "t1", m: &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: &gnmi.Notification{ Timestamp: time.Now().UnixNano(), Prefix: &gnmi.Path{Target: "t1"}, Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "interface", Key: map[string]string{ "name": "ethernet-1/1", }, }, {Name: "admin-state"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: "enable"}, }, }, }, }, }, }, }, }, }, args: args{ sub: "sub1", target: "*", p: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, {Name: "host-name"}, }}, }, want: map[string][]*gnmi.Notification{}, expectedRespCount: 1, }, { name: "test2", fields: fields{ inputs: []input{ { measName: "sub1", target: "t1", m: &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: &gnmi.Notification{ Timestamp: time.Now().UnixNano(), Prefix: &gnmi.Path{Target: "t1"}, Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, {Name: "host-name"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: "srl1"}, }, }, }, }, }, }, }, { measName: "sub1", target: "t1", m: &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: &gnmi.Notification{ Timestamp: time.Now().UnixNano(), Prefix: &gnmi.Path{Target: "t1"}, Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "interface", Key: map[string]string{ "name": "ethernet-1/1", }, }, {Name: "admin-state"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: "enable"}, }, }, }, }, }, }, }, }, }, args: args{ sub: "sub1", target: "*", p: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "interface", Key: map[string]string{ "name": "ethernet-1/1", }, }, {Name: "admin-state"}, }}, }, want: map[string][]*gnmi.Notification{}, expectedRespCount: 1, }, { name: "readAll_same_subscription", fields: fields{ inputs: []input{ { measName: "sub1", target: "t1", m: &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: &gnmi.Notification{ Timestamp: time.Now().UnixNano(), Prefix: &gnmi.Path{Target: "t1"}, Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, {Name: "host-name"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: "srl1"}, }, }, }, }, }, }, }, { measName: "sub1", target: "t1", m: &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: &gnmi.Notification{ Timestamp: time.Now().UnixNano(), Prefix: &gnmi.Path{Target: "t1"}, Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "interface", Key: map[string]string{ "name": "ethernet-1/1", }, }, {Name: "admin-state"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: "enable"}, }, }, }, }, }, }, }, }, }, args: args{ sub: "", target: "*", p: nil, }, want: map[string][]*gnmi.Notification{}, expectedRespCount: 2, }, { name: "readAll", fields: fields{ inputs: []input{ { measName: "sub1", target: "t1", m: &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: &gnmi.Notification{ Timestamp: time.Now().UnixNano(), Prefix: &gnmi.Path{Target: "t1"}, Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "system"}, {Name: "name"}, {Name: "host-name"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: "srl1"}, }, }, }, }, }, }, }, { measName: "sub2", target: "t1", m: &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: &gnmi.Notification{ Timestamp: time.Now().UnixNano(), Prefix: &gnmi.Path{Target: "t1"}, Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "interface", Key: map[string]string{ "name": "ethernet-1/1", }, }, {Name: "admin-state"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: "enable"}, }, }, }, }, }, }, }, }, }, args: args{ sub: "", target: "*", p: nil, }, want: map[string][]*gnmi.Notification{}, expectedRespCount: 2, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { gc := newGNMICache(&Config{}, "oc", WithLogger(log.Default())) for _, in := range tt.fields.inputs { gc.Write(context.TODO(), in.measName, in.m) } rsp := gc.read(tt.args.sub, tt.args.target, tt.args.p) if _, ok := rsp[tt.args.sub]; !ok && tt.args.sub != "" { t.Errorf("%s: response does not contain the expected subscription name", tt.name) } var rspCount int if tt.args.sub == "" { for _, rsps := range rsp { rspCount += len(rsps) } } else { rspCount = len(rsp[tt.args.sub]) } if tt.expectedRespCount != rspCount { t.Errorf("%s: unexpected response count, got %d, expected %d", tt.name, rspCount, tt.expectedRespCount) } }) } } ================================================ FILE: pkg/cache/options.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package cache import "log" type Option func(Cache) func WithLogger(logger *log.Logger) Option { return func(c Cache) { c.SetLogger(logger) } } ================================================ FILE: pkg/cache/redis_cache.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package cache import ( "context" "fmt" "log" "os" "sync" "time" "google.golang.org/protobuf/proto" redis "github.com/go-redis/redis/v8" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/api/utils" ) const ( loggingPrefixRedis = "[cache:redis] " cacheChannelsChannel = "gnmic_cache_channels" defaultRedisAddress = "127.0.0.1:6379" ) type redisCache struct { cfg *Config oc *gnmiCache cfn context.CancelFunc c *redis.Client channelChan chan string m *sync.RWMutex channels map[string]struct{} logger *log.Logger } func newRedisCache(cfg *Config, opts ...Option) (*redisCache, error) { if cfg == nil { cfg = &Config{ Type: cacheType_Redis, Address: defaultRedisAddress, } } cfg.setDefaults() c := &redisCache{ cfg: cfg, oc: newGNMICache(cfg, "redis", opts...), channelChan: make(chan string), m: new(sync.RWMutex), channels: make(map[string]struct{}), } for _, opt := range opts { opt(c) } if c.logger == nil { c.logger = log.New(os.Stderr, loggingPrefixRedis, utils.DefaultLoggingFlags) } CLIENT: c.c = redis.NewClient(&redis.Options{ Addr: cfg.Address, Username: cfg.Username, Password: cfg.Password, DB: 0, }) ctx, cancel := context.WithCancel(context.Background()) c.cfn = cancel pong, err := c.c.Ping(ctx).Result() if err != nil { c.logger.Printf("failed to connect to redis: %v", err) time.Sleep(time.Second) goto CLIENT } c.logger.Printf("ping result: %s", pong) go c.sync(ctx) return c, nil } func (c *redisCache) SetLogger(logger *log.Logger) { if logger != nil && c.logger != nil { c.logger.SetOutput(logger.Writer()) c.logger.SetFlags(logger.Flags()) c.logger.SetPrefix(loggingPrefixRedis) } } func (c *redisCache) Write(ctx context.Context, subscriptionName string, m proto.Message) { // write the msg to redis c.writeRemoteREDIS(ctx, subscriptionName, m) // publish the subscription name to redis for other gnmic instances var ok bool c.m.RLock() defer func() { c.m.RUnlock() if !ok { c.m.Lock() c.channels[subscriptionName] = struct{}{} c.m.Unlock() c.c.Publish(ctx, cacheChannelsChannel, []byte(subscriptionName)) } }() _, ok = c.channels[subscriptionName] } func (c *redisCache) writeRemoteREDIS(ctx context.Context, subscriptionName string, m proto.Message) { switch m := m.ProtoReflect().Interface().(type) { case *gnmi.SubscribeResponse: switch rsp := m.GetResponse().(type) { case *gnmi.SubscribeResponse_Update: targetName := rsp.Update.GetPrefix().GetTarget() if targetName == "" { c.logger.Printf("subscription=%q: response missing target: %v", subscriptionName, rsp) return } c.channelChan <- subscriptionName var err error err = c.publishNotificationREDIS(ctx, subscriptionName, targetName, m) if err != nil { c.logger.Print(err) } } } } func (c *redisCache) publishNotificationREDIS(ctx context.Context, subscriptionName, targetName string, r *gnmi.SubscribeResponse) error { ctx, cancel := context.WithTimeout(ctx, c.cfg.Timeout) defer cancel() b, err := proto.Marshal(r) if err != nil { return fmt.Errorf("failed to marshal proto message: %w", err) } status := c.c.Publish(ctx, fmt.Sprintf("%s.%s", subscriptionName, targetName), b) if status.Err() != nil { err = fmt.Errorf("failed to publish statusErr: %v", status.Err()) c.logger.Print(err) return err } _, err = status.Result() if err != nil { err = fmt.Errorf("failed to publish resultErr: %v", err) c.logger.Print(err) } return nil } func (c *redisCache) ReadAll() (map[string][]*gnmi.Notification, error) { return c.oc.ReadAll() } func (c *redisCache) Read(sub, target string, p *gnmi.Path) (map[string][]*gnmi.Notification, error) { return c.oc.read(sub, target, p), nil } func (c *redisCache) sync(ctx context.Context) { c.logger.Printf("start redis sync") // subscribe to cache channel updates // and periodically reset the local channels map. go func() { ticker := time.NewTicker(subjectCacheResetPeriod) channelSub := c.c.Subscribe(ctx, cacheChannelsChannel) defer channelSub.Close() for { select { case <-ctx.Done(): return case msg := <-channelSub.Channel(): // pass the channel name to start syncChannel func c.channelChan <- msg.Payload case <-ticker.C: // reset local channels map to re trigger broadcast c.m.Lock() c.channels = make(map[string]struct{}) c.m.Unlock() } } }() // keeps track of channels for which a syncChannel has been started channels := make(map[string]struct{}) for { select { case <-ctx.Done(): return case cc := <-c.channelChan: c.m.Lock() if _, ok := channels[cc]; !ok { channels[cc] = struct{}{} c.logger.Printf("starting redis channel %q sync", cc) go c.syncChannel(ctx, cc) } c.m.Unlock() } } } // syncChannel subscribes to redis channel updates and syncs the local cache func (c *redisCache) syncChannel(ctx context.Context, channel string) { sub := c.c.PSubscribe(ctx, fmt.Sprintf("%s*", channel)) defer sub.Close() i := 0 for { select { case msg := <-sub.Channel(): if len(msg.Payload) == 0 { continue } m := new(gnmi.SubscribeResponse) err := proto.Unmarshal([]byte(msg.Payload), m) if err != nil { c.logger.Printf("failed to unmarshal proto msg: %v", err) continue } c.oc.Write(ctx, channel, m) i++ case <-ctx.Done(): return } } } func (c *redisCache) Subscribe(ctx context.Context, ro *ReadOpts) chan *Notification { return c.oc.Subscribe(ctx, ro) } func (c *redisCache) Stop() { c.cfn() if c.c != nil { c.c.Close() } } func (c *redisCache) DeleteTarget(name string) { c.oc.DeleteTarget(name) } ================================================ FILE: pkg/cmd/capabilities/capabilities.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package capabilities import ( "github.com/openconfig/gnmic/pkg/app" "github.com/spf13/cobra" ) // capabilitiesCmd represents the capabilities command func New(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "capabilities", Aliases: []string{"cap"}, Short: "query targets gnmi capabilities", PreRunE: gApp.CapPreRunE, RunE: gApp.CapRunE, SilenceUsage: true, } gApp.InitCapabilitiesFlags(cmd) return cmd } ================================================ FILE: pkg/cmd/collector/collector.go ================================================ // © 2025 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package collector import ( "crypto/tls" "fmt" "net/http" "github.com/spf13/cobra" "github.com/openconfig/gnmic/pkg/app" "github.com/openconfig/gnmic/pkg/collector" "github.com/openconfig/gnmic/pkg/config" "github.com/zestor-dev/zestor/store" ) // New create the collector command tree. func New(gApp *app.App) *cobra.Command { c := collector.New(gApp.Context(), gApp.Store) cmd := &cobra.Command{ Use: "collect", Aliases: []string{"c", "coll", "collector"}, Short: "collect gNMI telemetry from targets", PreRunE: c.CollectorPreRunE, RunE: c.CollectorRunE, PostRun: func(cmd *cobra.Command, args []string) { gApp.CleanupPlugins() }, SilenceUsage: true, } c.InitCollectorFlags(cmd) cmd.AddCommand(newCollectorTargetsCmd(gApp)) cmd.AddCommand(newCollectorSubscriptionsCmd(gApp)) cmd.AddCommand(newCollectorOutputsCmd(gApp)) cmd.AddCommand(newCollectorProcessorsCmd(gApp)) cmd.AddCommand(newCollectorInputsCmd(gApp)) return cmd } func getAPIServerURL(store store.Store[any]) (string, error) { apiServerConfig, ok, err := store.Get("api-server", "api-server") if err != nil { return "", err } if !ok { return "", fmt.Errorf("api-server config not found") } apiCfg, ok := apiServerConfig.(*config.APIServer) if !ok { return "", fmt.Errorf("api-server config is required for collector command") } if apiCfg == nil { return "", fmt.Errorf("api-server config is required for collector command") } if apiCfg.TLS != nil { return "https://" + apiCfg.Address, nil } return "http://" + apiCfg.Address, nil } func getAPIServerClient(store store.Store[any]) (*http.Client, error) { apiServerConfig, ok, err := store.Get("api-server", "api-server") if err != nil { return nil, err } if !ok { return nil, fmt.Errorf("api-server config not found") } apiCfg, ok := apiServerConfig.(*config.APIServer) if !ok { return nil, fmt.Errorf("address not found") } if apiCfg.TLS != nil { return &http.Client{ Timeout: apiCfg.Timeout, Transport: &http.Transport{ TLSClientConfig: &tls.Config{ InsecureSkipVerify: true, }, }, }, nil } return &http.Client{ Timeout: apiCfg.Timeout, }, nil } ================================================ FILE: pkg/cmd/collector/inputs.go ================================================ // © 2025 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package collector import ( "bytes" "encoding/json" "fmt" "io" "net/http" "os" "sort" "github.com/olekukonko/tablewriter" "github.com/openconfig/gnmic/pkg/app" "github.com/spf13/cobra" ) func newCollectorInputsCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "inputs", Aliases: []string{"input", "in"}, Short: "manage inputs", SilenceUsage: true, } cmd.AddCommand( newCollectorInputsListCmd(gApp), newCollectorInputsGetCmd(gApp), newCollectorInputsSetCmd(gApp), newCollectorInputsDeleteCmd(gApp), ) return cmd } func newCollectorInputsListCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "list", Aliases: []string{"ls"}, Short: "list inputs", SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { apiURL, err := getAPIServerURL(gApp.Store) if err != nil { return err } client, err := getAPIServerClient(gApp.Store) if err != nil { return err } resp, err := client.Get(apiURL + "/api/v1/config/inputs") if err != nil { return err } defer resp.Body.Close() tb, err := io.ReadAll(resp.Body) if err != nil { return err } if resp.StatusCode != http.StatusOK { return fmt.Errorf("failed to list inputs, status code: %d: %s", resp.StatusCode, string(tb)) } // Parse the response as array of maps inputsResponse := make(map[string]interface{}, 0) err = json.Unmarshal(tb, &inputsResponse) if err != nil { return err } inputs := make([]map[string]interface{}, 0) for name, input := range inputsResponse { switch input := input.(type) { case map[string]any: input["name"] = name inputs = append(inputs, input) default: return fmt.Errorf("unknown input type: %T", input) } } // Display as horizontal table table := tablewriter.NewWriter(os.Stdout) table.SetHeader([]string{"Name", "Type", "Format", "Event Processors"}) table.SetAutoWrapText(false) table.SetAutoFormatHeaders(true) table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) table.SetAlignment(tablewriter.ALIGN_LEFT) table.SetCenterSeparator("") table.SetColumnSeparator("") table.SetRowSeparator("") table.SetHeaderLine(false) table.SetBorder(false) table.SetTablePadding("\t") table.SetNoWhiteSpace(true) data := tableFormatInputsList(inputs) table.AppendBulk(data) table.Render() return nil }, } return cmd } func newCollectorInputsGetCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "get", Aliases: []string{"g", "show", "sh"}, Short: "get an input", SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { name, err := cmd.Flags().GetString("name") if err != nil { return err } if name == "" { return fmt.Errorf("input name is required") } apiURL, err := getAPIServerURL(gApp.Store) if err != nil { return err } client, err := getAPIServerClient(gApp.Store) if err != nil { return err } resp, err := client.Get(apiURL + "/api/v1/config/inputs/" + name) if err != nil { return err } defer resp.Body.Close() tb, err := io.ReadAll(resp.Body) if err != nil { return err } if resp.StatusCode != http.StatusOK { return fmt.Errorf("failed to get input, status code: %d: %s", resp.StatusCode, string(tb)) } // Parse the response as a map input := make(map[string]any) err = json.Unmarshal(tb, &input) if err != nil { return err } // Display as vertical table (key-value pairs) table := tablewriter.NewWriter(os.Stdout) table.SetHeader([]string{"PARAM", "VALUE"}) table.SetAutoWrapText(false) table.SetAutoFormatHeaders(false) table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) table.SetAlignment(tablewriter.ALIGN_LEFT) table.SetCenterSeparator("") table.SetColumnSeparator(":") table.SetRowSeparator("") table.SetHeaderLine(false) table.SetBorder(false) table.SetTablePadding("\t") table.SetNoWhiteSpace(true) table.SetColumnAlignment([]int{tablewriter.ALIGN_RIGHT, tablewriter.ALIGN_LEFT}) data := tableFormatInputVertical(input) table.AppendBulk(data) table.Render() return nil }, } cmd.Flags().StringP("name", "n", "", "input name") return cmd } func newCollectorInputsSetCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "set", Aliases: []string{"create", "cr"}, Short: "set an input", SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { inputConfigFile, err := cmd.Flags().GetString("input") if err != nil { return err } if inputConfigFile == "" { return fmt.Errorf("input file is required") } b, err := os.ReadFile(inputConfigFile) if err != nil { return err } var inputConfig map[string]interface{} err = json.Unmarshal(b, &inputConfig) if err != nil { return err } client, err := getAPIServerClient(gApp.Store) if err != nil { return err } apiURL, err := getAPIServerURL(gApp.Store) if err != nil { return err } resp, err := client.Post(apiURL+"/api/v1/config/inputs", "application/json", bytes.NewBuffer(b)) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { tb, _ := io.ReadAll(resp.Body) return fmt.Errorf("failed to create input, status code: %d: %s", resp.StatusCode, string(tb)) } inputName := formatValue(inputConfig["name"]) fmt.Fprintf(os.Stderr, "Input '%s' created successfully\n", inputName) return nil }, } cmd.Flags().StringP("input", "i", "", "input config file") return cmd } func newCollectorInputsDeleteCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "delete", Aliases: []string{"d", "del", "rm"}, Short: "delete an input", SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { name, err := cmd.Flags().GetString("name") if err != nil { return err } if name == "" { return fmt.Errorf("input name is required") } apiURL, err := getAPIServerURL(gApp.Store) if err != nil { return err } client, err := getAPIServerClient(gApp.Store) if err != nil { return err } req, err := http.NewRequest(http.MethodDelete, apiURL+"/api/v1/config/inputs/"+name, nil) if err != nil { return err } resp, err := client.Do(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { tb, _ := io.ReadAll(resp.Body) return fmt.Errorf("failed to delete input, status code: %d: %s", resp.StatusCode, string(tb)) } fmt.Fprintln(os.Stderr, "Input deleted successfully") return nil }, } cmd.Flags().StringP("name", "n", "", "input name") return cmd } // tableFormatOutputVertical formats a single output as vertical table (key-value pairs) func tableFormatInputVertical(input map[string]any) [][]string { data := make([][]string, 0) // Sort keys for consistent output keys := make([]string, 0, len(input)) for k := range input { keys = append(keys, k) } sort.Strings(keys) // Add each key-value pair for _, key := range keys { value := input[key] formattedValue := formatValue(value) data = append(data, []string{key, formattedValue}) } return data } // tableFormatInputsList formats multiple outputs as horizontal table (summary view) func tableFormatInputsList(inputs []map[string]any) [][]string { data := make([][]string, 0, len(inputs)) for _, input := range inputs { name := formatValue(input["name"]) inputType := formatValue(input["type"]) format := formatValue(input["format"]) // Handle event-processors eventProcessors := "-" if ep, ok := input["event-processors"]; ok { eventProcessors = formatValueShort(ep) } data = append(data, []string{ name, inputType, format, eventProcessors, }) } // Sort by name sort.Slice(data, func(i, j int) bool { return data[i][0] < data[j][0] }) return data } ================================================ FILE: pkg/cmd/collector/ouputs.go ================================================ // © 2025 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package collector import ( "bytes" "encoding/json" "fmt" "io" "net/http" "os" "sort" "github.com/olekukonko/tablewriter" "github.com/openconfig/gnmic/pkg/app" "github.com/spf13/cobra" ) func newCollectorOutputsCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "outputs", Aliases: []string{"output", "out"}, Short: "manage outputs", RunE: func(cmd *cobra.Command, args []string) error { return nil }, } cmd.AddCommand( newCollectorOutputsListCmd(gApp), newCollectorOutputsGetCmd(gApp), newCollectorOutputsSetCmd(gApp), newCollectorOutputsDeleteCmd(gApp), ) return cmd } func newCollectorOutputsListCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "list", Aliases: []string{"ls"}, Short: "list outputs", RunE: func(cmd *cobra.Command, args []string) error { apiURL, err := getAPIServerURL(gApp.Store) if err != nil { return err } client, err := getAPIServerClient(gApp.Store) if err != nil { return err } resp, err := client.Get(apiURL + "/api/v1/config/outputs") if err != nil { return err } defer resp.Body.Close() tb, err := io.ReadAll(resp.Body) if err != nil { return err } if resp.StatusCode != http.StatusOK { return fmt.Errorf("failed to list outputs, status code: %d: %s", resp.StatusCode, string(tb)) } // Parse the response as array of maps outputsResponse := make(map[string]interface{}, 0) err = json.Unmarshal(tb, &outputsResponse) if err != nil { return err } outputs := make([]map[string]interface{}, 0) for name, output := range outputsResponse { switch output := output.(type) { case map[string]any: output["name"] = name outputs = append(outputs, output) default: return fmt.Errorf("unknown output type: %T", output) } } // Display as horizontal table table := tablewriter.NewWriter(os.Stdout) table.SetHeader([]string{"Name", "Type", "Format", "Event Processors"}) table.SetAutoWrapText(false) table.SetAutoFormatHeaders(true) table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) table.SetAlignment(tablewriter.ALIGN_LEFT) table.SetCenterSeparator("") table.SetColumnSeparator("") table.SetRowSeparator("") table.SetHeaderLine(false) table.SetBorder(false) table.SetTablePadding("\t") table.SetNoWhiteSpace(true) data := tableFormatOutputsList(outputs) table.AppendBulk(data) table.Render() return nil }, } return cmd } func newCollectorOutputsGetCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "get", Aliases: []string{"g", "show", "sh"}, Short: "get an output", RunE: func(cmd *cobra.Command, args []string) error { name, err := cmd.Flags().GetString("name") if err != nil { return err } if name == "" { return fmt.Errorf("output name is required") } apiURL, err := getAPIServerURL(gApp.Store) if err != nil { return err } client, err := getAPIServerClient(gApp.Store) if err != nil { return err } resp, err := client.Get(apiURL + "/api/v1/config/outputs/" + name) if err != nil { return err } defer resp.Body.Close() tb, err := io.ReadAll(resp.Body) if err != nil { return err } if resp.StatusCode != http.StatusOK { return fmt.Errorf("failed to get output, status code: %d: %s", resp.StatusCode, string(tb)) } // Parse the response as a map output := make(map[string]any) err = json.Unmarshal(tb, &output) if err != nil { return err } // Display as vertical table (key-value pairs) table := tablewriter.NewWriter(os.Stdout) table.SetHeader([]string{"PARAM", "VALUE"}) table.SetAutoWrapText(false) table.SetAutoFormatHeaders(false) table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) table.SetAlignment(tablewriter.ALIGN_LEFT) table.SetCenterSeparator("") table.SetColumnSeparator(":") table.SetRowSeparator("") table.SetHeaderLine(false) table.SetBorder(false) table.SetTablePadding("\t") table.SetNoWhiteSpace(true) table.SetColumnAlignment([]int{tablewriter.ALIGN_RIGHT, tablewriter.ALIGN_LEFT}) data := tableFormatOutputVertical(output) table.AppendBulk(data) table.Render() return nil }, } cmd.Flags().StringP("name", "n", "", "output name") return cmd } func newCollectorOutputsSetCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "set", Aliases: []string{"create", "cr"}, Short: "set an output", RunE: func(cmd *cobra.Command, args []string) error { inputConfig, err := cmd.Flags().GetString("input") if err != nil { return err } if inputConfig == "" { return fmt.Errorf("input file is required") } b, err := os.ReadFile(inputConfig) if err != nil { return err } var outputConfig map[string]interface{} err = json.Unmarshal(b, &outputConfig) if err != nil { return err } client, err := getAPIServerClient(gApp.Store) if err != nil { return err } apiURL, err := getAPIServerURL(gApp.Store) if err != nil { return err } resp, err := client.Post(apiURL+"/api/v1/config/outputs", "application/json", bytes.NewBuffer(b)) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { tb, _ := io.ReadAll(resp.Body) return fmt.Errorf("failed to create output, status code: %d: %s", resp.StatusCode, string(tb)) } outputName := formatValue(outputConfig["name"]) fmt.Fprintf(os.Stderr, "Output '%s' created successfully\n", outputName) return nil }, } cmd.Flags().StringP("input", "i", "", "output config file") return cmd } func newCollectorOutputsDeleteCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "delete", Aliases: []string{"d", "del", "rm"}, Short: "delete an output", RunE: func(cmd *cobra.Command, args []string) error { name, err := cmd.Flags().GetString("name") if err != nil { return err } if name == "" { return fmt.Errorf("output name is required") } apiURL, err := getAPIServerURL(gApp.Store) if err != nil { return err } client, err := getAPIServerClient(gApp.Store) if err != nil { return err } req, err := http.NewRequest(http.MethodDelete, apiURL+"/api/v1/config/outputs/"+name, nil) if err != nil { return err } resp, err := client.Do(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { tb, _ := io.ReadAll(resp.Body) return fmt.Errorf("failed to delete output, status code: %d: %s", resp.StatusCode, string(tb)) } fmt.Fprintln(os.Stderr, "Output deleted successfully") return nil }, } cmd.Flags().StringP("name", "n", "", "output name") return cmd } // tableFormatOutputVertical formats a single output as vertical table (key-value pairs) func tableFormatOutputVertical(output map[string]any) [][]string { data := make([][]string, 0) // Sort keys for consistent output keys := make([]string, 0, len(output)) for k := range output { keys = append(keys, k) } sort.Strings(keys) // Add each key-value pair for _, key := range keys { value := output[key] formattedValue := formatValue(value) data = append(data, []string{key, formattedValue}) } return data } // tableFormatOutputsList formats multiple outputs as horizontal table (summary view) func tableFormatOutputsList(outputs []map[string]any) [][]string { data := make([][]string, 0, len(outputs)) for _, output := range outputs { name := formatValue(output["name"]) outputType := formatValue(output["type"]) format := formatValue(output["format"]) // Handle event-processors eventProcessors := "-" if ep, ok := output["event-processors"]; ok { eventProcessors = formatValueShort(ep) } data = append(data, []string{ name, outputType, format, eventProcessors, }) } // Sort by name sort.Slice(data, func(i, j int) bool { return data[i][0] < data[j][0] }) return data } ================================================ FILE: pkg/cmd/collector/processors.go ================================================ package collector import ( "encoding/json" "fmt" "io" "net/http" "os" "github.com/hairyhenderson/yaml" "github.com/olekukonko/tablewriter" "github.com/openconfig/gnmic/pkg/app" apiserver "github.com/openconfig/gnmic/pkg/collector/api/server" "github.com/spf13/cobra" ) func newCollectorProcessorsCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "processors", Aliases: []string{"processor", "proc"}, Short: "manage processors", SilenceUsage: true, } cmd.AddCommand( newCollectorProcessorsListCmd(gApp), newCollectorProcessorsGetCmd(gApp), newCollectorProcessorsSetCmd(gApp), newCollectorProcessorsDeleteCmd(gApp), ) return cmd } func newCollectorProcessorsListCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "list", Aliases: []string{"ls"}, Short: "list processors", RunE: func(cmd *cobra.Command, args []string) error { detailsFlag, err := cmd.Flags().GetBool("details") if err != nil { return err } apiURL, err := getAPIServerURL(gApp.Store) if err != nil { return err } client, err := getAPIServerClient(gApp.Store) if err != nil { return err } resp, err := client.Get(apiURL + "/api/v1/config/processors") if err != nil { return err } defer resp.Body.Close() tb, err := io.ReadAll(resp.Body) if err != nil { return err } if resp.StatusCode != http.StatusOK { return fmt.Errorf("failed to list processors, status code: %d: %s", resp.StatusCode, string(tb)) } processors := make([]apiserver.ProcessorConfigResponse, 0) err = json.Unmarshal(tb, &processors) if err != nil { return err } table := tablewriter.NewWriter(os.Stdout) if detailsFlag { table.SetHeader([]string{"Name", "Type", "Config"}) } else { table.SetHeader([]string{"Name", "Type"}) } table.SetAutoWrapText(false) table.SetAutoFormatHeaders(true) table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) table.SetAlignment(tablewriter.ALIGN_LEFT) table.SetCenterSeparator("") table.SetColumnSeparator("") table.SetRowSeparator("") table.SetHeaderLine(false) table.SetBorder(false) table.SetTablePadding("\t") table.SetNoWhiteSpace(true) data := tableFormatProcessorsList(processors, detailsFlag) table.AppendBulk(data) table.Render() return nil }, } cmd.Flags().BoolP("details", "", false, "show processors details") return cmd } func newCollectorProcessorsGetCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "get", Aliases: []string{"get"}, Short: "get a processor", SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { name, err := cmd.Flags().GetString("name") if err != nil { return err } if name == "" { return fmt.Errorf("processor name is required") } apiURL, err := getAPIServerURL(gApp.Store) if err != nil { return err } client, err := getAPIServerClient(gApp.Store) if err != nil { return err } resp, err := client.Get(apiURL + "/api/v1/config/processors/" + name) if err != nil { return err } defer resp.Body.Close() processorBytes, err := io.ReadAll(resp.Body) if err != nil { return err } if resp.StatusCode != http.StatusOK { return fmt.Errorf("failed to get processor, status code: %d: %s", resp.StatusCode, string(processorBytes)) } processor := new(apiserver.ProcessorConfigResponse) err = json.Unmarshal(processorBytes, processor) if err != nil { return err } table := tablewriter.NewWriter(os.Stdout) table.SetHeader([]string{"Name", "Type", "Config"}) table.SetAutoWrapText(false) table.SetAutoFormatHeaders(true) table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) table.SetAlignment(tablewriter.ALIGN_LEFT) table.SetCenterSeparator("") table.SetColumnSeparator("") table.SetRowSeparator("") table.SetHeaderLine(false) table.SetBorder(false) table.SetTablePadding("\t") table.SetNoWhiteSpace(true) table.SetColumnAlignment([]int{tablewriter.ALIGN_RIGHT, tablewriter.ALIGN_LEFT}) data := tableFormatProcessorVertical(*processor) table.AppendBulk(data) table.Render() return nil }, } cmd.Flags().StringP("name", "n", "", "processor name") cmd.MarkFlagRequired("name") return cmd } func newCollectorProcessorsSetCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "set", Aliases: []string{"set", "create", "cr"}, Short: "set a processor", SilenceUsage: true, } return cmd } func newCollectorProcessorsDeleteCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "delete", Aliases: []string{"delete"}, Short: "delete a processor", SilenceUsage: true, } return cmd } func tableFormatProcessorsList(processors []apiserver.ProcessorConfigResponse, detailsFlag bool) [][]string { data := make([][]string, 0, len(processors)) for _, processor := range processors { if detailsFlag { data = append(data, []string{processor.Name, processor.Type, formatProcessorConfig(processor.Config)}) } else { data = append(data, []string{processor.Name, processor.Type}) } } return data } func tableFormatProcessorVertical(processor apiserver.ProcessorConfigResponse) [][]string { data := make([][]string, 0, 1) data = append(data, []string{processor.Name, processor.Type, formatProcessorConfig(processor.Config)}) return data } func formatProcessorConfig(config any) string { b, err := yaml.Marshal(config) if err != nil { return "" } return string(b) } ================================================ FILE: pkg/cmd/collector/subscriptions.go ================================================ // © 2025 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package collector import ( "bytes" "encoding/json" "fmt" "io" "net/http" "os" "path/filepath" "sort" "strings" "github.com/mitchellh/mapstructure" "github.com/olekukonko/tablewriter" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/app" apiserver "github.com/openconfig/gnmic/pkg/collector/api/server" "github.com/spf13/cobra" "gopkg.in/yaml.v2" ) func newCollectorSubscriptionsCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "subscriptions", Aliases: []string{"subscription", "sub"}, Short: "manage subscriptions", SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { return nil }, } cmd.AddCommand( newCollectorSubscriptionsListCmd(gApp), newCollectorSubscriptionsGetCmd(gApp), newCollectorSubscriptionsSetCmd(gApp), newCollectorSubscriptionsDeleteCmd(gApp), ) return cmd } func newCollectorSubscriptionsListCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "list", Aliases: []string{"ls"}, Short: "list subscriptions", SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { apiURL, err := getAPIServerURL(gApp.Store) if err != nil { return err } client, err := getAPIServerClient(gApp.Store) if err != nil { return err } resp, err := client.Get(apiURL + "/api/v1/subscriptions") if err != nil { return err } defer resp.Body.Close() tb, err := io.ReadAll(resp.Body) if err != nil { return err } if resp.StatusCode != http.StatusOK { return fmt.Errorf("failed to list subscriptions, status code: %d: %s", resp.StatusCode, string(tb)) } // Parse the response subs := make([]*apiserver.SubscriptionResponse, 0) err = json.Unmarshal(tb, &subs) if err != nil { return err } // if len(subs) == 0 { // fmt.Println("No subscriptions found") // return nil // } // Sort by name sort.Slice(subs, func(i, j int) bool { return subs[i].Name < subs[j].Name }) // Display as horizontal table table := tablewriter.NewWriter(os.Stdout) table.SetHeader([]string{"Name", "Prefix", "Paths", "Encoding", "Mode", "Sample Interval", "Targets", "Outputs"}) table.SetAutoWrapText(false) table.SetAutoFormatHeaders(true) table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) table.SetAlignment(tablewriter.ALIGN_LEFT) table.SetCenterSeparator("") table.SetColumnSeparator("") table.SetRowSeparator("") table.SetHeaderLine(false) table.SetBorder(false) table.SetTablePadding("\t") table.SetNoWhiteSpace(true) data := tableFormatSubscriptionsList(subs) table.AppendBulk(data) table.Render() return nil }, } return cmd } func newCollectorSubscriptionsGetCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "get", Aliases: []string{"g", "show", "sh"}, Short: "get a subscription", SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { name, err := cmd.Flags().GetString("name") if err != nil { return err } if name == "" { return fmt.Errorf("subscription name is required") } apiURL, err := getAPIServerURL(gApp.Store) if err != nil { return err } client, err := getAPIServerClient(gApp.Store) if err != nil { return err } resp, err := client.Get(apiURL + "/api/v1/subscriptions/" + name) if err != nil { return err } defer resp.Body.Close() tb, err := io.ReadAll(resp.Body) if err != nil { return err } if resp.StatusCode != http.StatusOK { return fmt.Errorf("failed to get subscription, status code: %d: %s", resp.StatusCode, string(tb)) } // Parse the response subs := new(apiserver.SubscriptionResponse) err = json.Unmarshal(tb, subs) if err != nil { return err } // Display as vertical table (key-value pairs) table := tablewriter.NewWriter(os.Stdout) table.SetAutoWrapText(false) table.SetAutoFormatHeaders(false) table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) table.SetAlignment(tablewriter.ALIGN_LEFT) table.SetCenterSeparator("") table.SetColumnSeparator(":") table.SetRowSeparator("") table.SetHeaderLine(false) table.SetBorder(false) table.SetTablePadding("\t") table.SetNoWhiteSpace(true) table.SetColumnAlignment([]int{tablewriter.ALIGN_RIGHT, tablewriter.ALIGN_LEFT}) data := tableFormatSubscriptionVertical(subs) table.AppendBulk(data) table.Render() return nil }, } cmd.Flags().StringP("name", "n", "", "subscription name") return cmd } func newCollectorSubscriptionsSetCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "set", Aliases: []string{"create", "cr"}, Short: "set a subscription", RunE: func(cmd *cobra.Command, args []string) error { inputConfig, err := cmd.Flags().GetString("input") if err != nil { return err } if inputConfig == "" { return fmt.Errorf("input file is required") } subConfig, b, err := readSubscriptionConfigFromFile(inputConfig) if err != nil { return err } client, err := getAPIServerClient(gApp.Store) if err != nil { return err } apiURL, err := getAPIServerURL(gApp.Store) if err != nil { return err } resp, err := client.Post(apiURL+"/api/v1/config/subscriptions", "application/json", bytes.NewBuffer(b)) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { tb, _ := io.ReadAll(resp.Body) return fmt.Errorf("failed to create subscription, status code: %d: %s", resp.StatusCode, string(tb)) } fmt.Fprintf(os.Stderr, "Subscription '%s' created successfully\n", subConfig.Name) return nil }, } cmd.Flags().StringP("input", "i", "", "subscription config file") return cmd } func newCollectorSubscriptionsDeleteCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "delete", Aliases: []string{"d", "del", "rm"}, Short: "delete a subscription", RunE: func(cmd *cobra.Command, args []string) error { name, err := cmd.Flags().GetString("name") if err != nil { return err } if name == "" { return fmt.Errorf("subscription name is required") } apiURL, err := getAPIServerURL(gApp.Store) if err != nil { return err } client, err := getAPIServerClient(gApp.Store) if err != nil { return err } req, err := http.NewRequest(http.MethodDelete, apiURL+"/api/v1/config/subscriptions/"+name, nil) if err != nil { return err } resp, err := client.Do(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { tb, _ := io.ReadAll(resp.Body) return fmt.Errorf("failed to delete subscription, status code: %d: %s", resp.StatusCode, string(tb)) } fmt.Fprintf(os.Stderr, "Subscription '%s' deleted successfully\n", name) return nil }, } cmd.Flags().StringP("name", "n", "", "subscription name") return cmd } // formatSubscriptionMode formats the mode and stream mode func formatSubscriptionMode(sub *types.SubscriptionConfig) string { if sub.Mode == "" { sub.Mode = "STREAM" } if strings.ToLower(sub.Mode) == "stream" && sub.StreamMode == "" { sub.StreamMode = "TARGET_DEFINED" } if strings.ToLower(sub.Mode) == "stream" && sub.StreamMode != "" && len(sub.StreamSubscriptions) == 0 { return fmt.Sprintf("%s/%s", strings.ToLower(sub.Mode), strings.ToLower(sub.StreamMode)) } if sub.Mode != "" { return strings.ToLower(sub.Mode) } return "-" } func formatSubscriptionConfigVertical(prefix string, cfg *types.SubscriptionConfig) [][]string { if cfg == nil { return [][]string{} } data := [][]string{ {prefix + "Prefix", formatValue(cfg.Prefix)}, {prefix + "Target", formatValue(cfg.Target)}, {prefix + "Set Target", fmt.Sprintf("%t", cfg.SetTarget)}, {prefix + "Paths", formatValue(cfg.Paths)}, {prefix + "Encoding", formatValue(cfg.Encoding)}, {prefix + "Mode", formatSubscriptionMode(cfg)}, {prefix + "Sample Interval", formatValue(cfg.SampleInterval)}, {prefix + "Heartbeat Interval", formatValue(cfg.HeartbeatInterval)}, {prefix + "Outputs", formatValue(cfg.Outputs)}, {prefix + "Models", formatValue(cfg.Models)}, {prefix + "QoS", formatValue(cfg.Qos)}, {prefix + "Depth", formatValue(cfg.Depth)}, {prefix + "Suppress Redundant", fmt.Sprintf("%t", cfg.SuppressRedundant)}, {prefix + "Updates Only", fmt.Sprintf("%t", cfg.UpdatesOnly)}, } // History section (if present) if cfg.History != nil { if !cfg.History.Snapshot.IsZero() { data = append(data, []string{prefix + "History Snapshot", cfg.History.Snapshot.String()}) } if !cfg.History.Start.IsZero() { data = append(data, []string{prefix + "History Start", cfg.History.Start.String()}) } if !cfg.History.End.IsZero() { data = append(data, []string{prefix + "History End", cfg.History.End.String()}) } } return data } func formatStreamSubscriptionConfigVertical(prefix string, cfg *types.SubscriptionConfig) [][]string { if cfg == nil { return [][]string{} } data := [][]string{ {prefix + "Paths", formatValue(cfg.Paths)}, {prefix + "Mode", formatSubscriptionMode(cfg)}, {prefix + "Sample Interval", formatValue(cfg.SampleInterval)}, {prefix + "Heartbeat Interval", formatValue(cfg.HeartbeatInterval)}, } return data } func tableFormatSubscriptionVertical(sub *apiserver.SubscriptionResponse) [][]string { if sub.Config == nil { return [][]string{{"Name", sub.Name}} } data := [][]string{ {"Name", sub.Name}, } // Main subscription config data = append(data, formatSubscriptionConfigVertical("", sub.Config)...) // Targets (top-level only) if len(sub.Targets) > 0 { targetNames := make([]string, 0, len(sub.Targets)) for name := range sub.Targets { targetNames = append(targetNames, name) } sort.Strings(targetNames) var targetInfo []string for _, name := range targetNames { targetInfo = append(targetInfo, fmt.Sprintf("%s (%s)", name, sub.Targets[name].State)) } data = append(data, []string{"Targets", strings.Join(targetInfo, "\n")}) } // Stream Subscriptions for _, sc := range sub.Config.StreamSubscriptions { header := []string{fmt.Sprintf("\tStream Subscription: %s", sc.Name), ""} data = append(data, []string{header[0], header[1]}) // Indent keys to show hierarchy cleanly data = append(data, formatStreamSubscriptionConfigVertical(" ", sc)...) } return data } // tableFormatSubscriptionsList formats multiple subscriptions as horizontal table (summary view) func tableFormatSubscriptionsList(subs []*apiserver.SubscriptionResponse) [][]string { data := make([][]string, 0, len(subs)) for _, sub := range subs { if sub.Config == nil { continue } // Add main subscription row data = append(data, formatSubscriptionRow( sub.Name, sub.Config, sub.Targets, )) // Add stream-subscriptions (children) for i, sc := range sub.Config.StreamSubscriptions { // Indent child name for visual grouping childName := "↳" + fmt.Sprintf("[%d]", i) + sub.Name data = append(data, formatSubscriptionRow( childName, sc, nil, // children have no targets )) } } return data } func formatSubscriptionRow( name string, cfg *types.SubscriptionConfig, targets map[string]*apiserver.TargetStateInfo, ) []string { // Paths paths := "-" if len(cfg.Paths) > 0 { paths = strings.Join(cfg.Paths, "\n") } // Targets summary targetsStr := "-" if len(targets) > 0 { names := make([]string, 0, len(targets)) for n := range targets { names = append(names, n) } sort.Strings(names) running, disabled := 0, 0 for _, n := range names { if targets[n].State == "running" { running++ } else { disabled++ } } targetsStr = fmt.Sprintf("%d/%d", running, len(targets)) if disabled > 0 { targetsStr += fmt.Sprintf(" (%d disabled)", disabled) } } return []string{ name, formatValue(cfg.Prefix), paths, formatSubscriptionEncoding(cfg.Encoding), formatSubscriptionMode(cfg), formatValue(cfg.SampleInterval), targetsStr, formatValueShort(cfg.Outputs), } } func readSubscriptionConfigFromFile(filename string) (*types.SubscriptionConfig, []byte, error) { b, err := os.ReadFile(filename) if err != nil { return nil, nil, err } cfg := make(map[string]any) switch strings.ToLower(filepath.Ext(filename)) { case ".json": err = json.Unmarshal(b, &cfg) case ".yaml", ".yml": err = yaml.Unmarshal(b, &cfg) default: return nil, nil, fmt.Errorf("unsupported file type: %s", filepath.Ext(filename)) } if err != nil { return nil, nil, err } subConfig := new(types.SubscriptionConfig) decoder, err := mapstructure.NewDecoder( &mapstructure.DecoderConfig{ DecodeHook: mapstructure.StringToTimeDurationHookFunc(), Result: subConfig, }) if err != nil { return nil, nil, err } err = decoder.Decode(cfg) if err != nil { return nil, nil, err } return subConfig, b, nil } func formatSubscriptionEncoding(encoding *string) string { if encoding == nil { return "json" } if *encoding == "" { return "json" } return formatValue(*encoding) } ================================================ FILE: pkg/cmd/collector/targets.go ================================================ // © 2025 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package collector import ( "bytes" "encoding/json" "fmt" "io" "net/http" "os" "path/filepath" "sort" "strings" "github.com/hairyhenderson/yaml" "github.com/mitchellh/mapstructure" "github.com/olekukonko/tablewriter" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/app" apiserver "github.com/openconfig/gnmic/pkg/collector/api/server" "github.com/spf13/cobra" ) func newCollectorTargetsCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "targets", Aliases: []string{"target", "tg"}, Short: "manage targets", SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { return nil }, } cmd.AddCommand(newCollectorTargetsListCmd(gApp)) cmd.AddCommand(newCollectorTargetsGetCmd(gApp)) cmd.AddCommand(newCollectorTargetsSetCmd(gApp)) cmd.AddCommand(newCollectorTargetsDeleteCmd(gApp)) return cmd } func newCollectorTargetsListCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "list", Aliases: []string{"ls"}, Short: "list targets", SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { apiURL, err := getAPIServerURL(gApp.Store) if err != nil { return err } client, err := getAPIServerClient(gApp.Store) if err != nil { return err } resp, err := client.Get(apiURL + "/api/v1/targets") if err != nil { return err } defer resp.Body.Close() tb, err := io.ReadAll(resp.Body) if err != nil { return err } if resp.StatusCode != http.StatusOK { return fmt.Errorf("failed to list targets, status code: %d: %s", resp.StatusCode, string(tb)) } // Parse the response tc := make([]*apiserver.TargetResponse, 0) err = json.Unmarshal(tb, &tc) if err != nil { return err } // Display as horizontal table table := tablewriter.NewWriter(os.Stdout) table.SetHeader([]string{"Name", "Address", "Username", "State", "Subscriptions", "Outputs", "Insecure", "Skip Verify"}) table.SetAutoWrapText(false) table.SetAutoFormatHeaders(true) table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) table.SetAlignment(tablewriter.ALIGN_LEFT) table.SetCenterSeparator("") table.SetColumnSeparator("") table.SetRowSeparator("") table.SetHeaderLine(false) table.SetBorder(false) table.SetTablePadding("\t") table.SetNoWhiteSpace(true) data := tableFormatTargetsList(tc) table.AppendBulk(data) table.Render() return nil }, } return cmd } func newCollectorTargetsGetCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "get", Aliases: []string{"g", "show", "sh"}, Short: "get a target", SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { name, err := cmd.Flags().GetString("name") if err != nil { return err } if name == "" { return fmt.Errorf("target name is required") } apiURL, err := getAPIServerURL(gApp.Store) if err != nil { return err } client, err := getAPIServerClient(gApp.Store) if err != nil { return err } resp, err := client.Get(apiURL + "/api/v1/targets/" + name) if err != nil { return err } defer resp.Body.Close() tb, err := io.ReadAll(resp.Body) if err != nil { return err } if resp.StatusCode != http.StatusOK { return fmt.Errorf("failed to get target, status code: %d: %s", resp.StatusCode, string(tb)) } tc := make([]*apiserver.TargetResponse, 0) err = json.Unmarshal(tb, &tc) if err != nil { return err } if len(tc) == 0 { return fmt.Errorf("no targets found") } // Display as vertical table (key-value pairs) table := tablewriter.NewWriter(os.Stdout) table.SetAutoWrapText(false) table.SetAutoFormatHeaders(false) table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) table.SetAlignment(tablewriter.ALIGN_LEFT) table.SetCenterSeparator("") table.SetColumnSeparator(":") table.SetRowSeparator("") table.SetHeaderLine(false) table.SetBorder(false) table.SetTablePadding("\t") table.SetNoWhiteSpace(true) table.SetColumnAlignment([]int{tablewriter.ALIGN_RIGHT, tablewriter.ALIGN_LEFT}) data := tableFormatTargetVertical(tc[0]) table.AppendBulk(data) table.Render() return nil }, } cmd.Flags().StringP("name", "n", "", "target name") return cmd } func newCollectorTargetsSetCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "set", Aliases: []string{"create", "cr"}, Short: "set a target", SilenceUsage: true, RunE: func(cmd *cobra.Command, args []string) error { inputConfig, err := cmd.Flags().GetString("input") if err != nil { return err } targetConfig, b, err := readTargetConfigFromFile(inputConfig) if err != nil { return err } client, err := getAPIServerClient(gApp.Store) if err != nil { return err } apiURL, err := getAPIServerURL(gApp.Store) if err != nil { return err } resp, err := client.Post(apiURL+"/api/v1/config/targets", "application/json", bytes.NewBuffer(b)) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { tb, _ := io.ReadAll(resp.Body) return fmt.Errorf("failed to create target, status code: %d: %s", resp.StatusCode, string(tb)) } fmt.Fprintf(os.Stderr, "Target '%s' created successfully\n", targetConfig.Name) return nil }, } cmd.Flags().StringP("input", "i", "", "target file input") return cmd } func newCollectorTargetsDeleteCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "delete", Aliases: []string{"d", "del", "rm"}, Short: "delete a target", RunE: func(cmd *cobra.Command, args []string) error { name, err := cmd.Flags().GetString("name") if err != nil { return err } if name == "" { return fmt.Errorf("target name is required") } apiURL, err := getAPIServerURL(gApp.Store) if err != nil { return err } client, err := getAPIServerClient(gApp.Store) if err != nil { return err } req, err := http.NewRequest(http.MethodDelete, apiURL+"/api/v1/config/targets/"+name, nil) if err != nil { return err } resp, err := client.Do(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { tb, _ := io.ReadAll(resp.Body) return fmt.Errorf("failed to delete target, status code: %d: %s", resp.StatusCode, string(tb)) } fmt.Fprintf(os.Stderr, "Target '%s' deleted successfully\n", name) return nil }, } cmd.Flags().StringP("name", "n", "", "target name") return cmd } // formatValue formats any value based on its type for table display func formatValue(v any) string { if v == nil { return "-" } switch val := v.(type) { case *string: if val == nil { return "-" } if *val == "" { return "-" } return *val case string: if val == "" { return "-" } return val case *bool: if val == nil { return "-" } return fmt.Sprintf("%t", *val) case bool: return fmt.Sprintf("%t", val) case *int: if val == nil { return "-" } return fmt.Sprintf("%d", *val) case int: if val == 0 { return "-" } return fmt.Sprintf("%d", val) case uint: if val == 0 { return "-" } return fmt.Sprintf("%d", val) case []string: if len(val) == 0 { return "-" } return strings.Join(val, ", ") case map[string]string: if len(val) == 0 { return "-" } var parts []string for k, v := range val { parts = append(parts, fmt.Sprintf("%s=%s", k, v)) } sort.Strings(parts) return strings.Join(parts, ", ") default: str := fmt.Sprintf("%v", val) if str == "" || str == "0s" || str == "" { return "-" } return str } } // formatValueShort formats value for list view (shorter version) func formatValueShort(v any) string { if v == nil { return "-" } switch val := v.(type) { case []string: if len(val) == 0 { return "-" } return fmt.Sprintf("%d", len(val)) case map[string]string: if len(val) == 0 { return "-" } return fmt.Sprintf("%d", len(val)) default: return formatValue(val) } } // tableFormatTargetVertical formats a single target as vertical table (key-value pairs) func tableFormatTargetVertical(target *apiserver.TargetResponse) [][]string { cfg := target.Config data := [][]string{ {"Name", target.Name}, {"State", target.State.State}, {"Address", formatValue(cfg.Address)}, {"Username", formatValue(cfg.Username)}, {"Password", formatValue(cfg.Password)}, {"Auth Scheme", formatValue(cfg.AuthScheme)}, {"Timeout", formatValue(cfg.Timeout)}, {"Insecure", formatValue(cfg.Insecure)}, {"Skip Verify", formatValue(cfg.SkipVerify)}, {"TLS CA", formatValue(cfg.TLSCA)}, {"TLS Cert", formatValue(cfg.TLSCert)}, {"TLS Key", formatValue(cfg.TLSKey)}, {"TLS Server Name", formatValue(cfg.TLSServerName)}, {"TLS Min Version", formatValue(cfg.TLSMinVersion)}, {"TLS Max Version", formatValue(cfg.TLSMaxVersion)}, {"TLS Version", formatValue(cfg.TLSVersion)}, {"Log TLS Secret", formatValue(cfg.LogTLSSecret)}, {"Subscriptions", formatValue(target.State.Subscriptions)}, {"Outputs", formatValue(cfg.Outputs)}, {"Buffer Size", formatValue(cfg.BufferSize)}, {"Retry Timer", formatValue(cfg.RetryTimer)}, {"Token", formatValue(cfg.Token)}, {"Proxy", formatValue(cfg.Proxy)}, {"Encoding", formatValue(cfg.Encoding)}, {"Tags", formatValue(cfg.Tags)}, {"Event Tags", formatValue(cfg.EventTags)}, {"Metadata", formatValue(cfg.Metadata)}, {"Gzip", formatValue(cfg.Gzip)}, {"Proto Files", formatValue(cfg.ProtoFiles)}, {"Proto Dirs", formatValue(cfg.ProtoDirs)}, {"Cipher Suites", formatValue(cfg.CipherSuites)}, {"TCP Keepalive", formatValue(cfg.TCPKeepalive)}, {"GRPC Keepalive", formatValue(cfg.GRPCKeepalive)}, {"Tunnel Target Type", formatValue(cfg.TunnelTargetType)}, } return data } // tableFormatTargetsList formats multiple targets as horizontal table (summary view) func tableFormatTargetsList(targets []*apiserver.TargetResponse) [][]string { data := make([][]string, 0, len(targets)) for _, target := range targets { data = append(data, []string{ target.Name, formatValue(target.Config.Address), formatValue(target.Config.Username), target.State.State, formatValueShort(target.State.Subscriptions), formatValueShort(target.Config.Outputs), formatValue(target.Config.Insecure), formatValue(target.Config.SkipVerify), }) } // Sort by name sort.Slice(data, func(i, j int) bool { return data[i][0] < data[j][0] }) return data } func readTargetConfigFromFile(filename string) (*types.TargetConfig, []byte, error) { b, err := os.ReadFile(filename) if err != nil { return nil, nil, err } cfg := make(map[string]any) switch strings.ToLower(filepath.Ext(filename)) { case ".json": err = json.Unmarshal(b, &cfg) case ".yaml", ".yml": err = yaml.Unmarshal(b, &cfg) default: return nil, nil, fmt.Errorf("unsupported file type: %s", filepath.Ext(filename)) } if err != nil { return nil, nil, err } targetConfig := new(types.TargetConfig) decoder, err := mapstructure.NewDecoder( &mapstructure.DecoderConfig{ DecodeHook: mapstructure.StringToTimeDurationHookFunc(), Result: targetConfig, }) if err != nil { return nil, nil, err } err = decoder.Decode(cfg) if err != nil { return nil, nil, err } return targetConfig, b, nil } ================================================ FILE: pkg/cmd/completion.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package cmd import ( "os" "github.com/openconfig/gnmic/pkg/app" "github.com/spf13/cobra" ) // newCompletionCmd creates completion command tree. func newCompletionCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "completion [bash|zsh|fish]", Short: "generate completion script", SilenceUsage: true, Long: `To load completions:, Bash: $ source <(gnmic completion bash) # To load completions for each session, execute once: # Linux: $ gnmic completion bash > /etc/bash_completion.d/gnmic # macOS: $ gnmic completion bash > /usr/local/etc/bash_completion.d/gnmic Zsh: # If shell completion is not already enabled in your environment, # you will need to enable it. You can execute the following once: $ echo "autoload -U compinit; compinit" >> ~/.zshrc # To load completions for each session, execute once: $ gnmic completion zsh > "${fpath[1]}/gnmic" # You will need to start a new shell for this setup to take effect. fish: $ gnmic completion fish | source # To load completions for each session, execute once: $ gnmic completion fish > ~/.config/fish/completions/gnmic.fish `, DisableFlagsInUseLine: true, ValidArgs: []string{"bash", "zsh", "fish"}, Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs), Run: func(cmd *cobra.Command, args []string) { switch args[0] { case "bash": cmd.Root().GenBashCompletion(os.Stdout) case "zsh": cmd.Root().GenZshCompletion(os.Stdout) case "fish": cmd.Root().GenFishCompletion(os.Stdout, true) } }, } return cmd } ================================================ FILE: pkg/cmd/diff/diff.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package diff import ( "github.com/openconfig/gnmic/pkg/app" "github.com/spf13/cobra" ) // diffCmd represents the diff command func New(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "diff", Aliases: []string{"compare"}, Short: "run a diff comparison between targets", PreRunE: gApp.DiffPreRunE, RunE: gApp.DiffRunE, SilenceUsage: true, } gApp.InitDiffFlags(cmd) cmd.AddCommand(newDiffSetRequestCmd(gApp)) cmd.AddCommand(newDiffSetToNotifsCmd(gApp)) return cmd } // newDiffSetRequestCmd creates a new diff setrequest command. func newDiffSetRequestCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "setrequest", Short: "run a diff comparison between two setrequests in textproto format", RunE: gApp.DiffSetRequestRunE, SilenceUsage: true, } gApp.InitDiffSetRequestFlags(cmd) return cmd } func newDiffSetToNotifsCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "set-to-notifs", Short: "run a diff comparison between a SetRequest and a GetResponse or SubscribeResponse stream stored in textproto format", RunE: gApp.DiffSetToNotifsRunE, SilenceUsage: true, } gApp.InitDiffSetToNotifsFlags(cmd) return cmd } ================================================ FILE: pkg/cmd/generate/generate.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package generate import ( "github.com/openconfig/gnmic/pkg/app" "github.com/spf13/cobra" ) // generateCmd represents the generate command func New(gApp *app.App) *cobra.Command { genCmd := &cobra.Command{ Use: "generate", Aliases: []string{"gen"}, Short: "generate paths or JSON/YAML objects from YANG", PersistentPreRunE: gApp.GeneratePreRunE, RunE: gApp.GenerateRunE, SilenceUsage: true, } genCmd.AddCommand(newGenerateSetRequestCmd(gApp)) genCmd.AddCommand(newGeneratePathCmd(gApp)) gApp.InitGenerateFlags(genCmd) return genCmd } ================================================ FILE: pkg/cmd/generate/generatePath.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package generate import ( "github.com/openconfig/gnmic/pkg/app" "github.com/spf13/cobra" ) // newGeneratePathCmd represents the generate path command func newGeneratePathCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "path", Short: "generate xpath(s) from yang models", PreRunE: gApp.GeneratePathPreRunE, RunE: gApp.GeneratePathRunE, SilenceUsage: true, } gApp.InitGeneratePathFlags(cmd) return cmd } ================================================ FILE: pkg/cmd/generate/generateSetRequest.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package generate import ( "github.com/openconfig/gnmic/pkg/app" "github.com/spf13/cobra" ) // newGenerateSetRequestCmd represents the generate set-request command func newGenerateSetRequestCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "set-request", Aliases: []string{"sr", "sreq", "srq"}, Short: "generate Set Request file", PreRunE: func(cmd *cobra.Command, _ []string) error { gApp.Config.SetLocalFlagsFromFile(cmd) return nil }, RunE: gApp.GenerateSetRequestRunE, SilenceUsage: true, } gApp.InitGenerateSetRequestFlags(cmd) return cmd } ================================================ FILE: pkg/cmd/get/get.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package get import ( "github.com/spf13/cobra" "github.com/openconfig/gnmic/pkg/app" ) var DataType = [][2]string{ {"all", "all config/state/operational data"}, {"config", "data that the target considers to be read/write"}, {"state", "read-only data on the target"}, {"operational", "read-only data on the target that is related to software processes operating on the device, or external interactions of the device"}, } // getCmd represents the get command func New(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "get", Short: "run gnmi get on targets", Annotations: map[string]string{ "--path": "XPATH", "--prefix": "PREFIX", "--model": "MODEL", "--type": "STORE", }, PreRunE: gApp.GetPreRunE, RunE: gApp.GetRun, PostRun: func(cmd *cobra.Command, args []string) { gApp.CleanupPlugins() }, SilenceUsage: true, } gApp.InitGetFlags(cmd) return cmd } ================================================ FILE: pkg/cmd/getset/getset.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package getset import ( "github.com/openconfig/gnmic/pkg/app" "github.com/spf13/cobra" ) // getCmd represents the get command func New(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "getset", Aliases: []string{"gas", "gs"}, Short: "run gnmi get then set on targets", Annotations: map[string]string{ "--get": "XPATH", "--prefix": "PREFIX", "--type": "STORE", }, PreRunE: gApp.GetSetPreRunE, RunE: gApp.GetSetRunE, SilenceUsage: true, } gApp.InitGetSetFlags(cmd) return cmd } ================================================ FILE: pkg/cmd/listener/listener.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package listener import ( "context" "encoding/json" "fmt" "io" "net" "net/http" "os" "github.com/fullstorydev/grpcurl" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/jhump/protoreflect/desc" "github.com/jhump/protoreflect/dynamic" nokiasros "github.com/karimra/sros-dialout" "github.com/openconfig/gnmi/proto/gnmi" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/spf13/cobra" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/app" "github.com/openconfig/gnmic/pkg/outputs" ) // New returns the listen command tree. func New(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "listen", Short: "listens for telemetry dialout updates from the node", PreRunE: func(cmd *cobra.Command, _ []string) error { gApp.Config.SetLocalFlagsFromFile(cmd) if len(gApp.Config.Address) == 0 { return fmt.Errorf("no address specified") } if len(gApp.Config.Address) > 1 { fmt.Fprintf(os.Stderr, "multiple addresses specified, listening only on %s\n", gApp.Config.Address[0]) } return nil }, RunE: func(cmd *cobra.Command, _ []string) error { ctx, cancel := context.WithCancel(cmd.Context()) defer cancel() server := new(dialoutTelemetryServer) server.ctx = ctx opts := []grpc.ServerOption{ grpc.MaxConcurrentStreams(gApp.Config.LocalFlags.ListenMaxConcurrentStreams), } if gApp.Config.MaxMsgSize > 0 { opts = append(opts, grpc.MaxRecvMsgSize(gApp.Config.MaxMsgSize)) } if gApp.Config.LocalFlags.ListenPrometheusAddress != "" { server.reg = prometheus.NewRegistry() grpcMetrics := grpc_prometheus.NewServerMetrics() opts = append(opts, grpc.StreamInterceptor(grpcMetrics.StreamServerInterceptor()), ) server.reg.MustRegister(grpcMetrics) } if len(gApp.Config.ProtoFile) > 0 { gApp.Logger.Printf("loading proto files...") descSource, err := grpcurl.DescriptorSourceFromProtoFiles(gApp.Config.ProtoDir, gApp.Config.ProtoFile...) if err != nil { gApp.Logger.Printf("failed to load proto files: %v", err) return err } server.rootDesc, err = descSource.FindSymbol("Nokia.SROS.root") if err != nil { gApp.Logger.Printf("could not get symbol 'Nokia.SROS.root': %v", err) return err } gApp.Logger.Printf("loaded proto files") } server.Outputs = make(map[string]outputs.Output) outCfgs, err := gApp.Config.GetOutputs() if err != nil { return err } for name, outConf := range outCfgs { if outType, ok := outConf["type"]; ok { if initializer, ok := outputs.Outputs[outType.(string)]; ok { out := initializer() go out.Init(ctx, name, outConf, outputs.WithLogger(gApp.Logger), outputs.WithName(gApp.Config.InstanceName), outputs.WithClusterName(gApp.Config.ClusterName), outputs.WithRegistry(server.reg), outputs.WithConfigStore(gApp.Store), ) server.Outputs[name] = out } } } defer func() { for _, o := range server.Outputs { o.Close() } }() server.listener, err = net.Listen("tcp", gApp.Config.Address[0]) if err != nil { return err } gApp.Logger.Printf("waiting for connections on %s", gApp.Config.Address[0]) if gApp.Config.TLSKey != "" && gApp.Config.TLSCert != "" { tlsConfig, err := utils.NewTLSConfig( gApp.Config.TLSCa, gApp.Config.TLSCert, gApp.Config.TLSKey, "request", false, true, ) if err != nil { return err } opts = append(opts, grpc.Creds(credentials.NewTLS(tlsConfig))) } server.grpcServer = grpc.NewServer(opts...) nokiasros.RegisterDialoutTelemetryServer(server.grpcServer, server) if gApp.Config.LocalFlags.ListenPrometheusAddress != "" { grpc_prometheus.Register(server.grpcServer) httpServer := &http.Server{ Handler: promhttp.HandlerFor(server.reg, promhttp.HandlerOpts{}), Addr: gApp.Config.LocalFlags.ListenPrometheusAddress, } go func() { if err := httpServer.ListenAndServe(); err != nil { gApp.Logger.Printf("Unable to start prometheus http server.") } }() defer httpServer.Close() } server.gApp = gApp server.grpcServer.Serve(server.listener) defer server.grpcServer.Stop() return nil }, SilenceUsage: true, } cmd.Flags().Uint32P("max-concurrent-streams", "", 256, "max concurrent streams gnmic can receive per transport") cmd.Flags().StringP("prometheus-address", "", "", "prometheus server address") gApp.Config.FileConfig.BindPFlag("listen-max-concurrent-streams", cmd.LocalFlags().Lookup("max-concurrent-streams")) gApp.Config.FileConfig.BindPFlag("listen-prometheus-address", cmd.LocalFlags().Lookup("prometheus-address")) return cmd } type dialoutTelemetryServer struct { listener net.Listener grpcServer *grpc.Server rootDesc desc.Descriptor Outputs map[string]outputs.Output ctx context.Context gApp *app.App reg *prometheus.Registry } func (s *dialoutTelemetryServer) Publish(stream nokiasros.DialoutTelemetry_PublishServer) error { peer, ok := peer.FromContext(stream.Context()) if ok && s.gApp.Config.Debug { b, err := json.Marshal(peer) if err != nil { s.gApp.Logger.Printf("failed to marshal peer data: %v", err) } else { s.gApp.Logger.Printf("received Publish RPC from peer=%s", string(b)) } } md, ok := metadata.FromIncomingContext(stream.Context()) if ok && s.gApp.Config.Debug { b, err := json.Marshal(md) if err != nil { s.gApp.Logger.Printf("failed to marshal context metadata: %v", err) } else { s.gApp.Logger.Printf("received http2_header=%s", string(b)) } } outMeta := outputs.Meta{} if sn, ok := md["subscription-name"]; ok { if len(sn) > 0 { outMeta["subscription-name"] = sn[0] } } else { s.gApp.Logger.Println("could not find subscription-name in http2 headers") } outMeta["source"] = peer.Addr.String() if systemName, ok := md["system-name"]; ok { if len(systemName) > 0 { outMeta["system-name"] = systemName[0] } } else { s.gApp.Logger.Println("could not find system-name in http2 headers") } for { subResp, err := stream.Recv() if err != nil { if err != io.EOF { s.gApp.Logger.Printf("gRPC dialout receive error: %v", err) } break } err = stream.Send(&nokiasros.PublishResponse{}) if err != nil { s.gApp.Logger.Printf("error sending publish response to server: %v", err) } switch resp := subResp.Response.(type) { case *gnmi.SubscribeResponse_Update: if s.rootDesc != nil { for _, update := range resp.Update.Update { switch update.Val.Value.(type) { case *gnmi.TypedValue_ProtoBytes: m := dynamic.NewMessage(s.rootDesc.GetFile().FindMessage("Nokia.SROS.root")) err := m.Unmarshal(update.Val.GetProtoBytes()) if err != nil { s.gApp.Logger.Printf("failed to unmarshal m: %v", err) } jsondata, err := m.MarshalJSON() if err != nil { s.gApp.Logger.Printf("failed to marshal dynamic proto msg: %v", err) continue } if s.gApp.Config.Debug { s.gApp.Logger.Printf("json format=%s", string(jsondata)) } update.Val.Value = &gnmi.TypedValue_JsonVal{JsonVal: jsondata} } } } for _, o := range s.Outputs { go o.Write(s.ctx, subResp, outMeta) } case *gnmi.SubscribeResponse_SyncResponse: s.gApp.Logger.Printf("received sync response=%+v from %s", resp.SyncResponse, outMeta["source"]) } } return nil } ================================================ FILE: pkg/cmd/path/path.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package path import ( "github.com/openconfig/gnmic/pkg/app" "github.com/spf13/cobra" ) // New creates the path command tree. func New(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "path", Short: "generate gnmi or xpath style from yang file", Annotations: map[string]string{ "--file": "YANG", "--dir": "DIR", }, PreRunE: gApp.PathPreRunE, RunE: gApp.PathRunE, PostRun: func(cmd *cobra.Command, _ []string) { cmd.ResetFlags() gApp.InitPathFlags(cmd) }, SilenceUsage: true, } gApp.InitPathFlags(cmd) return cmd } ================================================ FILE: pkg/cmd/processor/processor.go ================================================ package processor import ( "github.com/openconfig/gnmic/pkg/app" "github.com/spf13/cobra" ) // processorCmd represents the processor command func New(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "processor", Aliases: []string{"proc"}, Short: "apply a list of processors", PreRunE: gApp.ProcessorPreRunE, RunE: gApp.ProcessorRunE, PostRun: func(cmd *cobra.Command, args []string) { gApp.CleanupPlugins() }, SilenceUsage: true, } gApp.InitProcessorFlags(cmd) return cmd } ================================================ FILE: pkg/cmd/prompt.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package cmd import ( "errors" "fmt" "os" "sort" "strings" "unicode" goprompt "github.com/c-bata/go-prompt" "github.com/c-bata/go-prompt/completer" homedir "github.com/mitchellh/go-homedir" "github.com/nsf/termbox-go" "github.com/olekukonko/tablewriter" "github.com/openconfig/goyang/pkg/yang" "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/cmd/get" "github.com/openconfig/gnmic/pkg/cmd/subscribe" ) var colorMapping = map[string]goprompt.Color{ "black": goprompt.Black, "dark_red": goprompt.DarkRed, "dark_green": goprompt.DarkGreen, "brown": goprompt.Brown, "dark_blue": goprompt.DarkBlue, "purple": goprompt.Purple, "cyan": goprompt.Cyan, "light_gray": goprompt.LightGray, "dark_gray": goprompt.DarkGray, "red": goprompt.Red, "green": goprompt.Green, "yellow": goprompt.Yellow, "blue": goprompt.Blue, "fuchsia": goprompt.Fuchsia, "turquoise": goprompt.Turquoise, "white": goprompt.White, } var targetListHeader = []string{ "Name", "Address", "Username", "Password", "Insecure", "Skip Verify", "TLS CA", "TLS Certificate", "TLS Key"} var subscriptionListHeader = []string{"Name", "Mode", "Prefix", "Paths", "Interval", "Encoding"} func getColor(flagName string) goprompt.Color { switch flagName { case "prefix-color": if cgoprompt, ok := colorMapping[gApp.Config.LocalFlags.PromptPrefixColor]; ok { return cgoprompt } case "suggestions-bg-color": if cgoprompt, ok := colorMapping[gApp.Config.LocalFlags.PromptSuggestionsBGColor]; ok { return cgoprompt } case "description-bg-color": if cgoprompt, ok := colorMapping[gApp.Config.LocalFlags.PromptDescriptionBGColor]; ok { return cgoprompt } } defColor := "yellow" promptModeCmd.Flags().VisitAll( func(f *pflag.Flag) { if f.Name == flagName { defColor = f.DefValue return } }, ) return colorMapping[defColor] } var promptModeCmd *cobra.Command func newPromptCmd() *cobra.Command { promptModeCmd = &cobra.Command{ Use: "prompt", Short: "enter the interactive gnmic prompt mode", PreRunE: gApp.PromptPreRunE, RunE: gApp.PromptRunE, PostRun: func(cmd *cobra.Command, _ []string) { cmd.ResetFlags() //initPromptFlags(cmd) }, SilenceUsage: true, } gApp.InitPromptFlags(promptModeCmd) return promptModeCmd } var promptQuitCmd = &cobra.Command{ Use: "quit", Short: "quit the gnmic-prompt", Run: func(_ *cobra.Command, _ []string) { // cancel gctx gApp.Cfn() // save history home, err := homedir.Dir() if err != nil { os.Exit(0) } f, err := os.Create(home + "/.gnmic.history") if err != nil { os.Exit(0) } l := len(gApp.PromptHistory) if l > 128 { gApp.PromptHistory = gApp.PromptHistory[l-128:] } for i := range gApp.PromptHistory { f.WriteString(gApp.PromptHistory[i] + "\n") } f.Close() os.Exit(0) }, } var targetCmd = &cobra.Command{ Use: "target", Short: "manipulate configured targets", } var targetListCmd = &cobra.Command{ Use: "list", Short: "list configured targets", RunE: func(_ *cobra.Command, _ []string) error { targetsConfig, err := gApp.Config.GetTargets() if err != nil { return err } tabData := targetTable(targetsConfig, true) renderTable(tabData, targetListHeader) return nil }, PostRun: func(_ *cobra.Command, _ []string) { name = "" }, } var targetShowCmd = &cobra.Command{ Use: "show", Short: "show a target details", Annotations: map[string]string{ "--name": "TARGET", }, RunE: func(_ *cobra.Command, _ []string) error { if name == "" { fmt.Println("provide a target name with --name") return nil } targetsConfig, err := gApp.Config.GetTargets() if err != nil { return err } if tc, ok := targetsConfig[name]; ok { tabData := targetTable(map[string]*types.TargetConfig{name: tc}, false) renderTable(tabData, []string{"Param", "Value"}) return nil } return errors.New("unknown target") }, PostRun: func(_ *cobra.Command, _ []string) { name = "" }, } var subscriptionCmd = &cobra.Command{ Use: "subscription", Short: "manipulate configured subscriptions", } var subscriptionListCmd = &cobra.Command{ Use: "list", Short: "list configured subscriptions", RunE: func(_ *cobra.Command, _ []string) error { subs, err := gApp.Config.GetSubscriptions(nil) if err != nil { return err } tabData := subscriptionTable(subs, true) renderTable(tabData, subscriptionListHeader) return nil }, PostRun: func(_ *cobra.Command, _ []string) { name = "" }, } var subscriptionShowCmd = &cobra.Command{ Use: "show", Short: "show a subscription details", Annotations: map[string]string{ "--name": "SUBSCRIPTION", }, RunE: func(_ *cobra.Command, _ []string) error { if name == "" { fmt.Println("provide a subscription name with --name") return nil } subs, err := gApp.Config.GetSubscriptions(nil) if err != nil { return err } if s, ok := subs[name]; ok { tabData := subscriptionTable(map[string]*types.SubscriptionConfig{name: s}, false) renderTable(tabData, []string{"Param", "Value"}) return nil } return errors.New("unknown subscription") }, PostRun: func(_ *cobra.Command, _ []string) { name = "" }, } var outputCmd = &cobra.Command{ Use: "output", Short: "manipulate configured outputs", } var outputListCmd = &cobra.Command{ Use: "list", Short: "list configured outputs", RunE: func(_ *cobra.Command, _ []string) error { tabData := gApp.Config.GetOutputsConfigs() renderTable(tabData, []string{"Name", "Config"}) return nil }, } func renderTable(tabData [][]string, header []string) { table := tablewriter.NewWriter(os.Stdout) table.SetHeader(header) table.SetAlignment(tablewriter.ALIGN_LEFT) table.SetAutoFormatHeaders(false) table.SetAutoWrapText(false) table.AppendBulk(tabData) table.Render() } func targetTable(targetConfigs map[string]*types.TargetConfig, list bool) [][]string { if list { tabData := make([][]string, 0) for _, tc := range targetConfigs { tabData = append(tabData, []string{ tc.Name, tc.Address, tc.UsernameString(), tc.PasswordString(), tc.InsecureString(), tc.SkipVerifyString(), tc.TLSCAString(), tc.TLSCertString(), tc.TLSKeyString(), }) } sort.Slice(tabData, func(i, j int) bool { return tabData[i][0] < tabData[j][0] }) return tabData } if len(targetConfigs) > 1 { gApp.Logger.Printf("cannot show multiple targets") return nil } for _, tc := range targetConfigs { tabData := make([][]string, 0, 16) tabData = append(tabData, []string{"Name", tc.Name}) tabData = append(tabData, []string{"Address", tc.Address}) tabData = append(tabData, []string{"Username", tc.UsernameString()}) tabData = append(tabData, []string{"Password", tc.PasswordString()}) tabData = append(tabData, []string{"Insecure", tc.InsecureString()}) tabData = append(tabData, []string{"Skip Verify", tc.SkipVerifyString()}) tabData = append(tabData, []string{"TLS CA", tc.TLSCAString()}) tabData = append(tabData, []string{"TLS Certificate", tc.TLSCertString()}) tabData = append(tabData, []string{"TLS Key", tc.TLSKeyString()}) tabData = append(tabData, []string{"TLS Min Version", tc.TLSMinVersion}) tabData = append(tabData, []string{"TLS Max Version", tc.TLSMaxVersion}) tabData = append(tabData, []string{"TLS Version", tc.TLSVersion}) tabData = append(tabData, []string{"Subscriptions", strings.Join(tc.Subscriptions, "\n")}) tabData = append(tabData, []string{"Outputs", strings.Join(tc.Outputs, "\n")}) tabData = append(tabData, []string{"Buffer Size", tc.BufferSizeString()}) tabData = append(tabData, []string{"Retry Timer", tc.RetryTimer.String()}) return tabData } return [][]string{} } func subscriptionTable(scs map[string]*types.SubscriptionConfig, list bool) [][]string { if list { tabData := make([][]string, 0, len(scs)) for _, sub := range scs { enc := "" if sub.Encoding != nil { enc = *sub.Encoding } tabData = append(tabData, []string{ sub.Name, sub.ModeString(), sub.PrefixString(), sub.PathsString(), sub.SampleIntervalString(), enc, }) } sort.Slice(tabData, func(i, j int) bool { return tabData[i][0] < tabData[j][0] }) return tabData } if len(scs) > 1 { gApp.Logger.Printf("cannot show multiple subscriptions") return nil } for _, sub := range scs { tabData := make([][]string, 0, 8) tabData = append(tabData, []string{"Name", sub.Name}) tabData = append(tabData, []string{"Mode", sub.ModeString()}) tabData = append(tabData, []string{"Prefix", sub.PrefixString()}) tabData = append(tabData, []string{"Paths", sub.PathsString()}) tabData = append(tabData, []string{"Sample Interval", sub.SampleIntervalString()}) tabData = append(tabData, []string{"Encoding", *sub.Encoding}) tabData = append(tabData, []string{"Qos", sub.QosString()}) tabData = append(tabData, []string{"Heartbeat Interval", sub.HeartbeatIntervalString()}) return tabData } return [][]string{} } var name string func findMatchedXPATH(entry *yang.Entry, input string, prefixPresent bool) []goprompt.Suggest { if strings.HasPrefix(input, ":") { return nil } suggestions := make([]goprompt.Suggest, 0, 4) inputLen := len(input) for i, c := range input { if c == ':' && i+1 < inputLen { input = input[i+1:] inputLen -= (i + 1) break } } prependOrigin := gApp.Config.LocalFlags.PromptSuggestWithOrigin && !prefixPresent for name, child := range entry.Dir { if child.IsCase() || child.IsChoice() { for _, gchild := range child.Dir { suggestions = append(suggestions, findMatchedXPATH(gchild, input, prefixPresent)...) } continue } pathelem := "/" + name if strings.HasPrefix(pathelem, input) { node := "" if inputLen == 0 && prependOrigin { node = fmt.Sprintf("%s:/%s", entry.Name, name) } else if inputLen > 0 && input[0] == '/' { node = name } else { node = pathelem } suggestions = append(suggestions, goprompt.Suggest{Text: node, Description: buildXPATHDescription(child)}) if child.Key != "" { // list keylist := strings.Split(child.Key, " ") for _, key := range keylist { node = fmt.Sprintf("%s[%s=*]", node, key) } suggestions = append(suggestions, goprompt.Suggest{Text: node, Description: buildXPATHDescription(child)}) } } else if strings.HasPrefix(input, pathelem) { var prevC rune var bracketCount int var endIndex int = -1 var stop bool for i, c := range input { switch c { case '[': bracketCount++ case ']': if prevC != '\\' { bracketCount-- endIndex = i } case '/': if i != 0 && bracketCount == 0 { endIndex = i stop = true } } if stop { break } prevC = c } if bracketCount == 0 { if endIndex >= 0 { suggestions = append(suggestions, findMatchedXPATH(child, input[endIndex:], prefixPresent)...) } else { suggestions = append(suggestions, findMatchedXPATH(child, input[len(pathelem):], prefixPresent)...) } } } } return suggestions } func getDescriptionPrefix(entry *yang.Entry) string { switch { case entry.Dir == nil && entry.ListAttr != nil: // leaf-list return "[⋯]" case entry.Dir == nil: // leaf return " " case entry.ListAttr != nil: // list return "[+]" default: // container return "[+]" } } func getEntryType(entry *yang.Entry) string { if entry.Type != nil { return entry.Type.Kind.String() } return "" } func buildXPATHDescription(entry *yang.Entry) string { sb := strings.Builder{} sb.WriteString(getDescriptionPrefix(entry)) sb.WriteString(" ") sb.WriteString(getPermissions(entry)) sb.WriteString(" ") if gApp.Config.LocalFlags.PromptDescriptionWithTypes { n, _ := sb.WriteString(getEntryType(entry)) if n > 0 { sb.WriteString(", ") } } if gApp.Config.LocalFlags.PromptDescriptionWithPrefix { if entry.Prefix != nil { sb.WriteString(entry.Prefix.Name) sb.WriteString(": ") } } sb.WriteString(entry.Description) return sb.String() } func getPermissions(entry *yang.Entry) string { if entry == nil { return "(rw)" } switch entry.Config { case yang.TSTrue: return "(rw)" case yang.TSFalse: return "(ro)" case yang.TSUnset: return getPermissions(entry.Parent) } return "(rw)" } func findMatchedSchema(entry *yang.Entry, input string) []*yang.Entry { schemaNodes := []*yang.Entry{} for name, child := range entry.Dir { pathelem := "/" + name if strings.HasPrefix(pathelem, input) { schemaNodes = append(schemaNodes, child) if child.Key != "" { // list schemaNodes = append(schemaNodes, child) } } else if strings.HasPrefix(input, pathelem) { var prevC rune var bracketCount int var endIndex int = -1 var stop bool for i, c := range input { switch c { case '[': bracketCount++ case ']': if prevC != '\\' { bracketCount-- endIndex = i } case '/': if i != 0 && bracketCount == 0 { endIndex = i stop = true } } if stop { break } prevC = c } if bracketCount == 0 { if endIndex >= 0 { schemaNodes = append(schemaNodes, findMatchedSchema(child, input[endIndex:])...) } else { schemaNodes = append(schemaNodes, findMatchedSchema(child, input[len(pathelem):])...) } } } } return schemaNodes } var filePathCompleter = completer.FilePathCompleter{ IgnoreCase: true, Filter: func(fi os.FileInfo) bool { return fi.IsDir() || !strings.HasPrefix(fi.Name(), ".") }, } var yangPathCompleter = completer.FilePathCompleter{ IgnoreCase: true, Filter: func(fi os.FileInfo) bool { return fi.IsDir() || strings.HasSuffix(fi.Name(), ".yang") }, } var dirPathCompleter = completer.FilePathCompleter{ IgnoreCase: true, Filter: func(fi os.FileInfo) bool { return fi.IsDir() }, } func findDynamicSuggestions(annotation string, doc goprompt.Document) []goprompt.Suggest { switch annotation { case "XPATH": line := doc.CurrentLine() word := doc.GetWordBeforeCursor() suggestions := make([]goprompt.Suggest, 0, 16) entries := []*yang.Entry{} if index := strings.Index(line, "--prefix"); index >= 0 { line = strings.TrimLeft(line[index+8:], " ") // 8 is len("--prefix") end := strings.Index(line, " ") if end >= 0 { line = line[:end] lineLen := len(line) // remove "origin:" from prefix if present for i, c := range line { if c == ':' && i+1 < lineLen { line = line[i+1:] break } } // find yang entries matching the prefix for _, entry := range gApp.SchemaTree.Dir { entries = append(entries, findMatchedSchema(entry, line)...) } // generate suggestions from matching entries for _, entry := range entries { suggestions = append(suggestions, findMatchedXPATH(entry, word, true)...) } } } else { // generate suggestions from yang schema for _, entry := range gApp.SchemaTree.Dir { suggestions = append(suggestions, findMatchedXPATH(entry, word, false)...) } } sort.Slice(suggestions, func(i, j int) bool { if suggestions[i].Text == suggestions[j].Text { return suggestions[i].Description < suggestions[j].Description } return suggestions[i].Text < suggestions[j].Text }) return suggestions case "PREFIX": word := doc.GetWordBeforeCursor() suggestions := make([]goprompt.Suggest, 0, 16) for _, entry := range gApp.SchemaTree.Dir { suggestions = append(suggestions, findMatchedXPATH(entry, word, false)...) } sort.Slice(suggestions, func(i, j int) bool { if suggestions[i].Text == suggestions[j].Text { return suggestions[i].Description < suggestions[j].Description } return suggestions[i].Text < suggestions[j].Text }) return suggestions case "FILE": return filePathCompleter.Complete(doc) case "YANG": return yangPathCompleter.Complete(doc) case "MODEL": suggestions := make([]goprompt.Suggest, 0, len(gApp.SchemaTree.Dir)) for name, dir := range gApp.SchemaTree.Dir { if dir != nil { suggestions = append(suggestions, goprompt.Suggest{Text: name, Description: dir.Description}) continue } suggestions = append(suggestions, goprompt.Suggest{Text: name}) } sort.Slice(suggestions, func(i, j int) bool { if suggestions[i].Text == suggestions[j].Text { return suggestions[i].Description < suggestions[j].Description } return suggestions[i].Text < suggestions[j].Text }) return goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true) case "DIR": return dirPathCompleter.Complete(doc) case "ENCODING": suggestions := make([]goprompt.Suggest, 0, len(encodings)) for _, sugg := range encodings { suggestions = append(suggestions, goprompt.Suggest{Text: sugg[0], Description: sugg[1]}) } return goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true) case "FORMAT": suggestions := make([]goprompt.Suggest, 0, len(formats)) for _, sugg := range formats { suggestions = append(suggestions, goprompt.Suggest{Text: sugg[0], Description: sugg[1]}) } return goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true) case "STORE": suggestions := make([]goprompt.Suggest, 0, len(get.DataType)) for _, sugg := range get.DataType { suggestions = append(suggestions, goprompt.Suggest{Text: sugg[0], Description: sugg[1]}) } return goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true) case "SUBSC_MODE": suggestions := make([]goprompt.Suggest, 0, len(subscribe.Modes)) for _, sugg := range subscribe.Modes { suggestions = append(suggestions, goprompt.Suggest{Text: sugg[0], Description: sugg[1]}) } return goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true) case "STREAM_MODE": suggestions := make([]goprompt.Suggest, 0, len(subscribe.StreamModes)) for _, sugg := range subscribe.StreamModes { suggestions = append(suggestions, goprompt.Suggest{Text: sugg[0], Description: sugg[1]}) } return goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true) case "SUBSCRIPTION": subs := gApp.Config.GetSubscriptionsFromFile() suggestions := make([]goprompt.Suggest, 0, len(subs)) for _, sub := range subs { suggestions = append(suggestions, goprompt.Suggest{Text: sub.Name, Description: subscriptionDescription(sub)}) } return goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true) case "TARGET": targetsConfig := gApp.Config.TargetsList() suggestions := make([]goprompt.Suggest, 0, len(targetsConfig)) for _, target := range targetsConfig { sb := strings.Builder{} if target.Name != target.Address { sb.WriteString("address=") sb.WriteString(target.Address) sb.WriteString(", ") } sb.WriteString("secure=") if *target.Insecure { sb.WriteString("false") } else { sb.WriteString(fmt.Sprintf("%v", !(strings.Contains(doc.CurrentLine(), "--insecure")))) } suggestions = append(suggestions, goprompt.Suggest{Text: target.Name, Description: sb.String()}) } return goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true) case "OUTPUT": outputGroups := gApp.Config.GetOutputsSuggestions() suggestions := make([]goprompt.Suggest, 0, len(outputGroups)) for _, sugg := range outputGroups { suggestions = append(suggestions, goprompt.Suggest{Text: sugg.Name, Description: strings.Join(sugg.Types, ", ")}) } return goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true) } return []goprompt.Suggest{} } func subscriptionDescription(sub *types.SubscriptionConfig) string { sb := strings.Builder{} sb.WriteString("mode=") sb.WriteString(sub.Mode) sb.WriteString(", ") if strings.ToLower(sub.Mode) == "stream" { sb.WriteString("stream-mode=") sb.WriteString(sub.StreamMode) sb.WriteString(", ") if strings.ToLower(sub.StreamMode) == "sample" { sb.WriteString("sample-interval=") sb.WriteString(sub.SampleInterval.String()) sb.WriteString(", ") } } if sub.Encoding != nil { sb.WriteString("encoding=") sb.WriteString(*sub.Encoding) sb.WriteString(", ") } if sub.Prefix != "" { sb.WriteString("prefix=") sb.WriteString(sub.Prefix) sb.WriteString(", ") } sb.WriteString("path(s)=") sb.WriteString(strings.Join(sub.Paths, ",")) return sb.String() } func showCommandArguments(b *goprompt.Buffer) { doc := b.Document() showLocalFlags := false command := gApp.RootCmd args := strings.Fields(doc.CurrentLine()) if found, _, err := command.Find(args); err == nil { if command != found { showLocalFlags = true } command = found } maxNameLen := 0 suggestions := make([]goprompt.Suggest, 0, 32) if command.HasAvailableSubCommands() { for _, c := range command.Commands() { if c.Hidden { continue } length := len(c.Name()) if maxNameLen < length { maxNameLen = length } suggestions = append(suggestions, goprompt.Suggest{Text: c.Name(), Description: c.Short}) } } if showLocalFlags { addFlags := func(flag *pflag.Flag) { if flag.Hidden { return } length := len(flag.Name) if maxNameLen < length+2 { maxNameLen = length + 2 } suggestions = append(suggestions, goprompt.Suggest{Text: "--" + flag.Name, Description: flag.Usage}) } command.LocalFlags().VisitAll(addFlags) } suggestions = goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true) if len(suggestions) == 0 { return } if err := termbox.Init(); err != nil { gApp.Logger.Fatalf("%v", err) } w, _ := termbox.Size() termbox.Close() fmt.Printf("\n") maxDescLen := w - maxNameLen - 6 format := fmt.Sprintf(" %%-%ds : %%-%ds\n", maxNameLen, maxDescLen) for i := range suggestions { length := len(suggestions[i].Description) if length > maxDescLen { fmt.Printf(format, suggestions[i].Text, suggestions[i].Description[:maxDescLen]) } else { fmt.Printf(format, suggestions[i].Text, suggestions[i].Description) } } fmt.Printf("\n") } // ExecutePrompt load and run gnmic-prompt mode. func ExecutePrompt() { initPromptCmds() shell := &cmdPrompt{ RootCmd: gApp.RootCmd, GoPromptOptions: []goprompt.Option{ goprompt.OptionTitle("gnmic-prompt"), goprompt.OptionPrefix("gnmic> "), goprompt.OptionHistory(gApp.PromptHistory), goprompt.OptionMaxSuggestion(gApp.Config.LocalFlags.PromptMaxSuggestions), goprompt.OptionPrefixTextColor(getColor("prefix-color")), goprompt.OptionPreviewSuggestionTextColor(goprompt.Cyan), goprompt.OptionSuggestionTextColor(goprompt.White), goprompt.OptionSuggestionBGColor(getColor("suggestions-bg-color")), goprompt.OptionSelectedSuggestionTextColor(goprompt.Black), goprompt.OptionSelectedSuggestionBGColor(goprompt.White), goprompt.OptionDescriptionTextColor(goprompt.LightGray), goprompt.OptionDescriptionBGColor(getColor("description-bg-color")), goprompt.OptionSelectedDescriptionTextColor(goprompt.Black), goprompt.OptionSelectedDescriptionBGColor(goprompt.White), goprompt.OptionScrollbarBGColor(goprompt.DarkGray), goprompt.OptionScrollbarThumbColor(goprompt.Blue), goprompt.OptionAddASCIICodeBind( // bind '?' character to show cmd args goprompt.ASCIICodeBind{ ASCIICode: []byte{0x3f}, Fn: showCommandArguments, }, // bind OS X Option+Left key binding goprompt.ASCIICodeBind{ ASCIICode: []byte{0x1b, 0x62}, Fn: goprompt.GoLeftWord, }, // bind OS X Option+Right key binding goprompt.ASCIICodeBind{ ASCIICode: []byte{0x1b, 0x66}, Fn: goprompt.GoRightWord, }, ), goprompt.OptionAddKeyBind( // bind Linux CTRL+Left key binding goprompt.KeyBind{ Key: goprompt.ControlLeft, Fn: goprompt.GoLeftWord, }, // bind Linux CTRL+Right key binding goprompt.KeyBind{ Key: goprompt.ControlRight, Fn: goprompt.GoRightWord, }, // bind CTRL+Z key to delete path elements goprompt.KeyBind{ Key: goprompt.ControlZ, Fn: func(buf *goprompt.Buffer) { // If the last word before the cursor does not contain a "/" return. // This is needed to avoid deleting down to a previous flag value if !strings.Contains(buf.Document().GetWordBeforeCursorWithSpace(), "/") { return } // Check if the last rune is a PathSeparator and is not the path root then delete it if buf.Document().GetCharRelativeToCursor(0) == os.PathSeparator && buf.Document().GetCharRelativeToCursor(-1) != ' ' { buf.DeleteBeforeCursor(1) } // Delete down until the next "/" buf.DeleteBeforeCursor(len([]rune(buf.Document().GetWordBeforeCursorUntilSeparator("/")))) }, }, ), goprompt.OptionCompletionWordSeparator(completer.FilePathCompletionSeparator), // goprompt.OptionCompletionOnDown(), goprompt.OptionShowCompletionAtStart(), }, } shell.Run() } func initPromptCmds() { gApp.RootCmd.AddCommand(promptQuitCmd) gApp.RootCmd.AddCommand(targetCmd) gApp.RootCmd.AddCommand(subscriptionCmd) gApp.RootCmd.AddCommand(outputCmd) targetCmd.AddCommand(targetListCmd) targetCmd.AddCommand(targetShowCmd) targetShowCmd.Flags().StringVarP(&name, "name", "", "", "target name") subscriptionCmd.AddCommand(subscriptionListCmd) subscriptionCmd.AddCommand(subscriptionShowCmd) subscriptionShowCmd.Flags().StringVarP(&name, "name", "", "", "subscription name") outputCmd.AddCommand(outputListCmd) gApp.RootCmd.RemoveCommand(promptModeCmd) } // Reference: https://github.com/stromland/cobra-prompt // cmdPrompt requires RootCmd to run type cmdPrompt struct { // RootCmd is the start point, all its sub commands and flags will be available as suggestions RootCmd *cobra.Command // GoPromptOptions is for customize go-prompt // see https://github.com/c-bata/go-prompt/blob/master/option.go GoPromptOptions []goprompt.Option } // Run will automatically generate suggestions for all cobra commands // and flags defined by RootCmd and execute the selected commands. func (co cmdPrompt) Run() { p := goprompt.New( func(in string) { promptArgs, err := parsePromptArgs(in) if err != nil { fmt.Fprint(os.Stderr, err) return } os.Args = append([]string{os.Args[0]}, promptArgs...) if len(promptArgs) > 0 { err := co.RootCmd.Execute() if err == nil && in != "" { gApp.PromptHistory = append(gApp.PromptHistory, in) } } }, func(d goprompt.Document) []goprompt.Suggest { return findSuggestions(co, d) }, co.GoPromptOptions..., ) p.Run() } func parsePromptArgs(in string) ([]string, error) { var m = []string{} var s string // space suffix ensures the last string is appended in = strings.TrimSpace(in) + " " lastQuote := rune(0) isSpace := false for _, c := range in { switch { // ending a quoted item, break out, skip this character and reset lastQuote case c == lastQuote: lastQuote = rune(0) // in a quoted item, include this character case lastQuote != rune(0): s += string(c) // starting a quoted item, set lastQuote case unicode.In(c, unicode.Quotation_Mark): isSpace = false lastQuote = c // a space, append the string to the list // if it was not already added (previous char was a space) // and reset string s case unicode.IsSpace(c): if isSpace { continue } isSpace = true m = append(m, s) s = "" // add the char to the string default: isSpace = false s += string(c) } } if lastQuote != rune(0) { return nil, fmt.Errorf("quotes not closed") } return m, nil } func findSuggestions(co cmdPrompt, doc goprompt.Document) []goprompt.Suggest { command := co.RootCmd args := strings.Fields(doc.CurrentLine()) if found, _, err := command.Find(args); err == nil { command = found } suggestions := make([]goprompt.Suggest, 0, 32) // check flag annotation for the dynamic suggestion annotation := "" argnum := len(args) wordBefore := doc.GetWordBeforeCursor() if wordBefore == "" { if argnum >= 1 { annotation = command.Annotations[args[argnum-1]] } } else { if argnum >= 2 { annotation = command.Annotations[args[argnum-2]] } } if annotation != "" { return append(suggestions, findDynamicSuggestions(annotation, doc)...) } // add sub commands suggestions if they exist if command.HasAvailableSubCommands() { for _, c := range command.Commands() { if !c.Hidden { suggestions = append(suggestions, goprompt.Suggest{Text: c.Name(), Description: c.Short}) } } } addFlags := func(flag *pflag.Flag) { if flag.Hidden { return } suggestions = append(suggestions, goprompt.Suggest{Text: "--" + flag.Name, Description: flag.Usage}) } // load local flags command.LocalFlags().VisitAll(addFlags) if gApp.Config.LocalFlags.PromptSuggestAllFlags { // load inherited flags command.InheritedFlags().VisitAll(addFlags) } return goprompt.FilterHasPrefix(suggestions, doc.GetWordBeforeCursor(), true) } ================================================ FILE: pkg/cmd/prompt_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package cmd import ( "testing" "github.com/google/go-cmp/cmp" ) type testItem struct { in string out []string } var promptArgsTestSet = map[string]testItem{ "no_args": { in: ``, out: []string{""}, }, "one_arg": { in: `arg`, out: []string{"arg"}, }, "multiple_args": { in: `arg1 arg2 --flag1 val1`, out: []string{"arg1", "arg2", "--flag1", "val1"}, }, "single_quoted_args": { in: `arg1 arg2 --flag1 'val 1'`, out: []string{"arg1", "arg2", "--flag1", "val 1"}, }, "double_quoted_args": { in: `arg1 arg2 --flag1 "val 1"`, out: []string{"arg1", "arg2", "--flag1", "val 1"}, }, "quoted_args_with_multiple_spaces": { in: `arg1 arg2 --flag1 "val 1" --flag2 "val \t2"`, out: []string{"arg1", "arg2", "--flag1", "val 1", "--flag2", `val \t2`}, }, "quoted_args_with_spaces_between_items": { in: ` arg1 arg2 --flag1 'val 1' --flag2 "val 2" `, out: []string{"arg1", "arg2", "--flag1", "val 1", "--flag2", `val 2`}, }, } func TestGetInstancesTagsMatches(t *testing.T) { for name, item := range promptArgsTestSet { t.Run(name, func(t *testing.T) { res, err := parsePromptArgs(item.in) if err != nil { t.Logf("failed: %v", err) t.Fail() } t.Logf("exp value: %#v", item.out) t.Logf("got value: %#v", res) if !cmp.Equal(item.out, res) { t.Fail() } }) } } ================================================ FILE: pkg/cmd/proxy/proxy.go ================================================ package proxy import ( "github.com/openconfig/gnmic/pkg/app" "github.com/spf13/cobra" ) // proxyCmd represents the proxy command func New(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "proxy", Short: "run a gNMI server that proxies gNMI requests towards known targets", PreRunE: gApp.ProxyPreRunE, RunE: gApp.ProxyRunE, PostRun: func(cmd *cobra.Command, args []string) { gApp.CleanupPlugins() }, SilenceUsage: true, } return cmd } ================================================ FILE: pkg/cmd/root.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package cmd import ( "context" "fmt" "io/fs" "os" "os/signal" "syscall" "github.com/spf13/cobra" "github.com/openconfig/gnmic/pkg/app" "github.com/openconfig/gnmic/pkg/cmd/capabilities" "github.com/openconfig/gnmic/pkg/cmd/collector" "github.com/openconfig/gnmic/pkg/cmd/diff" "github.com/openconfig/gnmic/pkg/cmd/generate" "github.com/openconfig/gnmic/pkg/cmd/get" "github.com/openconfig/gnmic/pkg/cmd/getset" "github.com/openconfig/gnmic/pkg/cmd/listener" "github.com/openconfig/gnmic/pkg/cmd/path" "github.com/openconfig/gnmic/pkg/cmd/processor" "github.com/openconfig/gnmic/pkg/cmd/proxy" "github.com/openconfig/gnmic/pkg/cmd/set" "github.com/openconfig/gnmic/pkg/cmd/subscribe" "github.com/openconfig/gnmic/pkg/cmd/tree" "github.com/openconfig/gnmic/pkg/cmd/version" ) var encodings = [][2]string{ {"json", "JSON encoded string (RFC7159)"}, {"bytes", "byte sequence whose semantics is opaque to the protocol"}, {"proto", "serialised protobuf message using protobuf.Any"}, {"ascii", "ASCII encoded string representing text formatted according to a target-defined convention"}, {"json_ietf", "JSON_IETF encoded string (RFC7951)"}, } var formats = [][2]string{ {"json", "similar to protojson but with xpath style paths and decoded timestamps"}, {"protojson", "protocol buffer messages in JSON format"}, {"prototext", "protocol buffer messages in textproto format"}, {"event", "protocol buffer messages as a timestamped list of tags and values"}, {"proto", "protocol buffer messages in binary wire format"}, } var gApp = app.New() func newRootCmd() *cobra.Command { gApp.RootCmd = &cobra.Command{ Use: "gnmic", Short: "run gnmi rpcs from the terminal (https://gnmic.openconfig.net)", Annotations: map[string]string{ "--encoding": "ENCODING", "--config": "FILE", "--format": "FORMAT", "--address": "TARGET", }, PersistentPreRunE: gApp.PreRunE, } gApp.InitGlobalFlags() gApp.RootCmd.AddCommand(newCompletionCmd(gApp)) gApp.RootCmd.AddCommand(newPromptCmd()) // Subcommands gApp.RootCmd.AddCommand(capabilities.New(gApp)) gApp.RootCmd.AddCommand(get.New(gApp)) gApp.RootCmd.AddCommand(getset.New(gApp)) gApp.RootCmd.AddCommand(listener.New(gApp)) gApp.RootCmd.AddCommand(path.New(gApp)) gApp.RootCmd.AddCommand(diff.New(gApp)) gApp.RootCmd.AddCommand(generate.New(gApp)) gApp.RootCmd.AddCommand(set.New(gApp)) gApp.RootCmd.AddCommand(subscribe.New(gApp)) gApp.RootCmd.AddCommand(version.New(gApp)) gApp.RootCmd.AddCommand(proxy.New(gApp)) gApp.RootCmd.AddCommand(processor.New(gApp)) gApp.RootCmd.AddCommand(collector.New(gApp)) gApp.RootCmd.AddCommand(tree.New(gApp)) return gApp.RootCmd } // Execute adds all child commands to the root command and sets flags appropriately. // This is called by main.main(). It only needs to happen once to the rootCmd. func Execute() { setupCloseHandler(gApp.Cfn) if err := newRootCmd().Execute(); err != nil { //fmt.Println(err) os.Exit(1) } if gApp.PromptMode { ExecutePrompt() } } func init() { cobra.OnInitialize(initConfig) } // initConfig reads in config file and ENV variables if set. func initConfig() { err := gApp.Config.Load(gApp.Context()) if err == nil { return } if _, ok := err.(*fs.PathError); !ok { fmt.Fprintf(os.Stderr, "failed loading config file: %v\n", err) } } func setupCloseHandler(cancelFn context.CancelFunc) { c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGINT) go func() { sig := <-c fmt.Printf("\nreceived signal '%s'. terminating...\n", sig.String()) gApp.CleanupPlugins() cancelFn() os.Exit(0) }() } ================================================ FILE: pkg/cmd/set/set.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package set import ( "github.com/openconfig/gnmic/pkg/app" "github.com/spf13/cobra" ) // New creates the set command tree. func New(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "set", Short: "run gnmi set on targets", Annotations: map[string]string{ "--delete": "XPATH", "--prefix": "PREFIX", "--replace": "XPATH", "--replace-file": "FILE", "--replace-path": "XPATH", "--update": "XPATH", "--update-file": "FILE", "--update-path": "XPATH", }, PreRunE: gApp.SetPreRunE, RunE: gApp.SetRunE, SilenceUsage: true, } gApp.InitSetFlags(cmd) return cmd } ================================================ FILE: pkg/cmd/subscribe/subscribe.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package subscribe import ( "github.com/spf13/cobra" "github.com/openconfig/gnmic/pkg/app" ) var ( // Modes is the list of supported subscription modes. Modes = [][2]string{ {"once", "a single request/response channel. The target creates the relevant update messages, transmits them, and subsequently closes the RPC"}, {"stream", "long-lived subscriptions which continue to transmit updates relating to the set of paths that are covered within the subscription indefinitely"}, {"poll", "on-demand retrieval of data items via long-lived RPCs"}, } // StreamModes is the list of supported streaming modes. StreamModes = [][2]string{ {"target-defined", "the target MUST determine the best type of subscription to be created on a per-leaf basis"}, {"sample", "the value of the data item(s) MUST be sent once per sample interval to the client"}, {"on-change", "data updates are only sent when the value of the data item changes"}, } ) // New create the subscribe command tree. func New(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "subscribe", Aliases: []string{"sub"}, Short: "subscribe to gnmi updates on targets", Annotations: map[string]string{ "--path": "XPATH", "--prefix": "PREFIX", "--model": "MODEL", "--mode": "SUBSC_MODE", "--stream-mode": "STREAM_MODE", "--name": "SUBSCRIPTION", "--output": "OUTPUT", }, PreRunE: gApp.SubscribePreRunE, RunE: gApp.SubscribeRunE, PostRun: func(cmd *cobra.Command, args []string) { gApp.CleanupPlugins() }, SilenceUsage: true, } gApp.InitSubscribeFlags(cmd) return cmd } ================================================ FILE: pkg/cmd/tree/tree.go ================================================ package tree import ( "github.com/openconfig/gnmic/pkg/app" "github.com/spf13/cobra" ) // New create the subscribe command tree. func New(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "tree", Short: "print the commands tree", RunE: gApp.RunETree, PostRun: func(cmd *cobra.Command, args []string) { gApp.CleanupPlugins() }, SilenceUsage: true, } gApp.InitTreeFlags(cmd) return cmd } ================================================ FILE: pkg/cmd/version/version.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package version import ( "github.com/openconfig/gnmic/pkg/app" "github.com/spf13/cobra" ) // New creates the version command tree. func New(gApp *app.App) *cobra.Command { versionCmd := &cobra.Command{ Use: "version", Short: "show gnmic version", PreRun: func(cmd *cobra.Command, _ []string) { gApp.Config.SetLocalFlagsFromFile(cmd) }, Run: gApp.VersionRun, } versionCmd.AddCommand(newVersionUpgradeCmd(gApp)) return versionCmd } ================================================ FILE: pkg/cmd/version/versionUpgrade.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package version import ( "fmt" "github.com/openconfig/gnmic/pkg/app" "github.com/spf13/cobra" "github.com/spf13/pflag" ) // newVersionUpgradeCmd creates the version upgrade command tree. func newVersionUpgradeCmd(gApp *app.App) *cobra.Command { cmd := &cobra.Command{ Use: "upgrade", Aliases: []string{"up"}, Short: "upgrade gnmic to latest available version", PreRun: func(cmd *cobra.Command, _ []string) { gApp.Config.SetLocalFlagsFromFile(cmd) }, RunE: gApp.VersionUpgradeRun, } initVersionUpgradeFlags(cmd, gApp) return cmd } func initVersionUpgradeFlags(cmd *cobra.Command, gApp *app.App) { cmd.Flags().Bool("use-pkg", false, "upgrade using package") cmd.LocalFlags().VisitAll(func(flag *pflag.Flag) { gApp.Config.FileConfig.BindPFlag(fmt.Sprintf("%s-%s", cmd.Name(), flag.Name), flag) }) } ================================================ FILE: pkg/collector/api/const/const.go ================================================ package apiconst const ( APIv1 = "/api/v1" ConfigAPIv1URL = APIv1 + "/config" TargetsConfigAPIv1URL = ConfigAPIv1URL + "/targets" TargetsAPIv1URL = APIv1 + "/targets" SubscriptionsAPIv1URL = APIv1 + "/subscriptions" OutputsAPIv1URL = APIv1 + "/outputs" InputsAPIv1URL = APIv1 + "/inputs" AssignmentsAPIv1URL = APIv1 + "/assignments" ProcessorsAPIv1URL = APIv1 + "/processors" ) ================================================ FILE: pkg/collector/api/server/apiserver.go ================================================ package apiserver import ( "context" "crypto/tls" "fmt" "log/slog" "net" "net/http" "sync" "github.com/gorilla/mux" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/collector/env" cluster_manager "github.com/openconfig/gnmic/pkg/collector/managers/cluster" inputs_manager "github.com/openconfig/gnmic/pkg/collector/managers/inputs" outputs_manager "github.com/openconfig/gnmic/pkg/collector/managers/outputs" targets_manager "github.com/openconfig/gnmic/pkg/collector/managers/targets" collstore "github.com/openconfig/gnmic/pkg/collector/store" "github.com/openconfig/gnmic/pkg/config" "github.com/openconfig/gnmic/pkg/lockers" "github.com/openconfig/gnmic/pkg/logging" "github.com/prometheus/client_golang/prometheus" ) type Server struct { router *mux.Router store *collstore.Store locker lockers.Locker targetsManager *targets_manager.TargetsManager outputsManager *outputs_manager.OutputsManager inputsManager *inputs_manager.InputsManager clusterManager *cluster_manager.ClusterManager srv *http.Server logger *slog.Logger reg *prometheus.Registry applyLock *sync.Mutex } func NewServer( store *collstore.Store, targetManager *targets_manager.TargetsManager, outputsManager *outputs_manager.OutputsManager, inputsManager *inputs_manager.InputsManager, clusterManager *cluster_manager.ClusterManager, reg *prometheus.Registry, ) *Server { s := &Server{ router: mux.NewRouter(), store: store, targetsManager: targetManager, outputsManager: outputsManager, inputsManager: inputsManager, clusterManager: clusterManager, reg: reg, applyLock: new(sync.Mutex), } s.routes() s.registerMetrics() return s } func (s *Server) Start(locker lockers.Locker, wg *sync.WaitGroup) error { s.locker = locker s.logger = logging.NewLogger(s.store.Config, "component", "api-server") s.logger.Info("starting API server") apiServer, ok, err := s.store.Config.Get("api-server", "api-server") if err != nil { s.logger.Error("failed to get api-server config", "error", err) return err } if !ok { return nil } if apiServer == nil { s.logger.Info("api-server config not found, skipping API server") return nil } var apiCfg *config.APIServer var listener net.Listener switch apiCfgImpl := apiServer.(type) { case *config.APIServer: if apiCfgImpl == nil { s.logger.Info("api-server config is nil, skipping API server") return nil } apiCfg = apiCfgImpl env.ExpandAPIEnv(apiCfg) // create listener listener, err = createListener(apiCfg) if err != nil { s.logger.Error("failed to create listener", "error", err) return err } default: s.logger.Error("invalid api-server config", "config", apiServer) return fmt.Errorf("invalid api-server config: %v", apiServer) } s.srv = &http.Server{ Addr: apiCfg.Address, Handler: s.router, // TODO: add timeouts // ReadTimeout: apiCfg.Timeout / 2, // WriteTimeout: apiCfg.Timeout / 2, // IdleTimeout: apiCfg.Timeout / 2, } wg.Add(1) go func() { defer wg.Done() err := s.srv.Serve(listener) if err != nil { // TODO: ignore shutdown errors s.logger.Error("failed to serve API server", "error", err) } }() return nil } func (s *Server) Stop() { s.logger.Info("stopping API server") err := s.srv.Shutdown(context.Background()) // TODO: change context ? if err != nil { s.logger.Error("failed to shutdown API server", "error", err) } } type APIErrors struct { Errors []string `json:"errors,omitempty"` } func createListener(apiCfg *config.APIServer) (net.Listener, error) { if apiCfg.TLS != nil { tlsCfg, err := utils.NewTLSConfig( apiCfg.TLS.CaFile, apiCfg.TLS.CertFile, apiCfg.TLS.KeyFile, apiCfg.TLS.ClientAuth, apiCfg.TLS.SkipVerify, false, // genSelfSigned ) if err != nil { return nil, err } return tls.Listen("tcp", apiCfg.Address, tlsCfg) } return net.Listen("tcp", apiCfg.Address) } ================================================ FILE: pkg/collector/api/server/apply.go ================================================ package apiserver import ( "compress/gzip" "encoding/json" "errors" "io" "net/http" "github.com/mitchellh/mapstructure" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/config" ) // Apply request is a request to apply the configuration to the collector. // Any object that is not provided in the request is deleted. type ConfigApplyRequest struct { Targets map[string]*types.TargetConfig `json:"targets"` Subscriptions map[string]*types.SubscriptionConfig `json:"subscriptions"` Outputs map[string]map[string]any `json:"outputs"` Inputs map[string]map[string]any `json:"inputs"` Processors map[string]map[string]any `json:"processors"` TunnelTargetMatches map[string]*config.TunnelTargetMatch `json:"tunnel-target-matches"` } func validateApplyRequest(req *ConfigApplyRequest) error { if len(req.Targets) == 0 && len(req.Subscriptions) == 0 && len(req.Outputs) == 0 && len(req.Inputs) == 0 && len(req.Processors) == 0 && len(req.TunnelTargetMatches) == 0 { return nil // valid reset request } if len(req.Targets) > 0 && len(req.Subscriptions) == 0 { return errors.New("if targets are provided, at least one subscription is required") } if len(req.TunnelTargetMatches) > 0 && len(req.Subscriptions) == 0 { return errors.New("if tunnel-target-matches are provided, at least one subscription is required") } if len(req.Inputs) > 0 && len(req.Outputs) == 0 { return errors.New("if inputs are provided, at least one output is required") } // TODO: validate each config // TODO: validate references return nil } func (s *Server) handleConfigApply(w http.ResponseWriter, r *http.Request) { s.applyLock.Lock() defer s.applyLock.Unlock() var reader io.Reader = r.Body // if content is gzip, decompress it if r.Header.Get("Content-Encoding") == "gzip" { gz, err := gzip.NewReader(r.Body) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"gzip error: " + err.Error()}}) return } defer gz.Close() reader = gz } req, err := decodeRequest(reader) if err != nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"decode error: " + err.Error()}}) return } err = validateApplyRequest(req) if err != nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"validate error: " + err.Error()}}) return } // delete subscriptions existingSubscriptions, err := s.store.Config.Keys("subscriptions") if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"get subscriptions error: " + err.Error()}}) return } for _, name := range existingSubscriptions { if _, ok := req.Subscriptions[name]; !ok { _, _, err := s.store.Config.Delete("subscriptions", name) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } } // delete targets (skip tunnel-created targets which have TunnelTargetType set) existingTargets, err := s.store.Config.List("targets", func(_ string, val any) bool { // only include non-tunnel targets (TunnelTargetType == "") if tc, ok := val.(*types.TargetConfig); ok { return tc.TunnelTargetType == "" } return true }) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"get targets error: " + err.Error()}}) return } for name := range existingTargets { if _, ok := req.Targets[name]; !ok { _, _, err := s.store.Config.Delete("targets", name) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"delete target error: " + err.Error()}}) return } } } // delete inputs existingInputs, err := s.store.Config.Keys("inputs") if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"get inputs error: " + err.Error()}}) return } for _, name := range existingInputs { if _, ok := req.Inputs[name]; !ok { _, _, err := s.store.Config.Delete("inputs", name) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"delete input error: " + err.Error()}}) return } } } // delete outputs existingOutputs, err := s.store.Config.Keys("outputs") if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"get outputs error: " + err.Error()}}) return } for _, name := range existingOutputs { if _, ok := req.Outputs[name]; !ok { _, _, err := s.store.Config.Delete("outputs", name) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"delete output error: " + err.Error()}}) return } } } // delete processors existingProcessors, err := s.store.Config.Keys("processors") if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"get processors error: " + err.Error()}}) return } for _, name := range existingProcessors { if _, ok := req.Processors[name]; !ok { _, _, err := s.store.Config.Delete("processors", name) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"delete processor error: " + err.Error()}}) return } } } // delete tunnel-target-matches existingTunnelTargetMatches, err := s.store.Config.Keys("tunnel-target-matches") if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"get tunnel-target-matches error: " + err.Error()}}) return } for _, name := range existingTunnelTargetMatches { if _, ok := req.TunnelTargetMatches[name]; !ok { _, _, err := s.store.Config.Delete("tunnel-target-matches", name) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"delete tunnel-target-match error: " + err.Error()}}) return } } } // // apply subscriptions for name, cfg := range req.Subscriptions { _, err = s.store.Config.Set("subscriptions", name, cfg) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"set subscription error: " + err.Error()}}) return } } // apply processors for name, cfg := range req.Processors { _, err = s.store.Config.Set("processors", name, cfg) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"set processor error: " + err.Error()}}) return } } // apply outputs for name, cfg := range req.Outputs { _, err = s.store.Config.Set("outputs", name, cfg) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"set output error: " + err.Error()}}) return } } // apply targets for name, cfg := range req.Targets { _, err = s.store.Config.Set("targets", name, cfg) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"set target error: " + err.Error()}}) return } } // apply inputs for name, cfg := range req.Inputs { _, err = s.store.Config.Set("inputs", name, cfg) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"set input error: " + err.Error()}}) return } } // apply tunnel-target-matches for name, cfg := range req.TunnelTargetMatches { _, err = s.store.Config.Set("tunnel-target-matches", name, cfg) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"set tunnel-target-match error: " + err.Error()}}) return } } w.WriteHeader(http.StatusOK) } func decodeRequest(reader io.Reader) (*ConfigApplyRequest, error) { dec := json.NewDecoder(reader) reqMap := make(map[string]any) err := dec.Decode(&reqMap) if err != nil { return nil, err } req, err := decodeRequestMap(reqMap) if err != nil { return nil, err } return req, nil } func decodeRequestMap(reqMap map[string]any) (*ConfigApplyRequest, error) { req := new(ConfigApplyRequest) mdec, err := mapstructure.NewDecoder( &mapstructure.DecoderConfig{ DecodeHook: mapstructure.StringToTimeDurationHookFunc(), Result: req, }, ) if err != nil { return nil, err } err = mdec.Decode(reqMap) if err != nil { return nil, err } return req, nil } ================================================ FILE: pkg/collector/api/server/assignment.go ================================================ package apiserver import ( "encoding/json" "fmt" "io" "net/http" "sort" "github.com/gorilla/mux" ) type assignmentConfig struct { Assignments []*assignement `json:"assignments"` Unassignments []string `json:"unassignments,omitempty"` } func (a *assignmentConfig) validate() error { if len(a.Assignments) == 0 && len(a.Unassignments) == 0 { return fmt.Errorf("assignments or unassignments is required") } if len(a.Assignments) > 0 { for _, assignment := range a.Assignments { if assignment.Target == "" { return fmt.Errorf("target is required") } if assignment.Member == "" { return fmt.Errorf("member is required") } } } if len(a.Unassignments) > 0 { for _, unassignment := range a.Unassignments { if unassignment == "" { return fmt.Errorf("unassignment is required") } } } return nil } type assignement struct { Target string `json:"target,omitempty"` Member string `json:"member,omitempty"` // Epoch int64 `json:"epoch,omitempty"` } // create an assignment by sending a POST request to the assignments endpoint // sample body: // // { // "assignments": [{"target": "target1", "member": "member1", "epoch": 1}, {"target": "target2", "member": "member2", "epoch": 2}] // list of target names // } // // sample curl command: // curl --request POST -H "Content-Type: application/json" \ // -d '{"assignments": [{"target": "target1", "member": "member1", "epoch": 1}, {"target": "target2", "member": "member2", "epoch": 2}]}' \ // http://localhost:8080/api/v1/assignments func (s *Server) handleAssignmentPost(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } defer r.Body.Close() cfg := new(assignmentConfig) err = json.Unmarshal(body, &cfg) if err != nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if cfg == nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"invalid assignment config"}}) return } err = cfg.validate() if err != nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } for _, assignment := range cfg.Assignments { _, err := s.store.Config.Set("assignments", assignment.Target, assignment) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } for _, unassignment := range cfg.Unassignments { _, _, err = s.store.Config.Delete("assignments", unassignment) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } w.WriteHeader(http.StatusOK) } // delete an assignment by sending a DELETE request to the assignments endpoint // sample curl command: // curl --request DELETE http://localhost:8080/api/v1/assignments/target1 func (s *Server) handleAssignmentDelete(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] ok, _, err := s.store.Config.Delete("assignments", id) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"assignment not found"}}) return } w.WriteHeader(http.StatusOK) } type assignmentResponse struct { Member string `json:"member"` Targets []string `json:"targets"` } func (s *Server) handleAssignmentGet(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] if id == "" { assignments, err := s.store.Config.List("assignments") if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } ar := &assignmentResponse{ Targets: make([]string, 0, len(assignments)), } for k, v := range assignments { if ar.Member == "" { vm, ok := v.(*assignement) if ok { ar.Member = vm.Member } } ar.Targets = append(ar.Targets, k) } sort.Strings(ar.Targets) err = json.NewEncoder(w).Encode(ar) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } return } assignment, ok, err := s.store.Config.Get("assignments", id) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"assignment not found"}}) return } err = json.NewEncoder(w).Encode(assignment) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } ================================================ FILE: pkg/collector/api/server/cluster.go ================================================ package apiserver import ( "context" "encoding/json" "fmt" "io" "net/http" "strings" "time" "github.com/gorilla/mux" cluster_manager "github.com/openconfig/gnmic/pkg/collector/managers/cluster" "github.com/openconfig/gnmic/pkg/config" ) type clusteringResponse struct { ClusterName string `json:"name,omitempty"` NumberOfLockedTargets int `json:"number-of-locked-targets"` Leader string `json:"leader,omitempty"` Members []clusterMember `json:"members,omitempty"` } type clusterMember struct { Name string `json:"name,omitempty"` APIEndpoint string `json:"api-endpoint,omitempty"` IsLeader bool `json:"is-leader,omitempty"` NumberOfLockedTargets int `json:"number-of-locked-nodes"` LockedTargets []string `json:"locked-targets,omitempty"` } func (s *Server) requireClustering(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if s.locker == nil { w.WriteHeader(http.StatusServiceUnavailable) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"clustering is not enabled"}}) return } next.ServeHTTP(w, r) }) } func (s *Server) handleClusteringGet(w http.ResponseWriter, r *http.Request) { // clusteringResponse clusteringCfg, ok, err := s.store.Config.Get("clustering", "clustering") if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"clustering config not found"}}) return } clustering, ok := clusteringCfg.(*config.Clustering) if !ok { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"clustering config is not a config.Clustering"}}) return } if clustering == nil { return } cr := &clusteringResponse{ ClusterName: clustering.ClusterName, NumberOfLockedTargets: 0, Leader: "", Members: make([]clusterMember, 0), } cr.Leader, err = s.clusterManager.GetLeaderName(r.Context()) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } services, err := s.locker.GetServices(r.Context(), fmt.Sprintf("%s-gnmic-api", clustering.ClusterName), nil) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } instanceNodes, err := s.clusterManager.GetInstanceToTargetsMapping(r.Context()) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } for _, v := range instanceNodes { cr.NumberOfLockedTargets += len(v) } cr.Members = make([]clusterMember, len(services)) for i, srv := range services { scheme := cluster_manager.GetAPIScheme(&cluster_manager.Member{Labels: srv.Tags}) cr.Members[i].APIEndpoint = fmt.Sprintf("%s://%s", scheme, srv.Address) cr.Members[i].Name = strings.TrimSuffix(srv.ID, "-api") cr.Members[i].IsLeader = cr.Leader == cr.Members[i].Name cr.Members[i].NumberOfLockedTargets = len(instanceNodes[cr.Members[i].Name]) cr.Members[i].LockedTargets = instanceNodes[cr.Members[i].Name] } err = json.NewEncoder(w).Encode(cr) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } func (s *Server) handleClusterRebalance(w http.ResponseWriter, r *http.Request) { isLeader, err := s.clusterManager.IsLeader(r.Context()) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !isLeader { w.WriteHeader(http.StatusBadRequest) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{"not leader"}}) return } err = s.clusterManager.RebalanceTargetsV2() if err != nil { w.WriteHeader(http.StatusBadRequest) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } s.logger.Info("rebalance targets completed") w.WriteHeader(http.StatusAccepted) } func (s *Server) handleClusteringLeaderGet(w http.ResponseWriter, r *http.Request) { clusteringCfg, ok, err := s.store.Config.Get("clustering", "clustering") if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{"clustering config not found"}}) return } clustering, ok := clusteringCfg.(*config.Clustering) if !ok { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{"clustering config is not a config.Clustering"}}) return } ctx, cancel := context.WithCancel(r.Context()) defer cancel() // get leader leader, err := s.clusterManager.GetLeaderName(ctx) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } services, err := s.locker.GetServices(ctx, fmt.Sprintf("%s-gnmic-api", clustering.ClusterName), nil) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } instanceNodes, err := s.clusterManager.GetInstanceToTargetsMapping(ctx) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } members := make([]clusterMember, 1) for _, s := range services { if strings.TrimSuffix(s.ID, "-api") != leader { continue } scheme := cluster_manager.GetAPIScheme(&cluster_manager.Member{Labels: s.Tags}) // add the leader as a member then break from loop members[0].APIEndpoint = fmt.Sprintf("%s://%s", scheme, s.Address) members[0].Name = strings.TrimSuffix(s.ID, "-api") members[0].IsLeader = true members[0].NumberOfLockedTargets = len(instanceNodes[members[0].Name]) members[0].LockedTargets = instanceNodes[members[0].Name] break } b, err := json.Marshal(members) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } w.Write(b) } func (s *Server) handleClusteringLeaderDelete(w http.ResponseWriter, r *http.Request) { leader, err := s.clusterManager.IsLeader(r.Context()) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !leader { w.WriteHeader(http.StatusBadRequest) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{"not leader"}}) return } err = s.clusterManager.WithdrawLeader(r.Context(), 30*time.Second) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } func (s *Server) handleClusteringMembersGet(w http.ResponseWriter, r *http.Request) { // clusteringResponse clusteringCfg, ok, err := s.store.Config.Get("clustering", "clustering") if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{"clustering config not found"}}) return } clustering, ok := clusteringCfg.(*config.Clustering) if !ok { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{"clustering config is not a config.Clustering"}}) return } // get leader leader, err := s.clusterManager.GetLeaderName(r.Context()) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } services, err := s.locker.GetServices(r.Context(), fmt.Sprintf("%s-gnmic-api", clustering.ClusterName), nil) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } instanceNodes, err := s.clusterManager.GetInstanceToTargetsMapping(r.Context()) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } members := make([]clusterMember, len(services)) for i, s := range services { scheme := cluster_manager.GetAPIScheme(&cluster_manager.Member{Labels: s.Tags}) members[i].APIEndpoint = fmt.Sprintf("%s://%s", scheme, s.Address) members[i].Name = strings.TrimSuffix(s.ID, "-api") members[i].IsLeader = leader == members[i].Name members[i].NumberOfLockedTargets = len(instanceNodes[members[i].Name]) members[i].LockedTargets = instanceNodes[members[i].Name] } b, err := json.Marshal(members) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } _, err = w.Write(b) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } func (s *Server) handleClusteringDrainInstance(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] if id == "" { w.WriteHeader(http.StatusBadRequest) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{"member id is required"}}) return } leader, err := s.clusterManager.IsLeader(r.Context()) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !leader { w.WriteHeader(http.StatusBadRequest) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{"not leader"}}) return } err = s.clusterManager.DrainMember(r.Context(), id) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } w.WriteHeader(http.StatusOK) } type moveRequest struct { Target string `json:"target,omitempty"` DestinationMember string `json:"member,omitempty"` } func (s *Server) handleClusterMove(w http.ResponseWriter, r *http.Request) { // read body body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusBadRequest) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } defer r.Body.Close() var moveRequest moveRequest err = json.Unmarshal(body, &moveRequest) if err != nil { w.WriteHeader(http.StatusBadRequest) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if moveRequest.Target == "" { w.WriteHeader(http.StatusBadRequest) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{"target is required"}}) return } if moveRequest.DestinationMember == "" { w.WriteHeader(http.StatusBadRequest) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{"member is required"}}) return } // TODO: implement move target // err = s.clusterManager.MoveTarget(r.Context(), moveRequest.Target, moveRequest.DestinationMember) // if err != nil { // w.WriteHeader(http.StatusInternalServerError) // _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) // return // } // w.WriteHeader(http.StatusOK) } // // func (s *Server) handleConfig(w http.ResponseWriter, r *http.Request) { res, err := s.store.Config.GetAll() if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } configs := make(map[string]any) for k, v := range res { configs[k] = v } sanitizedRes := sanitizeConfig(configs) err = json.NewEncoder(w).Encode(sanitizedRes) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } func sanitizeConfig(res map[string]any) map[string]any { keys := []string{ "api-server", "gnmi-server", "loader", "clustering", "global-flags", "tunnel-server", } for _, key := range keys { val, ok := res[key] if !ok { continue } switch v := val.(type) { case map[string]any: res[key] = v[key] default: } } return res } func (s *Server) handleHealthzGet(w http.ResponseWriter, r *http.Request) { res := map[string]string{"status": "healthy"} b, err := json.Marshal(res) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } _, err = w.Write(b) if err != nil { w.WriteHeader(http.StatusInternalServerError) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) } } func (s *Server) handleAdminShutdown(w http.ResponseWriter, r *http.Request) { // Not implemented yet w.WriteHeader(http.StatusNotImplemented) _ = json.NewEncoder(w).Encode(APIErrors{Errors: []string{"shutdown not implemented"}}) } ================================================ FILE: pkg/collector/api/server/inputs.go ================================================ package apiserver import ( "encoding/json" "fmt" "io" "net/http" "slices" "github.com/gorilla/mux" "github.com/openconfig/gnmic/pkg/inputs" ) func (s *Server) handleConfigInputsGet(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] if id == "" { inputs, err := s.store.Config.List("inputs") if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } err = json.NewEncoder(w).Encode(inputs) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } else { input, ok, err := s.store.Config.Get("inputs", id) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"input not found"}}) return } err = json.NewEncoder(w).Encode(input) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } } func (s *Server) handleConfigInputsPost(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } defer r.Body.Close() cfg := map[string]any{} fmt.Println("body", string(body)) err = json.Unmarshal(body, &cfg) if err != nil { w.WriteHeader(http.StatusBadRequest) fmt.Println("err", err) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if cfg == nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"invalid input config"}}) return } inputType, ok := cfg["type"].(string) if !ok { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"input type is required"}}) return } if !slices.Contains(inputs.InputTypes, inputType) { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("unknown input type: %q", inputType)}}) return } inputName, ok := cfg["name"].(string) if !ok { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"input name is required"}}) return } if inputName == "" { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"input name is required"}}) return } initializer := inputs.Inputs[inputType] if initializer == nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("unknown input type: %q", inputType)}}) return } impl := initializer() err = impl.Validate(cfg) if err != nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } // validate event processors exist evps, ok := cfg["event-processors"].([]string) if ok { for _, ep := range evps { _, ok, err := s.store.Config.Get("processors", ep) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("event processor %s not found", ep)}}) return } } } // validate outputs exist outs, ok := cfg["outputs"].([]string) if ok { for _, out := range outs { _, ok, err := s.store.Config.Get("outputs", out) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("output %s not found", out)}}) return } } } _, err = s.store.Config.Set("inputs", inputName, cfg) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } w.WriteHeader(http.StatusOK) } func (s *Server) handleConfigInputsProcessorsPatch(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNotImplemented) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"not implemented"}}) } func (s *Server) handleConfigInputsOutputsPatch(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNotImplemented) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"not implemented"}}) } func (s *Server) handleConfigInputsDelete(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] ok, _, err := s.store.Config.Delete("inputs", id) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"input not found"}}) return } w.WriteHeader(http.StatusOK) } ================================================ FILE: pkg/collector/api/server/metrics.go ================================================ package apiserver import "github.com/prometheus/client_golang/prometheus/collectors" func (s *Server) registerMetrics() { s.reg.MustRegister(collectors.NewGoCollector()) s.reg.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{})) } ================================================ FILE: pkg/collector/api/server/outputs.go ================================================ package apiserver import ( "encoding/json" "fmt" "io" "net/http" "github.com/gorilla/mux" "github.com/openconfig/gnmic/pkg/outputs" ) // get all outputs // curl command: // curl http://localhost:8080/api/v1/outputs func (s *Server) handleConfigOutputsGet(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] if id == "" { outputs, err := s.store.Config.List("outputs") if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } err = json.NewEncoder(w).Encode(outputs) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } else { output, ok, err := s.store.Config.Get("outputs", id) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"output not found"}}) return } err = json.NewEncoder(w).Encode(output) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } } // sample body: // // { // "name": "output1", // "type": "file", // "filename": "output.txt" // } // // curl command: // curl --request POST -H "Content-Type: application/json" \ // -d '{"name": "output1", "type": "file", "filename": "output.txt"}' \ // http://localhost:8080/api/v1/outputs func (s *Server) handleConfigOutputsPost(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } defer r.Body.Close() cfg := map[string]any{} err = json.Unmarshal(body, &cfg) if err != nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if cfg == nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"invalid output config"}}) return } outputName, ok := cfg["name"].(string) if !ok { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"output name is required"}}) return } if outputName == "" { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"output name is required"}}) return } initializer := outputs.Outputs[cfg["type"].(string)] if initializer == nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"unknown output type"}}) return } impl := initializer() err = impl.Validate(cfg) if err != nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } evps, ok := cfg["event-processors"].([]string) if ok { for _, ep := range evps { _, ok, err := s.store.Config.Get("processors", ep) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("event processor %s not found", ep)}}) return } } } _, err = s.store.Config.Set("outputs", outputName, cfg) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } w.WriteHeader(http.StatusOK) } func (s *Server) handleConfigOutputsProcessorsPatch(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNotImplemented) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"not implemented"}}) } func (s *Server) handleConfigOutputsDelete(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] err := s.outputsManager.DeleteOutput(id) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } w.WriteHeader(http.StatusOK) } ================================================ FILE: pkg/collector/api/server/processors.go ================================================ package apiserver import ( "encoding/json" "fmt" "io" "net/http" "github.com/gorilla/mux" ) type ProcessorConfigResponse struct { Name string `json:"name"` Type string `json:"type"` Config any `json:"config"` } type ProcessorConfigRequest struct { Name string `json:"name"` Type string `json:"type"` Config any `json:"config"` } func (s *Server) handleConfigProcessorsGet(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] if id != "" { processor, ok, err := s.store.Config.Get("processors", id) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"processor not found"}}) return } processorConfig := ProcessorConfigResponse{ Name: id, } for k, v := range processor.(map[string]any) { switch v.(type) { case map[string]any: processorConfig.Type = k processorConfig.Config = v default: w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("unknown processor type: %T", v)}}) return } } err = json.NewEncoder(w).Encode(processorConfig) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } return } processors, err := s.store.Config.List("processors") if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } processorConfigs := make([]ProcessorConfigResponse, 0, len(processors)) for name, processor := range processors { switch processor := processor.(type) { case map[string]any: processorConfig := ProcessorConfigResponse{ Name: name, } for k, v := range processor { switch v.(type) { case map[string]any: processorConfig.Type = k processorConfig.Config = v default: w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("unknown processor type: %T", v)}}) return } break } processorConfigs = append(processorConfigs, processorConfig) default: w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("unknown processor type: %T", processor)}}) return } } err = json.NewEncoder(w).Encode(processorConfigs) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } func (s *Server) handleConfigProcessorsPost(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } defer r.Body.Close() cfg := new(ProcessorConfigRequest) err = json.Unmarshal(body, &cfg) if err != nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if cfg == nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"invalid processor config"}}) return } if cfg.Name == "" { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"processor name is required"}}) return } if cfg.Type == "" { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"processor type is required"}}) return } storeCfg := map[string]any{ cfg.Type: cfg.Config, } _, err = s.store.Config.Set("processors", cfg.Name, storeCfg) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } w.WriteHeader(http.StatusOK) } func (s *Server) handleConfigProcessorsDelete(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] if s.outputsManager.ProcessorInUse(id) { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"processor is in use by outputs"}}) return } if s.inputsManager.ProcessorInUse(id) { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"processor is in use by inputs"}}) return } _, _, err := s.store.Config.Delete("processors", id) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } w.WriteHeader(http.StatusOK) } ================================================ FILE: pkg/collector/api/server/routes.go ================================================ package apiserver import ( "net/http" "github.com/gorilla/mux" "github.com/prometheus/client_golang/prometheus/promhttp" ) func (s *Server) routes() { s.router.Handle("/metrics", promhttp.HandlerFor(s.reg, promhttp.HandlerOpts{})) apiV1 := s.router.PathPrefix("/api/v1").Subrouter() s.clusterRoutes(apiV1) s.configRoutes(apiV1) s.targetRoutes(apiV1) s.subscriptionRoutes(apiV1) s.healthRoutes(apiV1) s.adminRoutes(apiV1) s.outputsRoutes(apiV1) s.inputsRoutes(apiV1) s.processorsRoutes(apiV1) s.assignmentRoutes(apiV1) s.sseRoutes(apiV1) } func (s *Server) healthRoutes(r *mux.Router) { r.HandleFunc("/healthz", s.handleHealthzGet).Methods(http.MethodGet) } func (s *Server) adminRoutes(r *mux.Router) { r.HandleFunc("/admin/shutdown", s.handleAdminShutdown).Methods(http.MethodPost) } func (s *Server) clusterRoutes(r *mux.Router) { cluster := r.PathPrefix("/cluster").Subrouter() cluster.Use(s.requireClustering) cluster.HandleFunc("", s.handleClusteringGet).Methods(http.MethodGet) cluster.HandleFunc("/rebalance", s.handleClusterRebalance).Methods(http.MethodPost) cluster.HandleFunc("/leader", s.handleClusteringLeaderGet).Methods(http.MethodGet) cluster.HandleFunc("/leader", s.handleClusteringLeaderDelete).Methods(http.MethodDelete) cluster.HandleFunc("/members", s.handleClusteringMembersGet).Methods(http.MethodGet) cluster.HandleFunc("/members/{id}/drain", s.handleClusteringDrainInstance).Methods(http.MethodPost) cluster.HandleFunc("/move", s.handleClusterMove).Methods(http.MethodPost) } func (s *Server) configRoutes(r *mux.Router) { r.HandleFunc("/config", s.handleConfig).Methods(http.MethodGet) r.HandleFunc("/config/apply", s.handleConfigApply).Methods(http.MethodPost) r.HandleFunc("/config/targets", s.handleConfigTargetsGet).Methods(http.MethodGet) r.HandleFunc("/config/targets/{id}", s.handleConfigTargetsGet).Methods(http.MethodGet) r.HandleFunc("/config/targets", s.handleConfigTargetsPost).Methods(http.MethodPost) r.HandleFunc("/config/targets/{id}", s.handleConfigTargetsDelete).Methods(http.MethodDelete) r.HandleFunc("/config/targets/{id}/subscriptions", s.handleConfigTargetsSubscriptionsPatch).Methods(http.MethodPatch) r.HandleFunc("/config/targets/{id}/outputs", s.handleConfigTargetsOutputsPatch).Methods(http.MethodPatch) r.HandleFunc("/config/targets/{id}/state", s.handleTargetsStatePost).Methods(http.MethodPost) // r.HandleFunc("/config/subscriptions", s.handleConfigSubscriptionsGet).Methods(http.MethodGet) r.HandleFunc("/config/subscriptions", s.handleConfigSubscriptionsPost).Methods(http.MethodPost) r.HandleFunc("/config/subscriptions/{id}", s.handleConfigSubscriptionsGet).Methods(http.MethodGet) r.HandleFunc("/config/subscriptions/{id}", s.handleConfigSubscriptionsDelete).Methods(http.MethodDelete) // r.HandleFunc("/config/outputs", s.handleConfigOutputsGet).Methods(http.MethodGet) r.HandleFunc("/config/outputs", s.handleConfigOutputsPost).Methods(http.MethodPost) r.HandleFunc("/config/outputs/{id}", s.handleConfigOutputsGet).Methods(http.MethodGet) r.HandleFunc("/config/outputs/{id}/processors", s.handleConfigOutputsProcessorsPatch).Methods(http.MethodPatch) r.HandleFunc("/config/outputs/{id}", s.handleConfigOutputsDelete).Methods(http.MethodDelete) // r.HandleFunc("/config/inputs", s.handleConfigInputsGet).Methods(http.MethodGet) r.HandleFunc("/config/inputs", s.handleConfigInputsPost).Methods(http.MethodPost) r.HandleFunc("/config/inputs/{id}", s.handleConfigInputsGet).Methods(http.MethodGet) r.HandleFunc("/config/inputs/{id}/processors", s.handleConfigInputsProcessorsPatch).Methods(http.MethodPatch) r.HandleFunc("/config/inputs/{id}/outputs", s.handleConfigInputsOutputsPatch).Methods(http.MethodPatch) r.HandleFunc("/config/inputs/{id}", s.handleConfigInputsDelete).Methods(http.MethodDelete) // r.HandleFunc("/config/processors", s.handleConfigProcessorsGet).Methods(http.MethodGet) r.HandleFunc("/config/processors", s.handleConfigProcessorsPost).Methods(http.MethodPost) r.HandleFunc("/config/processors/{id}", s.handleConfigProcessorsGet).Methods(http.MethodGet) r.HandleFunc("/config/processors/{id}", s.handleConfigProcessorsDelete).Methods(http.MethodDelete) // r.HandleFunc("/config/tunnel-target-matches", s.handleConfigTunnelTargetMatchesGet).Methods(http.MethodGet) r.HandleFunc("/config/tunnel-target-matches", s.handleConfigTunnelTargetMatchesPost).Methods(http.MethodPost) r.HandleFunc("/config/tunnel-target-matches/{id}", s.handleConfigTunnelTargetMatchesGet).Methods(http.MethodGet) r.HandleFunc("/config/tunnel-target-matches/{id}", s.handleConfigTunnelTargetMatchesDelete).Methods(http.MethodDelete) } func (s *Server) targetRoutes(r *mux.Router) { r.HandleFunc("/targets", s.handleTargetsGet).Methods(http.MethodGet) r.HandleFunc("/targets/{id}", s.handleTargetsGet).Methods(http.MethodGet) r.HandleFunc("/targets/{id}/state/{state}", s.handleTargetsStatePost).Methods(http.MethodPost) } func (s *Server) subscriptionRoutes(r *mux.Router) { r.HandleFunc("/subscriptions", s.handleSubscriptionsGet).Methods(http.MethodGet) r.HandleFunc("/subscriptions/{id}", s.handleSubscriptionsGet).Methods(http.MethodGet) } func (s *Server) outputsRoutes(r *mux.Router) { // r.HandleFunc("/outputs", c.handleOutputsGet).Methods(http.MethodGet) // r.HandleFunc("/outputs", c.handleOutputsPost).Methods(http.MethodPost) // r.HandleFunc("/outputs/{id}", c.handleOutputsGet).Methods(http.MethodGet) // r.HandleFunc("/outputs/{id}", c.handleOutputsPatch).Methods(http.MethodPatch) // r.HandleFunc("/outputs/{id}", c.handleOutputsDelete).Methods(http.MethodDelete) } func (s *Server) inputsRoutes(r *mux.Router) { // r.HandleFunc("/inputs", c.handleInputsGet).Methods(http.MethodGet) // r.HandleFunc("/inputs", c.handleInputsPost).Methods(http.MethodPost) // r.HandleFunc("/inputs/{id}", c.handleInputsGet).Methods(http.MethodGet) // r.HandleFunc("/inputs/{id}", c.handleInputsDelete).Methods(http.MethodDelete) } func (s *Server) processorsRoutes(r *mux.Router) { // r.HandleFunc("/processors", c.handleProcessorsGet).Methods(http.MethodGet) // r.HandleFunc("/processors", c.handleProcessorsPost).Methods(http.MethodPost) // r.HandleFunc("/processors/{id}", c.handleProcessorsGet).Methods(http.MethodGet) // r.HandleFunc("/processors/{id}", c.handleProcessorsDelete).Methods(http.MethodDelete) } func (s *Server) assignmentRoutes(r *mux.Router) { r.HandleFunc("/assignments", s.handleAssignmentGet).Methods(http.MethodGet) r.HandleFunc("/assignments", s.handleAssignmentPost).Methods(http.MethodPost) r.HandleFunc("/assignments/{id}", s.handleAssignmentGet).Methods(http.MethodGet) r.HandleFunc("/assignments/{id}", s.handleAssignmentDelete).Methods(http.MethodDelete) } func (s *Server) sseRoutes(r *mux.Router) { r.HandleFunc("/sse/{kind}", s.handleSSE).Methods(http.MethodGet) } ================================================ FILE: pkg/collector/api/server/sse.go ================================================ package apiserver import ( "encoding/json" "fmt" "net/http" "time" "github.com/gorilla/mux" collstore "github.com/openconfig/gnmic/pkg/collector/store" "github.com/zestor-dev/zestor/store" ) // validSSEKinds are the store kinds that can be streamed via SSE. var validSSEKinds = map[string]struct{}{ collstore.KindTargets: {}, collstore.KindOutputs: {}, collstore.KindInputs: {}, collstore.KindSubscriptions: {}, collstore.KindProcessors: {}, collstore.KindAssignments: {}, collstore.KindTunnelTargetMatches: {}, } // sseEvent is the JSON payload sent for each SSE event. type sseEvent struct { Timestamp time.Time `json:"timestamp"` // when the event was emitted Store string `json:"store"` // "config" or "state" Kind string `json:"kind"` // targets, outputs, inputs, subscriptions Name string `json:"name"` // entry name / key EventType string `json:"event-type"` // create, update, delete Object any `json:"object"` // the entry value } // handleSSE streams store changes for a given kind as Server-Sent Events. // // GET /api/v1/sse/{kind}?store=config|state|all // // Query parameter "store" selects which store(s) to watch: // - "config" — config store only // - "state" — state store only // - "all" — both (default) // // The client receives an event stream where each event is a JSON-encoded // sseEvent. An initial snapshot of existing entries is sent first (as // "create" events), followed by live updates. func (s *Server) handleSSE(w http.ResponseWriter, r *http.Request) { kind := mux.Vars(r)["kind"] if _, ok := validSSEKinds[kind]; !ok { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{ Errors: []string{fmt.Sprintf("invalid kind %q; expected one of: targets, outputs, inputs, subscriptions", kind)}, }) return } storeFilter := r.URL.Query().Get("store") if storeFilter == "" { storeFilter = "all" } if storeFilter != "config" && storeFilter != "state" && storeFilter != "all" { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{ Errors: []string{fmt.Sprintf("invalid store %q; expected one of: config, state, all", storeFilter)}, }) return } // ensure the ResponseWriter supports flushing. flusher, ok := w.(http.Flusher) if !ok { http.Error(w, "streaming not supported", http.StatusInternalServerError) return } // set up watches. var configCh <-chan *store.Event[any] var configCancel func() var stateCh <-chan *store.Event[any] var stateCancel func() if storeFilter == "config" || storeFilter == "all" { var err error configCh, configCancel, err = s.store.Config.Watch(kind, store.WithInitialReplay[any]()) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } defer configCancel() } if storeFilter == "state" || storeFilter == "all" { var err error stateCh, stateCancel, err = s.store.State.Watch(kind, store.WithInitialReplay[any]()) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } defer stateCancel() } // set SSE headers. w.Header().Set("Content-Type", "text/event-stream") w.Header().Set("Cache-Control", "no-cache") w.Header().Set("Connection", "keep-alive") w.Header().Set("X-Accel-Buffering", "no") // disable nginx buffering flusher.Flush() ctx := r.Context() keepalive := time.NewTicker(15 * time.Second) defer keepalive.Stop() for { select { case <-ctx.Done(): return case <-keepalive.C: // SSE comment line as keepalive to detect broken connections. fmt.Fprintf(w, ": keepalive\n\n") flusher.Flush() case ev, ok := <-configCh: if !ok { configCh = nil continue } s.sendSSEEvent(w, flusher, "config", ev) case ev, ok := <-stateCh: if !ok { stateCh = nil continue } s.sendSSEEvent(w, flusher, "state", ev) } } } func (s *Server) sendSSEEvent(w http.ResponseWriter, flusher http.Flusher, storeName string, ev *store.Event[any]) { data, err := json.Marshal(sseEvent{ Timestamp: time.Now(), Store: storeName, Kind: ev.Kind, Name: ev.Name, EventType: string(ev.EventType), Object: ev.Object, }) if err != nil { s.logger.Error("failed to marshal SSE event", "error", err) return } fmt.Fprintf(w, "event: %s\ndata: %s\n\n", ev.EventType, data) flusher.Flush() } ================================================ FILE: pkg/collector/api/server/subscriptions.go ================================================ package apiserver import ( "encoding/json" "fmt" "io" "net/http" "github.com/gorilla/mux" "github.com/mitchellh/mapstructure" "github.com/openconfig/gnmic/pkg/api/types" targets_manager "github.com/openconfig/gnmic/pkg/collector/managers/targets" ) func (s *Server) handleConfigSubscriptionsGet(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] if id == "" { // Get all subscriptions subscriptions, err := s.store.Config.List("subscriptions") if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } err = json.NewEncoder(w).Encode(subscriptions) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } return } // Get single subscription by ID sub, ok, err := s.store.Config.Get("subscriptions", id) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"subscription not found"}}) return } err = json.NewEncoder(w).Encode(sub) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } // sample body: // // { // "name": "subscription1", // "prefix": "interfaces", // "set-target": true, // "paths": ["interfaces/interface/state"], // "mode": "STREAM", // "stream-mode": "TARGET_DEFINED", // "encoding": "JSON", // "sample-interval": 1000 // } // // sample curl command: // curl --request POST -H "Content-Type: application/json" \ // -d '{"name": "subscription1", "prefix": "interfaces", "set-target": true, "paths": ["interfaces/interface/state"], "mode": "STREAM", "stream-mode": "TARGET_DEFINED", "encoding": "JSON", "sample-interval": 1000}' \ // http://localhost:8080/api/v1/subscriptions func (s *Server) handleConfigSubscriptionsPost(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } defer r.Body.Close() m := map[string]any{} err = json.Unmarshal(body, &m) if err != nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } sub := new(types.SubscriptionConfig) // handles time.Duration decoder, err := mapstructure.NewDecoder( &mapstructure.DecoderConfig{ DecodeHook: mapstructure.StringToTimeDurationHookFunc(), Result: sub, }) if err != nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } err = decoder.Decode(m) if err != nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } _, err = s.store.Config.Set("subscriptions", sub.Name, sub) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } w.WriteHeader(http.StatusOK) } func (s *Server) handleConfigSubscriptionsDelete(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] ok, _, err := s.store.Config.Delete("subscriptions", id) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"subscription not found"}}) return } w.WriteHeader(http.StatusOK) } // SubscriptionResponse represents a subscription with its targets and states type SubscriptionResponse struct { Name string `json:"name"` Config *types.SubscriptionConfig `json:"config"` Targets map[string]*TargetStateInfo `json:"targets"` } // TargetStateInfo represents target information for a subscription type TargetStateInfo struct { Name string `json:"name"` State string `json:"state"` } // handleSubscriptionsGet returns runtime subscription information with target states func (s *Server) handleSubscriptionsGet(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] if id == "" { subscriptionsMap := make(map[string]*SubscriptionResponse) // build current subscriptions map _, err := s.store.Config.List("subscriptions", func(name string, sub any) bool { switch sub := sub.(type) { case *types.SubscriptionConfig: subscriptionsMap[sub.Name] = &SubscriptionResponse{ Name: sub.Name, Config: sub, Targets: make(map[string]*TargetStateInfo), } } return false }) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } // Collect all subscriptions from targets s.targetsManager.ForEach(func(mt *targets_manager.ManagedTarget) { subStates := mt.T.SubscribeClientStates() for name, active := range subStates { if subscriptionsMap[name] == nil { subscriptionsMap[name] = &SubscriptionResponse{ Name: name, Targets: make(map[string]*TargetStateInfo), } } state := "stopped" if active { state = "running" } subscriptionsMap[name].Targets[mt.Name] = &TargetStateInfo{ Name: mt.Name, State: state, } } }) // Return all subscriptions response := make([]*SubscriptionResponse, 0) for _, sub := range subscriptionsMap { response = append(response, sub) } err = json.NewEncoder(w).Encode(response) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } return } sub, ok, err := s.store.Config.Get("subscriptions", id) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"subscription not found"}}) return } switch sub := sub.(type) { case *types.SubscriptionConfig: response := &SubscriptionResponse{ Name: sub.Name, Config: sub, Targets: make(map[string]*TargetStateInfo), } s.targetsManager.ForEach(func(mt *targets_manager.ManagedTarget) { subStates := mt.T.SubscribeClientStates() active, exists := subStates[id] if !exists { return } state := "stopped" if active { state = "running" } response.Targets[mt.Name] = &TargetStateInfo{ Name: mt.Name, State: state, } }) err = json.NewEncoder(w).Encode(response) if err != nil { w.WriteHeader(http.StatusInternalServerError) } default: w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("unknown subscription type: %T", sub)}}) return } } ================================================ FILE: pkg/collector/api/server/targets.go ================================================ package apiserver import ( "encoding/json" "errors" "fmt" "io" "net/http" "github.com/gorilla/mux" "github.com/mitchellh/mapstructure" "github.com/openconfig/gnmic/pkg/api/types" targets_manager "github.com/openconfig/gnmic/pkg/collector/managers/targets" collstore "github.com/openconfig/gnmic/pkg/collector/store" "github.com/zestor-dev/zestor/store" ) func (s *Server) handleConfigTargetsGet(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] if id == "" { targets, err := s.store.Config.List("targets") if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } err = json.NewEncoder(w).Encode(targets) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } return } tc, ok, err := s.store.Config.Get("targets", id) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("target %s not found", id)}}) return } err = json.NewEncoder(w).Encode(tc) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } // sample body: // // { // "name": "target1", // "address": "127.0.0.1:57400", // "username": "admin", // "password": "admin" // } // // sample curl command: // curl --request POST -H "Content-Type: application/json" \ // -d '{"name": "target1", "address": "127.0.0.1:57400", "username": "admin", "password": "admin", "insecure": true}' \ // http://localhost:8080/api/v1/config/targets func (s *Server) handleConfigTargetsPost(w http.ResponseWriter, r *http.Request) { body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } defer r.Body.Close() m := map[string]any{} err = json.Unmarshal(body, &m) if err != nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } tc := new(types.TargetConfig) // handles time.Duration decoder, err := mapstructure.NewDecoder( &mapstructure.DecoderConfig{ DecodeHook: mapstructure.StringToTimeDurationHookFunc(), Result: tc, }) if err != nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } err = decoder.Decode(m) if err != nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if tc.Name == "" { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"target name is required"}}) return } if tc.Address == "" { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"target address is required"}}) return } // validate subscriptions for _, sub := range tc.Subscriptions { _, ok, err := s.store.Config.Get("subscriptions", sub) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("subscription %s not found", sub)}}) return } } // validate outputs for _, out := range tc.Outputs { _, ok, err := s.store.Config.Get("outputs", out) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("output %s not found", out)}}) return } } _, err = s.store.Config.Set("targets", tc.Name, tc) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } w.WriteHeader(http.StatusOK) } // update target subscriptions by sending a PATCH request to the target id // sample body: // // { // "subscriptions": ["sub1", "sub2"] // } // // sample curl command: // curl --request PATCH -H "Content-Type: application/json" \ // -d '["sub1", "sub2"]' \ // http://localhost:8080/api/v1/config/targets/target1/subscriptions func (s *Server) handleConfigTargetsSubscriptionsPatch(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } defer r.Body.Close() subs := []string{} if len(body) > 0 { err = json.Unmarshal(body, &subs) if err != nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } // ensure subscriptions exist for _, sub := range subs { _, ok, err := s.store.Config.Get("subscriptions", sub) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("subscription %s not found", sub)}}) return } } _, err = s.store.Config.SetFn("targets", id, func(v any) (any, error) { tc, ok := v.(*types.TargetConfig) if !ok { return nil, fmt.Errorf("malformed target config") } tc.Subscriptions = subs return tc, nil }) if err != nil { if errors.Is(err, store.ErrKeyNotFound) { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("target %s not found", id)}}) return } w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } w.WriteHeader(http.StatusOK) } // update target outputs by sending a PATCH request to the target id // sample body: // // { // "outputs": ["output1", "output2"] // } // // sample curl command: // curl --request PATCH -H "Content-Type: application/json" \ // -d '["output1", "output2"]' \ // http://localhost:8080/api/v1/config/targets/target1/outputs func (s *Server) handleConfigTargetsOutputsPatch(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] body, err := io.ReadAll(r.Body) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } defer r.Body.Close() outs := []string{} if len(body) > 0 { err = json.Unmarshal(body, &outs) if err != nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } // ensure outputs exist for _, out := range outs { _, ok, err := s.store.Config.Get("outputs", out) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("output %s not found", out)}}) return } } _, err = s.store.Config.SetFn("targets", id, func(v any) (any, error) { tc, ok := v.(*types.TargetConfig) if !ok { return nil, fmt.Errorf("malformed target config") } tc.Outputs = outs return tc, nil }) if err != nil { if errors.Is(err, store.ErrKeyNotFound) { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("target %s not found", id)}}) return } w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } w.WriteHeader(http.StatusOK) } func (s *Server) handleConfigTargetsDelete(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] _, _, err := s.store.Config.Delete("targets", id) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } w.WriteHeader(http.StatusOK) } type TargetResponse struct { Name string `json:"name"` Config *types.TargetConfig `json:"config"` State *collstore.TargetState `json:"state,omitempty"` } func (s *Server) handleTargetsGet(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] response := make([]*TargetResponse, 0) if id == "" { s.targetsManager.ForEach(func(mt *targets_manager.ManagedTarget) { ts := s.targetsManager.GetTargetState(mt.Name) response = append(response, targetResponseFromState(mt.Name, mt.T.Config, ts)) }) err := json.NewEncoder(w).Encode(response) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } return } mt := s.targetsManager.Lookup(id) if mt == nil { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"target not found"}}) return } ts := s.targetsManager.GetTargetState(id) response = append(response, targetResponseFromState(mt.Name, mt.T.Config, ts)) err := json.NewEncoder(w).Encode(response) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } // targetResponseFromState builds a TargetResponse from a TargetState. func targetResponseFromState(name string, cfg *types.TargetConfig, ts *collstore.TargetState) *TargetResponse { return &TargetResponse{ Name: name, Config: cfg, State: ts, } } // change target state to running/stopped by sending a POST request to the target id func (s *Server) handleTargetsStatePost(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] if id == "" { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"target id is required"}}) return } state := vars["state"] if state == "" { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"target state is required"}}) return } mt := s.targetsManager.Lookup(id) if mt == nil { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"target not found"}}) return } ok := s.targetsManager.SetIntendedState(id, state) if !ok { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{"target state not changed"}}) return } w.WriteHeader(http.StatusOK) } ================================================ FILE: pkg/collector/api/server/tunnel_target_match.go ================================================ package apiserver import ( "encoding/json" "fmt" "net/http" "github.com/gorilla/mux" "github.com/mitchellh/mapstructure" "github.com/openconfig/gnmic/pkg/config" ) const ( tunnelTargetMatchesPath = "tunnel-target-matches" ) func (s *Server) handleConfigTunnelTargetMatchesGet(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] if id == "" { targets, err := s.store.Config.List(tunnelTargetMatchesPath) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } err = json.NewEncoder(w).Encode(targets) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } return } tc, ok, err := s.store.Config.Get(tunnelTargetMatchesPath, id) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("target %s not found", id)}}) return } err = json.NewEncoder(w).Encode(tc) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } } func (s *Server) handleConfigTunnelTargetMatchesPost(w http.ResponseWriter, r *http.Request) { dec := json.NewDecoder(r.Body) defer r.Body.Close() var m map[string]any if err := dec.Decode(&m); err != nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } tc := new(config.TunnelTargetMatch) if err := mapstructure.Decode(m, tc); err != nil { w.WriteHeader(http.StatusBadRequest) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } _, err := s.store.Config.Set(tunnelTargetMatchesPath, tc.ID, tc) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } w.WriteHeader(http.StatusOK) } func (s *Server) handleConfigTunnelTargetMatchesDelete(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) id := vars["id"] ok, _, err := s.store.Config.Delete(tunnelTargetMatchesPath, id) if err != nil { w.WriteHeader(http.StatusInternalServerError) json.NewEncoder(w).Encode(APIErrors{Errors: []string{err.Error()}}) return } if !ok { w.WriteHeader(http.StatusNotFound) json.NewEncoder(w).Encode(APIErrors{Errors: []string{fmt.Sprintf("target %s not found", id)}}) return } w.WriteHeader(http.StatusOK) } ================================================ FILE: pkg/collector/collector.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package collector import ( "context" "errors" "fmt" "log" "log/slog" "os" "sync" "time" "github.com/grafana/pyroscope-go" "github.com/openconfig/gnmic/pkg/cache" apiserver "github.com/openconfig/gnmic/pkg/collector/api/server" cluster_manager "github.com/openconfig/gnmic/pkg/collector/managers/cluster" inputs_manager "github.com/openconfig/gnmic/pkg/collector/managers/inputs" outputs_manager "github.com/openconfig/gnmic/pkg/collector/managers/outputs" targets_manager "github.com/openconfig/gnmic/pkg/collector/managers/targets" collstore "github.com/openconfig/gnmic/pkg/collector/store" "github.com/openconfig/gnmic/pkg/config" "github.com/openconfig/gnmic/pkg/lockers" "github.com/openconfig/gnmic/pkg/logging" "github.com/openconfig/gnmic/pkg/pipeline" "github.com/prometheus/client_golang/prometheus" "github.com/spf13/cobra" "github.com/zestor-dev/zestor/store" ) const ( defaultPipelineBufferSize = 1_000_000 initLockerRetryTimer = 1 * time.Second ) type Collector struct { ctx context.Context store *collstore.Store apiServer *apiserver.Server cache cache.Cache locker lockers.Locker clusterManager *cluster_manager.ClusterManager targetsManager *targets_manager.TargetsManager outputsManager *outputs_manager.OutputsManager inputsManager *inputs_manager.InputsManager pipeline chan *pipeline.Msg wg *sync.WaitGroup logger *slog.Logger reg *prometheus.Registry profiler *pyroscope.Profiler } func New(ctx context.Context, configStore store.Store[any]) *Collector { s := collstore.NewStore(configStore) pipeline := make(chan *pipeline.Msg, defaultPipelineBufferSize) reg := prometheus.NewRegistry() clusterManager := cluster_manager.NewClusterManager(s) targetsManager := targets_manager.NewTargetsManager(ctx, s, pipeline, reg) outputsManager := outputs_manager.NewOutputsManager(ctx, s, pipeline, reg) inputsManager := inputs_manager.NewInputsManager(ctx, s, pipeline) apiServer := apiserver.NewServer( s, targetsManager, outputsManager, inputsManager, clusterManager, reg, ) c := &Collector{ ctx: ctx, store: s, apiServer: apiServer, clusterManager: clusterManager, targetsManager: targetsManager, outputsManager: outputsManager, inputsManager: inputsManager, pipeline: pipeline, wg: new(sync.WaitGroup), reg: reg, } return c } func (c *Collector) Start() error { c.logger = logging.NewLogger(c.store.Config, "component", "collector") var err error c.logger.Info("starting collector") // build locker for { err = c.getLocker() if err != nil { c.logger.Error("failed to get locker", "error", err) time.Sleep(initLockerRetryTimer) continue } break } if c.locker != nil { // start cluster manager err = c.clusterManager.Start(c.ctx, c.locker, c.wg) if err != nil { return err } } // create cache c.initCache() // start managers err = c.targetsManager.Start(c.locker, c.wg) if err != nil { return err } err = c.outputsManager.Start(c.cache, c.wg) if err != nil { return err } err = c.inputsManager.Start(c.wg) if err != nil { return err } // start API server err = c.apiServer.Start(c.locker, c.wg) if err != nil { return err } // wait for context done <-c.ctx.Done() // wait for all components to finish c.wg.Wait() return nil } func (c *Collector) Stop() { c.logger.Info("stopping collector") c.apiServer.Stop() c.clusterManager.Stop() c.targetsManager.Stop() c.outputsManager.Stop() c.inputsManager.Stop() } func (c *Collector) getLocker() error { clusteringMap, ok, err := c.store.Config.Get("clustering", "clustering") if err != nil { return err } if !ok { return nil } clustering, ok := clusteringMap.(*config.Clustering) if !ok { return errors.New("malformed clustering config") } if clustering == nil { return nil } if lockerType, ok := clustering.Locker["type"]; ok { c.logger.Info("starting locker", "type", lockerType) if initializer, ok := lockers.Lockers[lockerType.(string)]; ok { lock := initializer() err := lock.Init(c.ctx, clustering.Locker, lockers.WithLogger(log.New(os.Stdout, "", log.LstdFlags))) if err != nil { return err } c.locker = lock return nil } return fmt.Errorf("unknown locker type %q", lockerType) } return errors.New("missing locker type field") } func (c *Collector) CollectorPreRunE(cmd *cobra.Command, args []string) error { if len(args) > 0 { return fmt.Errorf("unknown command %q", args[0]) } pyroscopeServerAddress := cmd.Flag("pyroscope-server-address").Value.String() pyroscopeApplicationName := cmd.Flag("pyroscope-application-name").Value.String() if pyroscopeServerAddress == "" { return nil } var err error c.profiler, err = pyroscope.Start( pyroscope.Config{ ApplicationName: pyroscopeApplicationName, ServerAddress: pyroscopeServerAddress, ProfileTypes: []pyroscope.ProfileType{ pyroscope.ProfileInuseObjects, pyroscope.ProfileAllocObjects, pyroscope.ProfileInuseSpace, pyroscope.ProfileAllocSpace, pyroscope.ProfileGoroutines, pyroscope.ProfileMutexCount, pyroscope.ProfileMutexDuration, pyroscope.ProfileBlockCount, pyroscope.ProfileBlockDuration, }, }) if err != nil { return err } return nil } func (c *Collector) CollectorRunE(cmd *cobra.Command, _ []string) error { if c.profiler != nil { defer c.profiler.Stop() } return c.Start() } // InitSubscribeFlags used to init or reset subscribeCmd flags for gnmic-prompt mode func (c *Collector) InitCollectorFlags(cmd *cobra.Command) { cmd.ResetFlags() cmd.Flags().String("pyroscope-server-address", "", "Pyroscope server address") cmd.Flags().String("pyroscope-application-name", "gnmic-collector", "Pyroscope application name") } func (c *Collector) initCache() error { cfg, ok, err := c.store.Config.Get("gnmi-server", "gnmi-server") if err != nil { return err } if !ok { return nil } if cfg == nil { return nil } switch cfg := cfg.(type) { case *config.GNMIServer: if cfg == nil { return nil } if cfg.Cache == nil { return nil } c.cache, err = cache.New(cfg.Cache, cache.WithLogger(log.New(os.Stdout, "", log.LstdFlags))) if err != nil { return err } } return nil } ================================================ FILE: pkg/collector/env/env.go ================================================ package env import ( "os" "strconv" "strings" "github.com/openconfig/gnmic/pkg/config" ) func ExpandClusterEnv(clusteringConfig *config.Clustering) { clusteringConfig.ClusterName = os.ExpandEnv(clusteringConfig.ClusterName) clusteringConfig.InstanceName = os.ExpandEnv(clusteringConfig.InstanceName) clusteringConfig.ServiceAddress = os.ExpandEnv(clusteringConfig.ServiceAddress) for i := range clusteringConfig.Tags { clusteringConfig.Tags[i] = os.ExpandEnv(clusteringConfig.Tags[i]) } if clusteringConfig.TLS != nil { clusteringConfig.TLS.CaFile = os.ExpandEnv(clusteringConfig.TLS.CaFile) clusteringConfig.TLS.CertFile = os.ExpandEnv(clusteringConfig.TLS.CertFile) clusteringConfig.TLS.KeyFile = os.ExpandEnv(clusteringConfig.TLS.KeyFile) } if clusteringConfig.Locker != nil { expandLockerEnv(clusteringConfig.Locker) } } func ExpandAPIEnv(apiConfig *config.APIServer) { apiConfig.Address = os.ExpandEnv(apiConfig.Address) if apiConfig.TLS != nil { apiConfig.TLS.CaFile = os.ExpandEnv(apiConfig.TLS.CaFile) apiConfig.TLS.CertFile = os.ExpandEnv(apiConfig.TLS.CertFile) apiConfig.TLS.KeyFile = os.ExpandEnv(apiConfig.TLS.KeyFile) apiConfig.TLS.ClientAuth = os.ExpandEnv(apiConfig.TLS.ClientAuth) } apiConfig.EnableMetrics = os.ExpandEnv(strings.ToLower(strconv.FormatBool(apiConfig.EnableMetrics))) == "true" apiConfig.EnableProfiling = os.ExpandEnv(strings.ToLower(strconv.FormatBool(apiConfig.EnableProfiling))) == "true" apiConfig.Debug = os.ExpandEnv(strings.ToLower(strconv.FormatBool(apiConfig.Debug))) == "true" apiConfig.HealthzDisableLogging = os.ExpandEnv(strings.ToLower(strconv.FormatBool(apiConfig.HealthzDisableLogging))) == "true" } func expandLockerEnv(locker map[string]any) { expandMapEnv(locker) } func expandMapEnv(m map[string]any) { for f := range m { switch v := m[f].(type) { case string: m[f] = os.ExpandEnv(v) case map[string]any: expandMapEnv(v) m[f] = v case []any: for i, item := range v { switch item := item.(type) { case string: v[i] = os.ExpandEnv(item) case map[string]any: expandMapEnv(item) case []any: expandSliceEnv(item) } } m[f] = v } } } func expandSliceEnv(s []any) { for i, item := range s { switch item := item.(type) { case string: s[i] = os.ExpandEnv(item) case map[string]any: expandMapEnv(item) case []any: expandSliceEnv(item) } } } ================================================ FILE: pkg/collector/env/env_test.go ================================================ package env_test import ( "os" "testing" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/collector/env" "github.com/openconfig/gnmic/pkg/config" ) func TestExpandClusterEnv(t *testing.T) { // Set env vars for expansion tests; restore after. const testCluster = "test-cluster-name" const testInstance = "test-instance-01" const testAddr = "0.0.0.0:7890" const testTag = "region:us-east" const testCa = "/etc/ssl/ca.pem" const testCert = "/etc/ssl/cert.pem" const testKey = "/etc/ssl/key.pem" os.Setenv("GNMIC_CLUSTER", testCluster) os.Setenv("GNMIC_INSTANCE", testInstance) os.Setenv("GNMIC_ADDR", testAddr) os.Setenv("GNMIC_TAG", testTag) os.Setenv("GNMIC_CA", testCa) os.Setenv("GNMIC_CERT", testCert) os.Setenv("GNMIC_KEY", testKey) defer func() { os.Unsetenv("GNMIC_CLUSTER") os.Unsetenv("GNMIC_INSTANCE") os.Unsetenv("GNMIC_ADDR") os.Unsetenv("GNMIC_TAG") os.Unsetenv("GNMIC_CA") os.Unsetenv("GNMIC_CERT") os.Unsetenv("GNMIC_KEY") }() tests := []struct { name string clusteringConfig *config.Clustering validate func(t *testing.T, c *config.Clustering) }{ { name: "empty_config", clusteringConfig: &config.Clustering{}, validate: func(t *testing.T, c *config.Clustering) { if c.ClusterName != "" || c.InstanceName != "" || c.ServiceAddress != "" { t.Errorf("empty config should remain empty") } }, }, { name: "literal_strings_unchanged", clusteringConfig: &config.Clustering{ ClusterName: "my-cluster", InstanceName: "instance-1", ServiceAddress: ":7890", }, validate: func(t *testing.T, c *config.Clustering) { if c.ClusterName != "my-cluster" || c.InstanceName != "instance-1" || c.ServiceAddress != ":7890" { t.Errorf("literal strings should be unchanged") } }, }, { name: "cluster_fields_expanded", clusteringConfig: &config.Clustering{ ClusterName: "$GNMIC_CLUSTER", InstanceName: "$GNMIC_INSTANCE", ServiceAddress: "$GNMIC_ADDR", }, validate: func(t *testing.T, c *config.Clustering) { if c.ClusterName != testCluster || c.InstanceName != testInstance || c.ServiceAddress != testAddr { t.Errorf("got cluster=%q instance=%q addr=%q", c.ClusterName, c.InstanceName, c.ServiceAddress) } }, }, { name: "tags_expanded", clusteringConfig: &config.Clustering{ Tags: []string{"$GNMIC_TAG", "literal", "${GNMIC_TAG}"}, }, validate: func(t *testing.T, c *config.Clustering) { if len(c.Tags) != 3 { t.Fatalf("len(Tags)=%d", len(c.Tags)) } if c.Tags[0] != testTag || c.Tags[1] != "literal" || c.Tags[2] != testTag { t.Errorf("tags: got %q", c.Tags) } }, }, { name: "tls_nil_no_panic", clusteringConfig: &config.Clustering{ ClusterName: "c1", TLS: nil, }, validate: func(t *testing.T, c *config.Clustering) { if c.TLS != nil { t.Error("TLS should still be nil") } }, }, { name: "tls_paths_expanded", clusteringConfig: &config.Clustering{ ClusterName: "c1", TLS: &types.TLSConfig{ CaFile: "$GNMIC_CA", CertFile: "$GNMIC_CERT", KeyFile: "$GNMIC_KEY", }, }, validate: func(t *testing.T, c *config.Clustering) { if c.TLS == nil { t.Fatal("TLS should be set") } if c.TLS.CaFile != testCa || c.TLS.CertFile != testCert || c.TLS.KeyFile != testKey { t.Errorf("TLS paths: ca=%q cert=%q key=%q", c.TLS.CaFile, c.TLS.CertFile, c.TLS.KeyFile) } }, }, { name: "locker_nil_no_panic", clusteringConfig: &config.Clustering{ ClusterName: "c1", Locker: nil, }, validate: func(t *testing.T, c *config.Clustering) { if c.Locker != nil { t.Error("Locker should still be nil") } }, }, { name: "locker_string_values_expanded", clusteringConfig: &config.Clustering{ ClusterName: "c1", Locker: map[string]any{ "type": "consul", "address": "$GNMIC_ADDR", "key": "literal", }, }, validate: func(t *testing.T, c *config.Clustering) { if c.Locker == nil { t.Fatal("Locker should be set") } if c.Locker["address"] != testAddr || c.Locker["key"] != "literal" { t.Errorf("locker: got %v", c.Locker) } }, }, { name: "locker_nested_map_expanded", clusteringConfig: &config.Clustering{ ClusterName: "c1", Locker: map[string]any{ "type": "consul", "opts": map[string]any{ "host": "$GNMIC_CLUSTER", }, }, }, validate: func(t *testing.T, c *config.Clustering) { opts, _ := c.Locker["opts"].(map[string]any) if opts == nil || opts["host"] != testCluster { t.Errorf("nested locker opts: got %v", c.Locker["opts"]) } }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { env.ExpandClusterEnv(tt.clusteringConfig) if tt.validate != nil && tt.clusteringConfig != nil { tt.validate(t, tt.clusteringConfig) } }) } } func TestExpandAPIEnv(t *testing.T) { const testAddr = "127.0.0.1:7890" const testCa = "/api/ca.pem" const testCert = "/api/cert.pem" const testKey = "/api/key.pem" const testClientAuth = "require" os.Setenv("API_ADDR", testAddr) os.Setenv("API_CA", testCa) os.Setenv("API_CERT", testCert) os.Setenv("API_KEY", testKey) os.Setenv("API_CLIENT_AUTH", testClientAuth) defer func() { os.Unsetenv("API_ADDR") os.Unsetenv("API_CA") os.Unsetenv("API_CERT") os.Unsetenv("API_KEY") os.Unsetenv("API_CLIENT_AUTH") }() tests := []struct { name string apiConfig *config.APIServer validate func(t *testing.T, a *config.APIServer) }{ { name: "empty_config", apiConfig: &config.APIServer{}, validate: func(t *testing.T, a *config.APIServer) { if a.Address != "" { t.Errorf("Address should be empty, got %q", a.Address) } }, }, { name: "address_expanded", apiConfig: &config.APIServer{ Address: "$API_ADDR", }, validate: func(t *testing.T, a *config.APIServer) { if a.Address != testAddr { t.Errorf("Address: got %q", a.Address) } }, }, { name: "literal_address_unchanged", apiConfig: &config.APIServer{ Address: ":7890", }, validate: func(t *testing.T, a *config.APIServer) { if a.Address != ":7890" { t.Errorf("Address: got %q", a.Address) } }, }, { name: "tls_nil_no_panic", apiConfig: &config.APIServer{ Address: ":7890", TLS: nil, }, validate: func(t *testing.T, a *config.APIServer) { if a.TLS != nil { t.Error("TLS should still be nil") } }, }, { name: "tls_paths_and_client_auth_expanded", apiConfig: &config.APIServer{ Address: ":7890", TLS: &types.TLSConfig{ CaFile: "$API_CA", CertFile: "$API_CERT", KeyFile: "$API_KEY", ClientAuth: "$API_CLIENT_AUTH", }, }, validate: func(t *testing.T, a *config.APIServer) { if a.TLS == nil { t.Fatal("TLS should be set") } if a.TLS.CaFile != testCa || a.TLS.CertFile != testCert || a.TLS.KeyFile != testKey || a.TLS.ClientAuth != testClientAuth { t.Errorf("TLS: ca=%q cert=%q key=%q clientAuth=%q", a.TLS.CaFile, a.TLS.CertFile, a.TLS.KeyFile, a.TLS.ClientAuth) } }, }, { name: "bool_flags_unchanged_true", apiConfig: &config.APIServer{ EnableMetrics: true, EnableProfiling: true, Debug: true, HealthzDisableLogging: true, }, validate: func(t *testing.T, a *config.APIServer) { if !a.EnableMetrics || !a.EnableProfiling || !a.Debug || !a.HealthzDisableLogging { t.Errorf("bools true: metrics=%v profiling=%v debug=%v healthz=%v", a.EnableMetrics, a.EnableProfiling, a.Debug, a.HealthzDisableLogging) } }, }, { name: "bool_flags_unchanged_false", apiConfig: &config.APIServer{ EnableMetrics: false, EnableProfiling: false, Debug: false, HealthzDisableLogging: false, }, validate: func(t *testing.T, a *config.APIServer) { if a.EnableMetrics || a.EnableProfiling || a.Debug || a.HealthzDisableLogging { t.Errorf("bools false: metrics=%v profiling=%v debug=%v healthz=%v", a.EnableMetrics, a.EnableProfiling, a.Debug, a.HealthzDisableLogging) } }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { env.ExpandAPIEnv(tt.apiConfig) if tt.validate != nil && tt.apiConfig != nil { tt.validate(t, tt.apiConfig) } }) } } ================================================ FILE: pkg/collector/managers/cluster/assigner.go ================================================ package cluster_manager import ( "bytes" "context" "encoding/json" "fmt" "io" "log/slog" "net/http" "time" apiconst "github.com/openconfig/gnmic/pkg/collector/api/const" collstore "github.com/openconfig/gnmic/pkg/collector/store" ) type Assignment struct { Target string `json:"target,omitempty"` Member string `json:"member,omitempty"` } type assignmentConfig struct { Assignments []*Assignment `json:"assignments"` Unassignments []string `json:"unassignments,omitempty"` } type Assigner interface { Assign(ctx context.Context, targetToMember map[string]*Member) error Unassign(ctx context.Context, member *Member, target ...string) error } const ( httpScheme = "http" httpsScheme = "https" protocolLabel = "__protocol" ) type restAssigner struct { client *http.Client store *collstore.Store logger *slog.Logger } func NewAssigner(store *collstore.Store) Assigner { return &restAssigner{ store: store, logger: slog.With("component", "assignment-pusher"), client: &http.Client{ Timeout: 10 * time.Second, }, } } func (p *restAssigner) Assign(ctx context.Context, targetToMember map[string]*Member) error { // TODO: group by address for targetName, member := range targetToMember { if member == nil || member.Address == "" { p.logger.Warn("member is nil or address is empty", "target", targetName, "member", member) continue } scheme := GetAPIScheme(member) address := scheme + "://" + member.Address + apiconst.AssignmentsAPIv1URL err := p.assignOne(ctx, address, []*Assignment{ { Target: targetName, Member: member.ID, // Epoch: epoch }, }) if err != nil { return err } } return nil } func (p *restAssigner) assignOne(ctx context.Context, address string, assignmentSet []*Assignment) error { b, err := json.Marshal(&assignmentConfig{Assignments: assignmentSet}) if err != nil { return err } req, err := http.NewRequestWithContext(ctx, http.MethodPost, address, bytes.NewReader(b)) if err != nil { return err } req.Header.Set("Content-Type", "application/json") resp, err := p.client.Do(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode/100 != 2 { body, _ := io.ReadAll(resp.Body) p.logger.Error("failed to assign", "address", address, "assignmentSet", assignmentSet, "status", resp.Status, "body", string(body)) return fmt.Errorf("assign: %s", resp.Status) } return nil } func (p *restAssigner) Unassign(ctx context.Context, member *Member, target ...string) error { if member == nil || member.Address == "" { return fmt.Errorf("member is nil or address is empty") } scheme := GetAPIScheme(member) address := scheme + "://" + member.Address + apiconst.AssignmentsAPIv1URL body, err := json.Marshal(&assignmentConfig{Unassignments: target}) if err != nil { return err } req, err := http.NewRequestWithContext(ctx, http.MethodPost, address, bytes.NewReader(body)) if err != nil { return err } req.Header.Set("Content-Type", "application/json") resp, err := p.client.Do(req) if err != nil { return err } body, err = io.ReadAll(resp.Body) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode/100 != 2 { return fmt.Errorf("unassign: %s: %s", resp.Status, string(body)) } return nil } ================================================ FILE: pkg/collector/managers/cluster/cluster_manager.go ================================================ package cluster_manager import ( "context" "errors" "fmt" "log/slog" "maps" "net" "net/http" "path" "sort" "strconv" "sync" "sync/atomic" "time" "golang.org/x/sync/semaphore" apiconst "github.com/openconfig/gnmic/pkg/collector/api/const" "github.com/openconfig/gnmic/pkg/collector/env" collstore "github.com/openconfig/gnmic/pkg/collector/store" "github.com/openconfig/gnmic/pkg/config" "github.com/openconfig/gnmic/pkg/lockers" "github.com/openconfig/gnmic/pkg/logging" "github.com/zestor-dev/zestor/store" ) const ( protocolTagName = "__protocol" retryRegistrationBackoff = 2 * time.Second ) type ClusterManager struct { store *collstore.Store clusteringConfig *config.Clustering apiConfig *config.APIServer locker lockers.Locker election Election recampaignCooldown atomic.Int64 membership Membership assigner Assigner lockCheckLimiter chan struct{} // semaphore to limit the number of concurrent rebalancing operations (to 1) rebalancingSem *semaphore.Weighted apiClient *http.Client mm *sync.RWMutex members map[string]*Member logger *slog.Logger wg *sync.WaitGroup cfn context.CancelFunc } func NewClusterManager(store *collstore.Store) *ClusterManager { return &ClusterManager{ store: store, mm: new(sync.RWMutex), members: make(map[string]*Member), locker: nil, lockCheckLimiter: make(chan struct{}, 64), // TODO: make this configurable rebalancingSem: semaphore.NewWeighted(1), apiClient: &http.Client{Timeout: 10 * time.Second}, // TODO: } } func (c *ClusterManager) Start(ctx context.Context, locker lockers.Locker, wg *sync.WaitGroup) error { c.locker = locker c.logger = logging.NewLogger(c.store.Config, "component", "cluster-manager") ctx, cfn := context.WithCancel(ctx) c.cfn = cfn c.wg = wg //get clustring config from store clusteringConfig, ok, err := c.store.Config.Get("clustering", "clustering") if err != nil { return err } if !ok { return nil } clustering, ok := clusteringConfig.(*config.Clustering) if !ok { return nil } if clustering == nil { return nil } c.clusteringConfig = clustering env.ExpandClusterEnv(c.clusteringConfig) apiConfig, ok, err := c.store.Config.Get("api-server", "api-server") if err != nil { return err } if !ok { return nil } api, ok := apiConfig.(*config.APIServer) if !ok { return errors.New("missing api-server config when clustring is enabled") } if api == nil { return errors.New("missing api-server config when clustring is enabled") } c.apiConfig = api env.ExpandAPIEnv(c.apiConfig) c.logger.Info("starting cluster manager") c.election, err = NewElection(c.locker, clustering, c.logger) if err != nil { return err } c.membership = NewMembership(c.locker, clustering, c.logger) c.assigner = NewAssigner(c.store) // start registration to register the api service wg.Add(1) go func() { defer wg.Done() for { if ctx.Err() != nil { return } if err := c.startRegistration(ctx); err != nil { c.logger.Error("registration failed", "err", err) select { case <-ctx.Done(): return case <-time.After(retryRegistrationBackoff): continue } } // startRegistration should block until ctx.Done(); when it returns, exit. return } }() wg.Add(1) // run election campaign to grab the leader lock // when grabbed, start leader duties go func() { defer wg.Done() err := c.runCampaign(ctx) if err != nil { c.logger.Error("runCampaign exited with error", "error", err) } }() return nil } func (c *ClusterManager) Stop() error { if c.cfn != nil { c.cfn() } return nil } func (c *ClusterManager) runCampaign(ctx context.Context) error { backoff := time.Second for { if ctx.Err() != nil { return nil } // Cooldown after an API triggered withdraw if wait := c.recampaignCooldown.Load(); wait > 0 { c.logger.Info("waiting for cooldown", "cooldown", time.Duration(wait)) select { case <-ctx.Done(): return nil case <-time.After(time.Duration(wait)): } // reset c.recampaignCooldown.Store(0) } term, err := c.election.Campaign(ctx) if err != nil { if ctx.Err() != nil { return nil } c.logger.Error("failed to campaign", "error", err) time.Sleep(backoff) continue } c.logger.Info("became leader", "term", term, "node", c.clusteringConfig.InstanceName, "cluster", c.clusteringConfig.ClusterName) // Leader session context leaderCtx, leaderCancel := context.WithCancel(ctx) cancelLeader := func() { leaderCancel() // TODO: any extra cleanups? } // Start leader duties go func() { if err := c.runLeader(leaderCtx); err != nil && leaderCtx.Err() == nil { c.logger.Error("runLeader exited with error", "err", err) } }() // this blocks until leadership is lost or we’re shutting down. lost := c.election.Observe(ctx) if lost != nil { select { case <-ctx.Done(): cancelLeader() return nil case <-lost: c.logger.Warn("leadership lost", "term", term) cancelLeader() } } else { // Shouldn't happen c.logger.Warn("Observe returned nil channel; cancelling leader") cancelLeader() } time.Sleep(backoff) // small backoff before campaigning again } } func (c *ClusterManager) startRegistration(ctx context.Context) error { c.logger.Info("starting registration", "address", c.apiConfig.Address) addr, port, _ := net.SplitHostPort(c.apiConfig.Address) p, _ := strconv.Atoi(port) tags := make([]string, 0, 2+len(c.clusteringConfig.Tags)) tags = append(tags, fmt.Sprintf("cluster-name=%s", c.clusteringConfig.ClusterName)) tags = append(tags, fmt.Sprintf("instance-name=%s", c.clusteringConfig.InstanceName)) if c.apiConfig.TLS != nil { tags = append(tags, protocolTagName+"=https") } else { tags = append(tags, protocolTagName+"=http") } tags = append(tags, c.clusteringConfig.Tags...) address := c.clusteringConfig.ServiceAddress if address == "" { address = addr } deregister, err := c.membership.Register(ctx, c.clusteringConfig.ClusterName, &Registration{ ID: c.clusteringConfig.InstanceName, Address: address, Port: p, Labels: tags, }) defer deregister() if err != nil { return err } return nil } // runLeader executes as long as this node is the elected leader. // It continuously reconciles cluster state: assigns targets to nodes, // verifies ownership via locks, and updates assignments in the store. func (c *ClusterManager) runLeader(ctx context.Context) error { c.logger.Info("starting leader duties") select { case <-ctx.Done(): return ctx.Err() case <-time.After(c.clusteringConfig.LeaderWaitTimer): break } // watch membership (other nodes joining/leaving) membersCh, cancelMembers, err := c.membership.Watch(ctx) if err != nil { return fmt.Errorf("failed to start watching membership: %w", err) } defer cancelMembers() // watch targets targetsCh, cancelTargets, err := c.store.Config.Watch("targets") // no initial replay if err != nil { return fmt.Errorf("failed to watch targets: %w", err) } defer cancelTargets() // ticker for periodic reconciliation of target assignments targetsWatchTicker := time.NewTicker(c.clusteringConfig.TargetsWatchTimer) defer targetsWatchTicker.Stop() // intial membership sync members, err := c.membership.GetMembers(ctx) if err != nil { c.logger.Error("failed to get members", "error", err) //log error but continue } else { // initial reconcile c.mm.Lock() c.members = members c.mm.Unlock() if err := c.reconcileAssignments(ctx, members); err != nil { c.logger.Error("reconcile assignments failed", "error", err) } } for { select { case <-ctx.Done(): c.logger.Info("stopping leader duties") return nil case members, ok := <-membersCh: if !ok { c.logger.Warn("membership watcher closed") // happens only when explicitly closed return nil } c.logger.Info("membership update", "members", members) c.mm.Lock() c.members = members c.mm.Unlock() case targets, ok := <-targetsCh: if !ok { c.logger.Warn("targets watcher closed") return nil } c.logger.Info("targets update", "targets", targets) switch targets.EventType { case store.EventTypeCreate: err := c.handleTargetCreate(ctx, targets.Name) if err != nil { c.logger.Error("failed to handle target create", "target", targets.Name, "error", err) } // case store.EventTypeUpdate: // c.handleTarget(ctx, targets.Name) case store.EventTypeDelete: err := c.handleTargetDelete(ctx, targets.Name) if err != nil { c.logger.Error("failed to handle target delete", "target", targets.Name, "error", err) } } case <-targetsWatchTicker.C: // periodic reconciliation of target assignments members := c.snapshotMembers() if len(members) == 0 { c.logger.Warn("no members, skipping reconciliation") continue } if err := c.reconcileAssignments(ctx, members); err != nil { c.logger.Error("reconcile assignments failed", "error", err) } } } } func (c *ClusterManager) WithdrawLeader(ctx context.Context, cooldown time.Duration) error { c.recampaignCooldown.Store(cooldown.Nanoseconds()) return c.election.Withdraw() } func (c *ClusterManager) IsLeader(ctx context.Context) (bool, error) { leader, err := c.GetLeaderName(ctx) if err != nil { return false, err } return leader == c.clusteringConfig.InstanceName, nil } func (c *ClusterManager) snapshotMembers() map[string]*Member { c.mm.RLock() defer c.mm.RUnlock() members := make(map[string]*Member, len(c.members)) maps.Copy(members, c.members) return members } func (c *ClusterManager) reconcileAssignments(ctx context.Context, members map[string]*Member) error { // 1. List all known targets targets, err := c.store.Config.Keys("targets") if err != nil { return err } if len(targets) == 0 { c.logger.Info("no targets, skipping reconciliation") return nil } // 2. get current assignments from locker // target -> holder currentAssignments, err := c.getAssignments(ctx) if err != nil { return err } c.logger.Debug("current assignments", "assignments", currentAssignments) membersLoad, err := c.getMembersLoad(ctx) if err != nil { return err } c.logger.Debug("reconcile assignments members with load", "members", members) quotas := calculateMembersQuota(members, int64(len(targets))) c.logger.Debug("reconcile assignments quotas", "quotas", quotas) // 3. Decide assignments assignments := make(map[string]*Member) // targetName -> member for _, tName := range targets { c.logger.Info("reconciling target", "target", tName) currentHolder, ok := currentAssignments[tName] if ok { if m, ok := members[currentHolder.ID]; ok { c.logger.Info("target already assigned to member", "target", tName, "member", m.ID) assignments[tName] = m continue } else { c.logger.Warn("target lock holder not found", "target", tName, "holder", currentHolder) } } assigned := pickAssignee(tName, members, quotas, membersLoad) if assigned == nil { c.logger.Warn("no assignee found for target", "target", tName) continue } c.logger.Info("assigning target", "target", tName, "assignee", assigned.ID) assignments[tName] = assigned } // 4. Publish assignments with assigner err = c.assigner.Assign(ctx, assignments) if err != nil { c.logger.Error("failed to push assignments", "count", len(assignments), "error", err) } // 5. Optionally verify active locks on assigned targets for tName, member := range assignments { c.asyncVerifyLock(ctx, tName, member.ID, time.Now().Add(5*time.Second)) } return nil } func (c *ClusterManager) handleTargetCreate(ctx context.Context, target string, deniedMembers ...string) error { // 1. get current members with Load populated currentMembers, err := c.GetMembers(ctx) if err != nil { return err } c.logger.Debug("current members", "currentMembers", currentMembers) for _, m := range deniedMembers { delete(currentMembers, m) } return c.assignTarget(ctx, target, currentMembers) } // assignTarget assigns a target to the least loaded member. // This is used when a new target is created or when a member is drained from its targets. func (c *ClusterManager) assignTarget(ctx context.Context, target string, currentMembers map[string]*Member) error { // 2. find least loaded member leastLoadedMember := c.getLeastLoadedMember(currentMembers) if leastLoadedMember == nil { return fmt.Errorf("no least loaded member found") } // 3. assign target to member err := c.assigner.Assign(ctx, map[string]*Member{target: leastLoadedMember}) if err != nil { c.logger.Error("failed to push assignment", "target", target, "error", err) return err } leastLoadedMember.Load++ c.asyncVerifyLock(context.Background(), target, leastLoadedMember.ID, time.Now().Add(5*time.Second)) return nil } func (c *ClusterManager) getAssignments(ctx context.Context) (map[string]*Member, error) { currentAssignments, err := c.locker.List(ctx, targetsLockPrefix(c.clusteringConfig.ClusterName)) if err != nil { return nil, err } res := make(map[string]*Member) c.mm.RLock() defer c.mm.RUnlock() for tName, memberName := range currentAssignments { // normalize targetName targetName := path.Base(tName) if m, ok := c.members[memberName]; ok { res[targetName] = m } else { // TODO: unknwon member ? c.logger.Warn("found unknown member in current assignments", "member", memberName) } } return res, nil } func (c *ClusterManager) getAssignment(ctx context.Context, target string) *Member { member, ok := c.targetLockHolder(ctx, target) if !ok { return nil } return member } // GetMembers returns all members in the cluster. // Populates the Load field with the number of locked targets. func (c *ClusterManager) GetMembers(ctx context.Context) (map[string]*Member, error) { currentMembers := c.snapshotMembers() if len(currentMembers) == 0 { return nil, fmt.Errorf("no members found") } return c.populateMemberLoad(ctx, currentMembers) } func (c *ClusterManager) populateMemberLoad(ctx context.Context, members map[string]*Member) (map[string]*Member, error) { currentAssignments, err := c.locker.List(ctx, targetsLockPrefix(c.clusteringConfig.ClusterName)) if err != nil { return nil, err } res := make(map[string]*Member) // seed res with current members for _, m := range members { res[m.ID] = &Member{ ID: m.ID, Address: m.Address, Labels: m.Labels, Load: 0, Targets: nil, } } for tName, memberName := range currentAssignments { // normalize targetName targetName := path.Base(tName) if m, ok := members[memberName]; ok { if am, ok := res[memberName]; ok { am.Load++ am.Targets = append(am.Targets, targetName) } else { am := &Member{ ID: m.ID, Address: m.Address, Labels: m.Labels, Load: 1, Targets: []string{targetName}, } res[memberName] = am } } else { c.logger.Warn("found unknown member in current assignments", "member", memberName) } } return res, nil } func (c *ClusterManager) getMembersLoad(ctx context.Context) (map[string]int64, error) { currentAssignments, err := c.locker.List(ctx, targetsLockPrefix(c.clusteringConfig.ClusterName)) if err != nil { return nil, err } res := make(map[string]int64) for _, memberName := range currentAssignments { _, ok := res[memberName] if !ok { res[memberName] = 0 } res[memberName]++ } return res, nil } func (c *ClusterManager) getLeastLoadedMember(assignments map[string]*Member) *Member { var leastLoadedMember *Member for _, member := range assignments { if leastLoadedMember == nil { leastLoadedMember = member continue } if member.Load < leastLoadedMember.Load { leastLoadedMember = member } } return leastLoadedMember } func (c *ClusterManager) getMostLoadedMember(members map[string]*Member) *Member { var mostLoadedMember *Member for _, member := range members { if mostLoadedMember == nil { mostLoadedMember = member } if member.Load > mostLoadedMember.Load { mostLoadedMember = member } } return mostLoadedMember } func (c *ClusterManager) handleTargetDelete(ctx context.Context, target string) error { // find target assignment assignedTo := c.getAssignment(ctx, target) if assignedTo == nil { return fmt.Errorf("target is not assigned to any member") } err := c.assigner.Unassign(ctx, assignedTo, target) if err != nil { c.logger.Error("failed to unassign target", "target", target, "error", err) return err } // delete from other instances c.mm.RLock() members := make(map[string]*Member, len(c.members)) maps.Copy(members, c.members) c.mm.RUnlock() // delete self from members since the initial trigger for this function is the target delete event delete(members, assignedTo.ID) err = c.deleteTargetFromMembers(ctx, target, members) if err != nil { c.logger.Error("failed to delete target from members", "target", target, "error", err) return err } // TODO: verify ? return nil } func (c *ClusterManager) deleteTargetFromMembers(ctx context.Context, target string, members map[string]*Member) error { for _, member := range members { address := getMemberAddress(member) url := address + apiconst.TargetsConfigAPIv1URL + "/" + target req, err := http.NewRequestWithContext(ctx, http.MethodDelete, url, nil) if err != nil { c.logger.Error("failed to create request", "error", err) return err } resp, err := c.apiClient.Do(req) if err != nil { c.logger.Error("failed to delete target", "error", err) return err } resp.Body.Close() if resp.StatusCode/100 != 2 { c.logger.Error("failed to delete target", "error", resp.Status) return fmt.Errorf("failed to delete target: %s", resp.Status) } } return nil } func (c *ClusterManager) targetLockHolder(ctx context.Context, target string) (*Member, bool) { holder, ok := holder(ctx, c.locker, targetLockKey(target, c.clusteringConfig.ClusterName)) if !ok || holder == "" { return nil, false } c.mm.RLock() defer c.mm.RUnlock() member, ok := c.members[holder] if ok { return member, true } return nil, false } func (c *ClusterManager) GetInstanceToTargetsMapping(ctx context.Context) (map[string][]string, error) { locks, err := c.locker.List(ctx, targetsLockPrefix(c.clusteringConfig.ClusterName)) if err != nil { return nil, err } rs := make(map[string][]string) for k, v := range locks { if _, ok := rs[v]; !ok { rs[v] = make([]string, 0) } rs[v] = append(rs[v], path.Base(k)) } for _, ls := range rs { sort.Strings(ls) } return rs, nil } func (c *ClusterManager) GetLeaderName(ctx context.Context) (string, error) { leaderKey := fmt.Sprintf("gnmic/%s/leader", c.clusteringConfig.ClusterName) leader, err := c.locker.List(ctx, leaderKey) if err != nil { return "", err } if len(leader) == 0 { return "", nil } return leader[leaderKey], nil } func (c *ClusterManager) DrainMember(ctx context.Context, toBeDrained string) error { members, err := c.GetMembers(ctx) if err != nil { return err } c.logger.Info("members", "members", members) memberToDrain, ok := members[toBeDrained] if !ok { return fmt.Errorf("member to drain not found") } if memberToDrain == nil { return fmt.Errorf("member to drain not found") } if len(memberToDrain.Targets) == 0 { c.logger.Info("member has no targets", "member", toBeDrained) return nil } c.logger.Info("draining member", "member", toBeDrained) c.logger.Info("unassigning targets", "targets", memberToDrain.Targets) err = c.assigner.Unassign(ctx, memberToDrain, memberToDrain.Targets...) if err != nil { c.logger.Error("failed to unassign targets", "member", toBeDrained, "error", err) return err } c.logger.Info("unassigned targets", "targets", memberToDrain.Targets) c.logger.Info("deleting member", "member", toBeDrained) delete(members, toBeDrained) for _, t := range memberToDrain.Targets { c.assignTarget(ctx, t, members) } return nil } func (c *ClusterManager) asyncVerifyLock(ctx context.Context, target, expectHolder string, deadline time.Time) { select { case c.lockCheckLimiter <- struct{}{}: // acquire semaphore case <-ctx.Done(): return } go func() { defer func() { <-c.lockCheckLimiter // release semaphore }() key := targetLockKey(target, c.clusteringConfig.ClusterName) for { if ctx.Err() != nil || time.Now().After(deadline) { c.logger.Info("lock not observed before deadline", "target", target, "expect", expectHolder) return } holder, ok := holder(ctx, c.locker, key) if ok && holder == expectHolder { c.logger.Info("lock observed", "target", target, "holder", holder) return } select { case <-ctx.Done(): return case <-time.After(200 * time.Millisecond): } } }() } ================================================ FILE: pkg/collector/managers/cluster/election.go ================================================ package cluster_manager import ( "context" "errors" "fmt" "log/slog" "sync" "sync/atomic" "time" "github.com/openconfig/gnmic/pkg/config" "github.com/openconfig/gnmic/pkg/lockers" ) const ( // campaignPeriod = 1 * time.Second recampaignBackoff = 200 * time.Millisecond recampaignJitterRatio = 0.2 ) type Election interface { // Blocks until this node becomes leader (i.e., acquires the leader lock) or ctx is done. // Returns a monotonically increasing term for observability/metrics. Campaign(ctx context.Context) (term int64, err error) // Closes when leadership is lost (or returns nil if you don't need it). Observe(ctx context.Context) <-chan struct{} // closes/receives when leadership is lost (optional: return nil if N/A) // Withdraw withdraws from the leader position Withdraw() error } type election struct { nodeID string clusterName string RenewEvery time.Duration // renew every (e.g., 1/2 of TTL) locker lockers.Locker logger *slog.Logger // // internals term atomic.Int64 held atomic.Bool loseOnce sync.Once loseCh chan struct{} cancelKeepAlive context.CancelFunc // backend-specific release fn for the held lock releaseFn func() error mu sync.Mutex } func NewElection(locker lockers.Locker, clustering *config.Clustering, logger *slog.Logger) (Election, error) { var renewEvery time.Duration sTTL, ok := clustering.Locker["session-ttl"] if ok { switch st := sTTL.(type) { case string: var err error renewEvery, err = time.ParseDuration(st) if err != nil { return nil, err } if renewEvery <= 0 { return nil, errors.New("session-ttl must be greater than 0") } renewEvery = renewEvery / 2 default: return nil, errors.New("session-ttl must be a string") } } else { renewEvery = 5 * time.Second } return &election{ locker: locker, nodeID: clustering.InstanceName, clusterName: clustering.ClusterName, RenewEvery: renewEvery, logger: logger, }, nil } func (e *election) Campaign(ctx context.Context) (term int64, err error) { e.logger.Info("campaigning for leader", "node", e.nodeID, "cluster", e.clusterName) // reinitialize loseCh for this term e.mu.Lock() e.loseOnce = sync.Once{} e.loseCh = make(chan struct{}) e.mu.Unlock() key := e.leaderKey() // try lock // keep trying until ctx canceled ticker := time.NewTimer(0) // fire immediately first time defer ticker.Stop() for { if !ticker.Stop() { select { case <-ticker.C: default: } } e.logger.Info("trying to acquire leader lock", "node", e.nodeID, "cluster", e.clusterName, "term", term) // Try to acquire the leader lock ok, release, err := tryAcquire(ctx, e.locker, key, []byte(e.nodeID)) if err != nil { e.logger.Error("failed to acquire leader lock", "node", e.nodeID, "cluster", e.clusterName, "term", term, "error", err) // locker error... backoff a bit delay := jittered(recampaignBackoff) ticker.Reset(delay) select { case <-ctx.Done(): return 0, ctx.Err() case <-ticker.C: continue } } if ok { e.logger.Info("acquired leader lock", "node", e.nodeID, "cluster", e.clusterName, "term", term) // I'm the captain now! e.mu.Lock() e.releaseFn = release e.mu.Unlock() e.held.Store(true) term := e.term.Add(1) // start keepalive loop bound to this leadership session keepCtx, cancel := context.WithCancel(ctx) e.cancelKeepAlive = cancel go e.keepalive(keepCtx, key) return term, nil } e.logger.Info("not acquired leader lock", "node", e.nodeID, "cluster", e.clusterName, "term", term) // lock not acquired, add a jitter and retry delay := jittered(recampaignBackoff) ticker.Reset(delay) select { case <-ctx.Done(): return 0, ctx.Err() case <-ticker.C: continue } } } // Observe closes when this node loses leadership. // (Safe to call multiple times, same channel is returned.) func (e *election) Observe(ctx context.Context) <-chan struct{} { e.mu.Lock() ch := e.loseCh e.mu.Unlock() return ch } func (e *election) Withdraw() error { if !e.held.Load() { return nil } e.mu.Lock() release := e.releaseFn e.releaseFn = nil cancel := e.cancelKeepAlive e.cancelKeepAlive = nil e.mu.Unlock() if cancel != nil { cancel() } if release != nil { _ = release() // ignore error } // signal loss e.loseOnce.Do(func() { e.held.Store(false) if e.loseCh != nil { close(e.loseCh) } }) e.logger.Info("leadership withdrawn", "term", e.term.Load(), "node", e.nodeID, "cluster", e.clusterName) return nil } func (e *election) leaderKey() string { return fmt.Sprintf("gnmic/%s/leader", e.clusterName) } // keepalive periodically renews the lock and detects loss. // On failure (or if the holder changes), it signals loss and cleans up. func (e *election) keepalive(ctx context.Context, key string) { t := time.NewTicker(e.RenewEvery) defer t.Stop() e.logger.Info("starting keepalive loop", "node", e.nodeID, "cluster", e.clusterName, "term", e.term.Load()) for { select { case <-ctx.Done(): return case <-t.C: e.logger.Info("renewing leader lock", "node", e.nodeID, "cluster", e.clusterName, "term", e.term.Load()) // Renew our lease,if that fails or another node took over, we lost leadership. if err := renew(ctx, e.locker, key, []byte(e.nodeID)); err != nil { e.signalLoss() return } e.logger.Info("renewed leader lock", "node", e.nodeID, "cluster", e.clusterName, "term", e.term.Load()) if h, ok := holder(ctx, e.locker, key); ok && h != e.nodeID { // someone else is now the holder → we lost e.signalLoss() return } } } } func (e *election) signalLoss() { // release our lock (best effort) e.mu.Lock() release := e.releaseFn e.releaseFn = nil e.mu.Unlock() if release != nil { _ = release() // ignore error, we already lost } // stop renew loop if e.cancelKeepAlive != nil { e.cancelKeepAlive() } // signal once e.loseOnce.Do(func() { e.held.Store(false) close(e.loseCh) }) e.logger.Warn("lost leadership", "term", e.term.Load(), "node", e.nodeID, "cluster", e.clusterName) } // tryAcquire tries to acquire key with value=holder and TTL. // Returns (true, releaseFn, nil) if acquired, (false, nil, nil) if not acquired, or (false, nil, err) on backend error. func tryAcquire(ctx context.Context, lk lockers.Locker, key string, holder []byte) (bool, func() error, error) { // Lock() attempts to acquire the lock, it returns (true,nil) if successful, // (false,nil) if already locked, or (false,err) if backend error. ok, err := lk.Lock(ctx, key, holder) if err != nil { return false, nil, err } if !ok { // someone else already holds the lock return false, nil, nil } // Start a keepalive session for this lock. kaCtx, cancel := context.WithCancel(context.Background()) doneCh, errCh := lk.KeepLock(kaCtx, key) // Release function closes keepalive and unlocks. release := func() error { cancel() // drain both channels to avoid goroutine leaks select { case <-doneCh: default: } select { case <-errCh: default: } return lk.Unlock(context.Background(), key) } // Background watcher: if KeepLock fails (err or done), cancel leadership early. go func() { select { case <-doneCh: // Lock lost gracefully (KeepLock closed) cancel() case err := <-errCh: // Renewal failed or backend issue _ = err cancel() case <-kaCtx.Done(): } }() return true, release, nil } // renew refreshes the TTL for a lock we hold. func renew(ctx context.Context, lk lockers.Locker, key string, holder []byte) error { // In this Locker API, TTL renewals are managed by KeepLock(). // So "renew" doesn’t need to explicitly refresh, just check if lock is still held. held, err := lk.IsLocked(ctx, key) if err != nil { return err } if !held { return fmt.Errorf("lock %q lost", key) } return nil } // holder returns current holder id (stringified from value) if locked. func holder(ctx context.Context, lk lockers.Locker, key string) (string, bool) { m, err := lk.List(ctx, key) if err != nil { return "", false } // The Locker.List returns map[string]string{ lockName -> holderID } if len(m) == 0 { return "", false } if v, ok := m[key]; ok { return v, true } return "", false } ================================================ FILE: pkg/collector/managers/cluster/membership.go ================================================ package cluster_manager import ( "context" "encoding/json" "fmt" "log/slog" "time" "github.com/openconfig/gnmic/pkg/config" "github.com/openconfig/gnmic/pkg/lockers" ) const ( apiServiceName = "gnmic-api" ) type Membership interface { Register(ctx context.Context, clusterName string, self *Registration) (func() error, error) GetMembers(ctx context.Context) (map[string]*Member, error) Watch(ctx context.Context) (<-chan map[string]*Member, func(), error) } type Registration struct { ID string // instance ID Name string // service Name Address string // service Address Port int //service port Labels []string // labels/tags list } type Member struct { ID string `json:"id,omitempty"` Address string `json:"address,omitempty"` Labels []string `json:"labels,omitempty"` Load int64 `json:"load,omitempty"` // populated by the cluster manager based on lock count Targets []string `json:"targets,omitempty"` } func (m *Member) String() string { b, _ := json.Marshal(m) return string(b) } type membership struct { locker lockers.Locker logger *slog.Logger // clusterName string config *config.Clustering } func NewMembership(locker lockers.Locker, config *config.Clustering, logger *slog.Logger) Membership { return &membership{locker: locker, logger: logger, config: config} } func (m *membership) GetMembers(ctx context.Context) (map[string]*Member, error) { members := make(map[string]*Member) srvs, err := m.locker.GetServices(ctx, m.serviceName(), nil) if err != nil { return nil, err } for _, srv := range srvs { members[srv.ID] = &Member{ ID: srv.ID, Address: srv.Address, Labels: srv.Tags, } } return members, nil } func (m *membership) Watch(ctx context.Context) (<-chan map[string]*Member, func(), error) { lockerCh := make(chan []*lockers.Service) ctx, cancel := context.WithCancel(ctx) serviceName := m.serviceName() m.logger.Info("watching services", "serviceName", serviceName) go m.locker.WatchServices(ctx, serviceName, []string{"cluster-name=" + m.config.ClusterName}, lockerCh, m.config.ServicesWatchTimer) ch := make(chan map[string]*Member) go func() { defer cancel() for { select { case <-ctx.Done(): return case srvs, ok := <-lockerCh: if !ok { return } members := make(map[string]*Member) for _, srv := range srvs { members[srv.ID] = &Member{ID: srv.ID, Address: srv.Address, Labels: srv.Tags} } select { case <-ctx.Done(): return case ch <- members: } } } }() return ch, func() { cancel() close(ch) }, nil } func (m *membership) Register(ctx context.Context, clusterName string, self *Registration) (func() error, error) { ctx, cancel := context.WithCancel(ctx) err := m.locker.Register(ctx, &lockers.ServiceRegistration{ ID: self.ID, Name: fmt.Sprintf("%s-%s", clusterName, apiServiceName), Address: self.Address, Port: self.Port, Tags: self.Labels, TTL: 5 * time.Second, // TODO: make this configurable }) return func() error { cancel() return m.locker.Deregister(self.ID) }, err } func (m *membership) serviceName() string { return fmt.Sprintf("%s-%s", m.config.ClusterName, apiServiceName) } ================================================ FILE: pkg/collector/managers/cluster/placement.go ================================================ package cluster_manager import ( "hash/fnv" ) // pickAssignee selects the best Member to assign a target to, based on current quota and load. // It chooses the member with the most available quota (quota - load), and uses tieBreak for deterministic selection when tied. // Updates the membersLoad map to reflect the new assignment and returns the selected Member. func pickAssignee( targetName string, members map[string]*Member, quotas map[string]int64, membersLoad map[string]int64, ) *Member { if len(members) == 0 { return nil } var pick *Member highestFreeQuota := int64(-1 << 62) for _, m := range members { s := quotas[m.ID] - membersLoad[m.ID] if s > highestFreeQuota || (s == highestFreeQuota && tieBreak(targetName, m.ID, pick.ID)) { pick = m highestFreeQuota = s } } membersLoad[pick.ID]++ return pick } // tieBreak deterministically chooses between two members for assignment by hashing the combination of targetName and memberID using FNV-1a. // If the hashes are equal, it resorts to lexicographical comparison of the member IDs. func tieBreak(targetName, memberA, memberB string) bool { if memberB == "" { return true } ha := fnv64(targetName + memberA) hb := fnv64(targetName + memberB) if ha == hb { return memberA < memberB } return ha < hb } // fnv64 computes the FNV-1a 64-bit hash for a given string. func fnv64(s string) uint64 { h := fnv.New64a() _, _ = h.Write([]byte(s)) return h.Sum64() } ================================================ FILE: pkg/collector/managers/cluster/rebalance.go ================================================ package cluster_manager import ( "context" "fmt" "math/rand" "sort" "time" ) func (c *ClusterManager) RebalanceTargets(ctx context.Context) error { members, err := c.GetMembers(ctx) if err != nil { return err } if len(members) < 2 { return fmt.Errorf("no members or only one member found") } c.logger.Debug("members", "members", members) // get most loaded and least loaded mostLoadedMember := c.getMostLoadedMember(members) if mostLoadedMember == nil { return fmt.Errorf("count not determine most loaded member") } leastLoadedMember := c.getLeastLoadedMember(members) if leastLoadedMember == nil { return fmt.Errorf("count not determine least loaded member") } c.logger.Debug("mostLoadedMember", "mostLoadedMember", mostLoadedMember) c.logger.Debug("leastLoadedMember", "leastLoadedMember", leastLoadedMember) // decide if rebalancing is needed // if not, return diff := mostLoadedMember.Load - leastLoadedMember.Load if diff < 2 { c.logger.Info("rebalancing is not needed", "mostLoadedMember", mostLoadedMember.ID, "mostLoadedMemberLoad", mostLoadedMember.Load, "leastLoadedMember", leastLoadedMember.ID, "leastLoadedMemberLoad", leastLoadedMember.Load, ) return nil } c.logger.Info("rebalancing is needed", "mostLoadedMember", mostLoadedMember.ID, "mostLoadedMemberLoad", mostLoadedMember.Load, "leastLoadedMember", leastLoadedMember.ID, "leastLoadedMemberLoad", leastLoadedMember.Load, ) // determine the set of targets to move moveCount := diff / 2 // TODO: add cap moveCount = max(moveCount, leastLoadedMember.Load) candidates := append([]string{}, mostLoadedMember.Targets...) rand.Shuffle(len(candidates), func(i, j int) { candidates[i], candidates[j] = candidates[j], candidates[i] }) targetsToMove := candidates[:moveCount] assignments := make(map[string]*Member) // unassign the target from the most loaded member for _, t := range targetsToMove { c.logger.Info("unassigning target", "target", t, "member", mostLoadedMember.ID) err = c.assigner.Unassign(ctx, mostLoadedMember, t) if err != nil { c.logger.Error("failed to unassign target", "target", t, "member", mostLoadedMember.ID, "error", err) continue } assignments[t] = leastLoadedMember } c.logger.Info("assignment set", "assignments", assignments, "member", leastLoadedMember.ID) err = c.assigner.Assign(ctx, assignments) if err != nil { return err } for _, t := range targetsToMove { c.asyncVerifyLock(ctx, t, leastLoadedMember.ID, time.Now().Add(5*time.Second)) } return nil } func (c *ClusterManager) RebalanceTargetsV2() error { if ok := c.rebalancingSem.TryAcquire(1); !ok { return fmt.Errorf("rebalancing already in progress") } go func() { defer c.rebalancingSem.Release(1) ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) // TODO: configurable defer cancel() members, err := c.GetMembers(ctx) if err != nil { c.logger.Error("failed to get members", "error", err) } rebalancePlan := rebalance(members) c.logger.Info("rebalance plan", "rebalancePlan", rebalancePlan) if rebalancePlan == nil { c.logger.Info("cluster is already balanced") return } if len(rebalancePlan) == 0 { c.logger.Info("cluster is already balanced") return } // per member deltas removeBySrc := map[string][]string{} addByDst := map[string][]string{} // keep target -> current member mapping owner := make(map[string]string) for id, m := range members { for _, t := range m.Targets { owner[t] = id } } for t, dst := range rebalancePlan { if srcID, ok := owner[t]; ok && srcID != dst.ID { removeBySrc[srcID] = append(removeBySrc[srcID], t) addByDst[dst.ID] = append(addByDst[dst.ID], t) } } c.logger.Info("removing targets", "removeBySrc", removeBySrc) for srcID, ts := range removeBySrc { err = c.assigner.Unassign(ctx, members[srcID], ts...) if err != nil { c.logger.Error("failed to unassign targets", "targets", ts, "member", srcID, "error", err) continue } } c.logger.Info("adding targets", "addByDst", addByDst) for dstID, ts := range addByDst { asg := make(map[string]*Member, len(ts)) for _, t := range ts { asg[t] = members[dstID] } err = c.assigner.Assign(ctx, asg) if err != nil { c.logger.Error("assign batch failed", "member", dstID, "err", err) } } }() return nil } // rebalance computes a one-shot plan: target -> newOwner (only moved ones). // It never proposes moving the same target twice, and tries to fill each receiver to its quota. func rebalance(members map[string]*Member) map[string]*Member { // calculate total load total := int64(0) for _, m := range members { total += m.Load } // determine the quota for each member q := calculateMembersQuota(members, total) if len(q) == 0 { return nil // already balanced } donors := make([]*donor, 0) receivers := make([]*receiver, 0) // deterministic member order ids := make([]string, 0, len(members)) for id := range members { ids = append(ids, id) } sort.Strings(ids) // determine "want" and "have" for each member for _, id := range ids { m := members[id] want := q[id] have := m.Load switch { case have > want: // copy targets & randomize to avoid bias; or choose oldest/cheapest to move pool := append([]string(nil), m.Targets...) rand.Shuffle(len(pool), func(i, j int) { pool[i], pool[j] = pool[j], pool[i] }) donors = append(donors, &donor{ id: id, // m: m, surplus: have - want, pool: pool, }) case have < want: receivers = append(receivers, &receiver{ id: id, // m: m, need: want - have, }) } } if len(donors) == 0 || len(receivers) == 0 { return nil // already balanced } moves := make(map[string]*Member) // determine the best targets to move from each donor for _, d := range donors { for d.surplus > 0 && len(receivers) > 0 { r := receivers[0] if r.need == 0 { // receiver needs no more receivers = receivers[1:] continue } // find up to k targets eligible for r k := min(d.surplus, r.need) taken := int64(0) // scan donor pool, pick targets, compact pool as we consume w := 0 for _, t := range d.pool { if taken < k { moves[t] = members[r.id] // move target to receiver taken++ // skip copying this one (removed) continue } // keep in pool d.pool[w] = t w++ } d.pool = d.pool[:w] // compact pool d.surplus -= taken // update surplus r.need -= taken // update need // if this receiver still needs more, keep it at index 0, otherwise pop it if r.need == 0 { receivers = receivers[1:] } else { // rotate receiver to the back to give others a chance (optional) receivers = append(receivers[1:], r) } // If donor ran out of eligible candidates before satisfying k, break to next receiver if taken == 0 { // no eligible targets for this receiver, try with next receiver receivers = append(receivers[1:], r) // rotate if len(receivers) == 1 { // only one receiver left, but no eligible targets => donor is stuck break } } } } return moves } // calculateMembersQuota calculates the quota for each member based on the total load // the quota is the average load per member // the remainder is distributed evenly among the members func calculateMembersQuota(members map[string]*Member, total int64) map[string]int64 { if total == 0 { // no load, no quota res := make(map[string]int64, len(members)) for id := range members { res[id] = 0 } return res } if len(members) == 0 { return nil } n := int64(len(members)) base := total / n rem := total % n ids := make([]string, 0, n) for id := range members { ids = append(ids, id) } sort.Strings(ids) quota := make(map[string]int64) for i, id := range ids { if i < int(rem) { quota[id] = base + 1 } else { quota[id] = base } } return quota } type donor struct { // member id id string // surplus load surplus int64 // copy of targets pool []string } type receiver struct { // member id id string // need load need int64 } ================================================ FILE: pkg/collector/managers/cluster/utils.go ================================================ package cluster_manager import ( "fmt" "math/rand" "strings" "time" ) func targetsLockPrefix(clusterName string) string { return fmt.Sprintf("gnmic/%s/targets", clusterName) } func targetLockKey(target, clusterName string) string { return fmt.Sprintf("gnmic/%s/targets/%s", clusterName, target) } func GetAPIScheme(member *Member) string { if member == nil { return httpScheme } for _, lb := range member.Labels { parts := strings.SplitN(lb, "=", 2) if len(parts) == 2 && parts[0] == protocolLabel { if parts[1] == "https" { return httpsScheme } else { return httpScheme } } } return httpScheme } func getMemberAddress(member *Member) string { return GetAPIScheme(member) + "://" + member.Address } func jittered(d time.Duration) time.Duration { j := time.Duration(rand.Int63n(int64(float64(d) * recampaignJitterRatio))) return d + j } ================================================ FILE: pkg/collector/managers/inputs/inputs_manager.go ================================================ package inputs_manager import ( "context" "fmt" "log" "log/slog" "os" "sync" "time" collstore "github.com/openconfig/gnmic/pkg/collector/store" "github.com/openconfig/gnmic/pkg/inputs" "github.com/openconfig/gnmic/pkg/logging" "github.com/openconfig/gnmic/pkg/pipeline" "github.com/zestor-dev/zestor/store" ) type ManagedInput struct { sync.RWMutex Name string Impl inputs.Input Cfg map[string]any } type InputsManager struct { ctx context.Context store *collstore.Store inputFactories map[string]inputs.Initializer pipeline chan *pipeline.Msg logger *slog.Logger mu sync.RWMutex inputs map[string]*ManagedInput processorsInUse map[string]map[string]struct{} // processor name -> input names } func NewInputsManager(ctx context.Context, store *collstore.Store, pipeline chan *pipeline.Msg) *InputsManager { return &InputsManager{ ctx: ctx, store: store, pipeline: pipeline, inputFactories: inputs.Inputs, inputs: map[string]*ManagedInput{}, processorsInUse: make(map[string]map[string]struct{}), } } func (mgr *InputsManager) Start(wg *sync.WaitGroup) error { mgr.logger = logging.NewLogger(mgr.store.Config, "component", "inputs-manager") mgr.logger.Info("starting inputs manager") inputsCh, inputsCancel, err := mgr.store.Config.Watch("inputs", store.WithInitialReplay[any]()) if err != nil { return err } // watch processors config changes (update only) procsCh, processorsCancel, err := mgr.store.Config.Watch("processors", store.WithEventTypes[any](store.EventTypeUpdate)) if err != nil { return err } wg.Add(1) go func() { defer wg.Done() defer inputsCancel() defer processorsCancel() for { select { case <-mgr.ctx.Done(): return case ev, ok := <-inputsCh: if !ok { return } mgr.logger.Info("got input event", "event", ev) cfg, ok := ev.Object.(map[string]any) if !ok { mgr.logger.Error("invalid input config", "event", ev) continue } switch ev.EventType { case store.EventTypeCreate: mgr.createInput(ev.Name, cfg) case store.EventTypeUpdate: mgr.updateInput(ev.Name, cfg) case store.EventTypeDelete: mgr.DeleteInput(ev.Name) } case ev, ok := <-procsCh: if !ok { return } cfg, ok := ev.Object.(map[string]any) if !ok { mgr.logger.Error("invalid processor config", "event", ev) continue } switch ev.EventType { case store.EventTypeUpdate: mgr.updateProcessor(ev.Name, cfg) } } } }() return nil } func (mgr *InputsManager) Stop() { mgr.mu.Lock() defer mgr.mu.Unlock() for _, mi := range mgr.inputs { mgr.setInputState(mi.Name, collstore.StateStopped, "") err := mi.Impl.Close() if err != nil { mgr.logger.Error("failed to stop input", "name", mi.Name, "error", err) } } } func (mgr *InputsManager) createInput(name string, cfg map[string]any) { typ, _ := cfg["type"].(string) f := mgr.inputFactories[typ] if f == nil { mgr.setInputState(name, collstore.StateFailed, fmt.Sprintf("unknown input type: %s", typ)) return } impl := f() if err := impl.Start(mgr.ctx, name, cfg, inputs.WithLogger(log.New(os.Stdout, "", log.LstdFlags)), inputs.WithConfigStore(mgr.store.Config), inputs.WithPipeline(mgr.pipeline), ); err != nil { mgr.setInputState(name, collstore.StateFailed, err.Error()) return } procs := extractProcessors(cfg) mi := &ManagedInput{Name: name, Impl: impl, Cfg: cfg} mgr.mu.Lock() mgr.trackProcessorsInUse(name, procs) mgr.inputs[name] = mi mgr.mu.Unlock() mgr.setInputState(name, collstore.StateRunning, "") } func (mgr *InputsManager) updateInput(name string, cfg map[string]any) { mgr.mu.Lock() defer mgr.mu.Unlock() mi, ok := mgr.inputs[name] if !ok { mgr.createInput(name, cfg) return } mgr.logger.Info("updating input", "name", name, "cfg", cfg) mi.Lock() defer mi.Unlock() err := mi.Impl.Update(cfg) if err != nil { mgr.logger.Error("failed to update input", "name", name, "error", err) return } oldProcs := extractProcessors(mi.Cfg) newProcs := extractProcessors(cfg) mgr.logger.Info("tracking input processors in use", "name", name, "oldProcs", oldProcs, "newProcs", newProcs) mgr.untrackProcessorsInUse(name, oldProcs) mgr.trackProcessorsInUse(name, newProcs) mgr.logger.Info("updated input", "name", name, "cfg", cfg) mi.Cfg = cfg mgr.inputs[name] = mi } func (mgr *InputsManager) DeleteInput(name string) error { mgr.mu.Lock() defer mgr.mu.Unlock() mgr.logger.Info("finding input", "name", name) if mi, ok := mgr.inputs[name]; ok { mgr.logger.Info("stopping input", "name", name) mgr.setInputState(name, collstore.StateStopping, "") err := mi.Impl.Close() if err != nil { mgr.logger.Error("failed to close input", "name", name, "error", err) return fmt.Errorf("failed to close input: %w", err) } procs := extractProcessors(mi.Cfg) mgr.untrackProcessorsInUse(name, procs) mgr.setInputState(name, collstore.StateStopped, "") delete(mgr.inputs, name) mgr.store.Config.Delete("inputs", name) } mgr.store.State.Delete(collstore.KindInputs, name) return nil } func extractProcessors(cfg map[string]any) []string { v, ok := cfg["event-processors"] if !ok { return nil } switch v := v.(type) { case []any: out := make([]string, 0, len(v)) for _, it := range v { if s, ok := it.(string); ok { out = append(out, s) } } return out case []string: return v } return nil } func (mgr *InputsManager) trackProcessorsInUse(in string, procs []string) { for _, p := range procs { if mgr.processorsInUse[p] == nil { mgr.processorsInUse[p] = make(map[string]struct{}) } mgr.processorsInUse[p][in] = struct{}{} } } func (mgr *InputsManager) untrackProcessorsInUse(in string, procs []string) { for _, p := range procs { if users, ok := mgr.processorsInUse[p]; ok { delete(users, in) if len(users) == 0 { delete(mgr.processorsInUse, p) } } } } func (mgr *InputsManager) ProcessorInUse(name string) bool { mgr.mu.RLock() defer mgr.mu.RUnlock() users, ok := mgr.processorsInUse[name] if !ok { return false } return len(users) > 0 } func (mgr *InputsManager) updateProcessor(name string, cfg map[string]any) { mgr.mu.Lock() defer mgr.mu.Unlock() for _, mi := range mgr.inputs { err := mi.updateProcessor(name, cfg) if err != nil { mgr.logger.Error("failed to update event processor for input", "processorName", name, "inputName", mi.Name, "error", err) } } } func (mi *ManagedInput) updateProcessor(name string, cfg map[string]any) error { mi.Lock() defer mi.Unlock() return mi.Impl.UpdateProcessor(name, cfg) } // State store helpers func (mgr *InputsManager) setInputState(name, state, failedReason string) { is := &collstore.InputState{ ComponentState: collstore.ComponentState{ // Name: name, IntendedState: collstore.IntendedStateEnabled, State: state, FailedReason: failedReason, LastUpdated: time.Now(), }, } mgr.store.State.Set(collstore.KindInputs, name, is) } // GetInputState returns the runtime state of an input from the state store. func (mgr *InputsManager) GetInputState(name string) *collstore.InputState { v, ok, err := mgr.store.State.Get(collstore.KindInputs, name) if err != nil || !ok { return nil } is, ok := v.(*collstore.InputState) if !ok { return nil } return is } // ListInputStates returns all input states from the state store. func (mgr *InputsManager) ListInputStates() []*collstore.InputState { states := make([]*collstore.InputState, 0) mgr.store.State.List(collstore.KindInputs, func(name string, v any) bool { if is, ok := v.(*collstore.InputState); ok { states = append(states, is) } return false }) return states } ================================================ FILE: pkg/collector/managers/outputs/outputs_manager.go ================================================ package outputs_manager import ( "context" "fmt" "log" "log/slog" "os" "sync" "time" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/cache" collstore "github.com/openconfig/gnmic/pkg/collector/store" "github.com/openconfig/gnmic/pkg/logging" "github.com/openconfig/gnmic/pkg/outputs" "github.com/openconfig/gnmic/pkg/pipeline" "github.com/prometheus/client_golang/prometheus" "github.com/zestor-dev/zestor/store" "google.golang.org/protobuf/proto" ) type ManagedOutput struct { sync.RWMutex Name string Impl outputs.Output Cfg map[string]any } // OutputsManager runs outputs. type OutputsManager struct { ctx context.Context store *collstore.Store OutputsFactory map[string]outputs.Initializer in <-chan *pipeline.Msg // pipe from targets and/or inputs mu sync.RWMutex outputs map[string]*ManagedOutput processorsInUse map[string]map[string]struct{} // processor name -> output names cache cache.Cache logger *slog.Logger reg *prometheus.Registry stats *outputStats } type outputStats struct { msgCount *prometheus.CounterVec msgCountErr *prometheus.CounterVec } func NewOutputsManager(ctx context.Context, store *collstore.Store, pipe <-chan *pipeline.Msg, reg *prometheus.Registry) *OutputsManager { return &OutputsManager{ ctx: ctx, store: store, OutputsFactory: outputs.Outputs, in: pipe, outputs: map[string]*ManagedOutput{}, processorsInUse: make(map[string]map[string]struct{}), stats: newOutputStats(), reg: reg, } } func (mgr *OutputsManager) Start(cache cache.Cache, wg *sync.WaitGroup) error { mgr.logger = logging.NewLogger(mgr.store.Config, "component", "outputs-manager") mgr.logger.Info("starting outputs manager") mgr.cache = cache // register metrics mgr.registerMetrics() // watch outputs config changes outputCh, outputsCancel, err := mgr.store.Config.Watch("outputs", store.WithInitialReplay[any]()) if err != nil { return err } // watch processors config changes (update only) procsCh, processorsCancel, err := mgr.store.Config.Watch("processors", store.WithEventTypes[any](store.EventTypeUpdate)) if err != nil { return err } wg.Add(1) // forward incoming events to all running outputs // that are in the list of outputs to write to. go mgr.writeLoop(wg) wg.Add(1) go func() { defer wg.Done() defer outputsCancel() defer processorsCancel() for { select { case <-mgr.ctx.Done(): return case ev, ok := <-outputCh: if !ok { return } mgr.logger.Info("got output event", "event", ev) cfg, ok := ev.Object.(map[string]any) if !ok { mgr.logger.Error("invalid output config", "event", ev) continue } switch ev.EventType { case store.EventTypeCreate: mgr.createOutput(ev.Name, cfg) case store.EventTypeUpdate: mgr.updateOutput(ev.Name, cfg) case store.EventTypeDelete: mgr.DeleteOutput(ev.Name) } case ev, ok := <-procsCh: if !ok { return } mgr.logger.Info("got processor event", "event", ev) cfg, ok := ev.Object.(map[string]any) if !ok { mgr.logger.Error("invalid processor config", "event", ev) continue } switch ev.EventType { case store.EventTypeUpdate: mgr.updateProcessor(ev.Name, cfg) } } } }() return nil } func (mgr *OutputsManager) writeLoop(wg *sync.WaitGroup) { defer wg.Done() for { select { case <-mgr.ctx.Done(): return case e, ok := <-mgr.in: if !ok { mgr.logger.Debug("pipeline channel closed") return } mgr.logger.Debug("got pipeline message", "message", e) // Debug go mgr.write(e) if mgr.cache != nil { go mgr.cache.Write(mgr.ctx, e.Meta["subscription-name"], e.Msg) } } } } func (mgr *OutputsManager) write(e *pipeline.Msg) { outs := mgr.getOutputsForTarget(e.Outputs) outsNames := make([]string, 0, len(outs)) if mgr.logger.Enabled(mgr.ctx, slog.LevelDebug) { for _, o := range outs { outsNames = append(outsNames, o.Name) } mgr.logger.Debug("writing msg to outputs", "outputs", outsNames) } for _, mo := range outs { mgr.stats.msgCount.WithLabelValues(mo.Name).Inc() if len(e.Events) > 0 { // from inputs for _, ev := range e.Events { mo.Impl.WriteEvent(mgr.ctx, ev) } } else { // from targets or inputs mo.Impl.Write(mgr.ctx, e.Msg, e.Meta) } } } func (mgr *OutputsManager) updateProcessor(name string, cfg map[string]any) { mgr.mu.Lock() defer mgr.mu.Unlock() for _, mo := range mgr.outputs { err := mo.updateProcessor(name, cfg) if err != nil { mgr.logger.Error("failed to update event processor for output", "processorName", name, "outputName", mo.Name, "error", err) } } } func (mo *ManagedOutput) updateProcessor(name string, cfg map[string]any) error { mo.Lock() defer mo.Unlock() return mo.Impl.UpdateProcessor(name, cfg) } func (mgr *OutputsManager) getOutputsForTarget(outputs map[string]struct{}) []*ManagedOutput { mgr.mu.RLock() defer mgr.mu.RUnlock() // all outputs if len(outputs) == 0 { outs := make([]*ManagedOutput, 0, len(mgr.outputs)) for _, mo := range mgr.outputs { if mgr.getOutputStateStr(mo.Name) == collstore.StateRunning { outs = append(outs, mo) } } return outs } // specific outputs per target outs := make([]*ManagedOutput, 0, len(outputs)) for name, mo := range mgr.outputs { if _, ok := outputs[name]; !ok || mgr.getOutputStateStr(name) != collstore.StateRunning { continue } outs = append(outs, mo) } return outs } func (mgr *OutputsManager) createOutput(name string, cfg map[string]any) { typ, _ := cfg["type"].(string) f := mgr.OutputsFactory[typ] if f == nil { mgr.logger.Error("unknown output type", "name", name, "type", typ) mgr.setOutputState(name, collstore.StateFailed, fmt.Sprintf("unknown output type: %s", typ)) return } impl := f() opts := make([]outputs.Option, 0, 2) opts = append(opts, outputs.WithName(name), outputs.WithConfigStore(mgr.store.Config), outputs.WithLogger(log.New(os.Stdout, "", log.LstdFlags)), // temporary logger ) clustering, ok, err := mgr.store.Config.Get("global", "clustering") if err != nil { mgr.logger.Error("failed to get clustering for output", "name", name, "error", err) return } if ok { clus, ok := clustering.(map[string]any) if cname, cOk := clus["cluster-name"].(string); cOk && ok { opts = append(opts, outputs.WithClusterName(cname)) } } err = impl.Init(mgr.ctx, name, cfg, opts...) if err != nil { mgr.logger.Error("failed to init output", "name", name, "error", err) mgr.setOutputState(name, collstore.StateFailed, err.Error()) return } procs := extractProcessors(cfg) mo := &ManagedOutput{Name: name, Impl: impl, Cfg: cfg} mgr.mu.Lock() mgr.trackProcessorsInUse(name, procs) mgr.outputs[name] = mo mgr.mu.Unlock() mgr.setOutputState(name, collstore.StateRunning, "") } func (mgr *OutputsManager) updateOutput(name string, cfg map[string]any) { mgr.mu.Lock() defer mgr.mu.Unlock() mo, ok := mgr.outputs[name] if !ok { mgr.createOutput(name, cfg) return } mgr.logger.Info("updating output", "name", name, "cfg", cfg) mo.Lock() defer mo.Unlock() err := mo.Impl.Update(mgr.ctx, cfg) if err != nil { mgr.logger.Error("failed to update output", "name", name, "error", err) return } oldProcs := extractProcessors(mo.Cfg) newProcs := extractProcessors(cfg) mgr.logger.Info("tracking output processors in use", "name", name, "oldProcs", oldProcs, "newProcs", newProcs) mgr.untrackProcessorsInUse(name, oldProcs) mgr.trackProcessorsInUse(name, newProcs) mgr.logger.Info("updated output", "name", name, "cfg", cfg) mo.Cfg = cfg mgr.outputs[name] = mo } func (mgr *OutputsManager) StopOutput(name string) error { mgr.mu.Lock() defer mgr.mu.Unlock() mgr.logger.Info("finding output", "name", name) if mo, ok := mgr.outputs[name]; ok { mgr.logger.Info("stopping output", "name", name) mgr.setOutputState(name, collstore.StateStopping, "") err := mo.Impl.Close() if err != nil { mgr.logger.Error("failed to close output", "name", name, "error", err) return fmt.Errorf("failed to close output: %w", err) } procs := extractProcessors(mo.Cfg) mgr.untrackProcessorsInUse(name, procs) mgr.setOutputState(name, collstore.StateStopped, "") delete(mgr.outputs, name) } return nil } func (mgr *OutputsManager) DeleteOutput(name string) error { mgr.mu.Lock() defer mgr.mu.Unlock() mgr.logger.Info("deleting output", "name", name) if mo, ok := mgr.outputs[name]; ok { mgr.logger.Info("stopping output", "name", name) mgr.setOutputState(name, collstore.StateStopping, "") err := mo.Impl.Close() if err != nil { mgr.logger.Error("failed to close output", "name", name, "error", err) return fmt.Errorf("failed to close output: %w", err) } procs := extractProcessors(mo.Cfg) mgr.untrackProcessorsInUse(name, procs) mgr.setOutputState(name, collstore.StateStopped, "") delete(mgr.outputs, name) mgr.store.Config.Delete("outputs", name) } mgr.store.State.Delete(collstore.KindOutputs, name) return nil } func (mgr *OutputsManager) Stop() { mgr.mu.Lock() defer mgr.mu.Unlock() for _, mo := range mgr.outputs { mgr.setOutputState(mo.Name, collstore.StateStopped, "") err := mo.Impl.Close() if err != nil { mgr.logger.Error("failed to stop output", "name", mo.Name, "error", err) } } } func newOutputStats() *outputStats { return &outputStats{ msgCount: prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "outputs", Name: "msg_sent_to_output_count", Help: "Number of messages sent to the output", }, []string{"name"}), msgCountErr: prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "outputs", Name: "msg_failed_to_sent_to_output_count_error", Help: "Number of messages sent to the output with error", }, []string{"name"}), } } func (mgr *OutputsManager) registerMetrics() { if mgr.reg == nil { return } mgr.reg.MustRegister(mgr.stats.msgCount) mgr.reg.MustRegister(mgr.stats.msgCountErr) } func (mgr *OutputsManager) WriteToCache(ctx context.Context, msg *pipeline.Msg) { if mgr.cache == nil { return } if msg.Msg == nil { return } switch msg.Msg.(type) { case *gnmi.SubscribeResponse: subName, ok := msg.Meta["subscription-name"] if !ok || subName == "" { subName = "default" } targetName := utils.GetHost(msg.Meta["source"]) mgr.cache.Write(ctx, subName, addTargetToMsg(msg.Msg, targetName)) } } func addTargetToMsg(msg proto.Message, targetName string) proto.Message { switch msg := msg.(type) { case *gnmi.SubscribeResponse: switch rsp := msg.Response.(type) { case *gnmi.SubscribeResponse_Update: if rsp.Update.GetPrefix() == nil { rsp.Update.Prefix = new(gnmi.Path) } rsp.Update.Prefix.Target = targetName } } return msg } func extractProcessors(cfg map[string]any) []string { v, ok := cfg["event-processors"] if !ok { return nil } switch v := v.(type) { case []any: out := make([]string, 0, len(v)) for _, it := range v { if s, ok := it.(string); ok { out = append(out, s) } } return out case []string: return v } return nil } func (mgr *OutputsManager) trackProcessorsInUse(out string, procs []string) { for _, p := range procs { if mgr.processorsInUse[p] == nil { mgr.processorsInUse[p] = make(map[string]struct{}) } mgr.processorsInUse[p][out] = struct{}{} } } func (mgr *OutputsManager) untrackProcessorsInUse(out string, procs []string) { for _, p := range procs { if users, ok := mgr.processorsInUse[p]; ok { delete(users, out) if len(users) == 0 { delete(mgr.processorsInUse, p) } } } } func (mgr *OutputsManager) ProcessorInUse(name string) bool { mgr.mu.RLock() defer mgr.mu.RUnlock() users, ok := mgr.processorsInUse[name] if !ok { return false } return len(users) > 0 } // State store helpers func (mgr *OutputsManager) setOutputState(name, state, failedReason string) { os := &collstore.OutputState{ ComponentState: collstore.ComponentState{ // Name: name, IntendedState: collstore.IntendedStateEnabled, State: state, FailedReason: failedReason, LastUpdated: time.Now(), }, } mgr.store.State.Set(collstore.KindOutputs, name, os) } func (mgr *OutputsManager) getOutputStateStr(name string) string { os := mgr.GetOutputState(name) if os == nil { return "" } return os.State } // GetOutputState returns the runtime state of an output from the state store. func (mgr *OutputsManager) GetOutputState(name string) *collstore.OutputState { v, ok, err := mgr.store.State.Get(collstore.KindOutputs, name) if err != nil || !ok { return nil } os, ok := v.(*collstore.OutputState) if !ok { return nil } return os } // ListOutputStates returns all output states from the state store. func (mgr *OutputsManager) ListOutputStates() []*collstore.OutputState { states := make([]*collstore.OutputState, 0) mgr.store.State.List(collstore.KindOutputs, func(name string, v any) bool { if os, ok := v.(*collstore.OutputState); ok { states = append(states, os) } return false }) return states } ================================================ FILE: pkg/collector/managers/targets/cluster.go ================================================ package targets_manager import ( "fmt" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/config" ) func (tm *TargetsManager) isClustering() (*config.Clustering, bool, error) { clusterCfg, ok, err := tm.store.Config.Get("clustering", "clustering") if err != nil { return nil, false, err } if !ok { return nil, false, nil } clustering, ok := clusterCfg.(*config.Clustering) if ok { return clustering, true, nil } return nil, false, nil } func (tm *TargetsManager) amIAssigned(name string) bool { if !tm.incluster { return true // run all targets in standalone mode } tm.mas.RLock() defer tm.mas.RUnlock() _, ok := tm.assignments[name] return ok } func (tm *TargetsManager) setAssigned(name string, v bool) { tm.mas.Lock() if tm.assignments == nil { tm.assignments = map[string]struct{}{} } if v { tm.assignments[name] = struct{}{} } else { delete(tm.assignments, name) } tm.mas.Unlock() // if v { cfg, ok, err := tm.store.Config.Get("targets", name) if err != nil { tm.logger.Error("failed to get target", "target", name, "error", err) return } if ok { tcfg, tok := cfg.(*types.TargetConfig) if tok { tm.apply(name, tcfg) } else { tm.logger.Error("target config is not a types.TargetConfig", "target", name, "config", cfg) } } else { tm.logger.Error(" assignedtarget config not found", "target", name) } } else { tm.remove(name) } } func (tm *TargetsManager) targetLockKey(target string) string { return fmt.Sprintf("gnmic/%s/targets/%s", tm.clustering.ClusterName, target) } ================================================ FILE: pkg/collector/managers/targets/loader.go ================================================ package targets_manager import ( "context" "fmt" "github.com/openconfig/gnmic/pkg/loaders" ) func (tm *TargetsManager) initLoader(cfg map[string]any) (loaders.TargetLoader, error) { loaderType, ok := cfg["type"].(string) if !ok { return nil, fmt.Errorf("loader type is required") } for _, lt := range loaders.LoadersTypes { if lt == loaderType { init, ok := loaders.Loaders[loaderType] if !ok { return nil, fmt.Errorf("unknown loader type %q", loaderType) } loader := init() return loader, nil } } return nil, fmt.Errorf("unknown loader type %q", loaderType) } func (tm *TargetsManager) startLoader(ctx context.Context, loader loaders.TargetLoader) { ch := loader.Start(ctx) for { select { case <-ctx.Done(): tm.logger.Info("loader stopped") return case targetOp := <-ch: for _, add := range targetOp.Add { _, err := tm.store.Config.Set("targets", add.Name, add) if err != nil { tm.logger.Error("failed to add target from loader", "error", err, "target", add.Name) } } for _, del := range targetOp.Del { _, _, err := tm.store.Config.Delete("targets", del) if err != nil { tm.logger.Error("failed to delete target from loader", "error", err, "target", del) } } } } } ================================================ FILE: pkg/collector/managers/targets/metrics.go ================================================ package targets_manager import ( "time" "github.com/prometheus/client_golang/prometheus" ) const ( targetMetricsUpdatePeriod = 10 * time.Second ) type targetConnectionState int const ( targetConnectionStateUnknown targetConnectionState = iota targetConnectionStateIdle targetConnectionStateConnecting targetConnectionStateReady targetConnectionStateTransientFailure targetConnectionStateShutdown ) const ( targetConnectionStateUnknownStr = "UNKNOWN" targetConnectionStateIdleStr = "IDLE" targetConnectionStateConnectingStr = "CONNECTING" targetConnectionStateReadyStr = "READY" targetConnectionStateTransientFailureStr = "TRANSIENT_FAILURE" targetConnectionStateShutdownStr = "SHUTDOWN" ) func targetConnectionStateFromStr(str string) targetConnectionState { switch str { case targetConnectionStateUnknownStr: return targetConnectionStateUnknown case targetConnectionStateIdleStr: return targetConnectionStateIdle case targetConnectionStateConnectingStr: return targetConnectionStateConnecting case targetConnectionStateReadyStr: return targetConnectionStateReady case targetConnectionStateTransientFailureStr: return targetConnectionStateTransientFailure case targetConnectionStateShutdownStr: return targetConnectionStateShutdown } return targetConnectionStateUnknown } func (tcs targetConnectionState) String() string { switch tcs { case targetConnectionStateUnknown: return targetConnectionStateUnknownStr case targetConnectionStateIdle: return targetConnectionStateIdleStr case targetConnectionStateConnecting: return targetConnectionStateConnectingStr case targetConnectionStateReady: return targetConnectionStateReadyStr case targetConnectionStateTransientFailure: return targetConnectionStateTransientFailureStr case targetConnectionStateShutdown: return targetConnectionStateShutdownStr } return "" } type targetsStats struct { subscribeResponseReceived *prometheus.CounterVec droppedSubscribeResponses *prometheus.CounterVec subscriptionFailedCount *prometheus.CounterVec targetUPMetric *prometheus.GaugeVec targetConnStateMetric *prometheus.GaugeVec } const ( subscriptionRequestErrorTypeUnknown string = "UNKNOWN" subscriptionRequestErrorTypeCONFIG string = "CONFIG_ERROR" subscriptionRequestErrorTypeGRPC string = "GRPC_ERROR" ) func newTargetsStats() *targetsStats { return &targetsStats{ subscribeResponseReceived: prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "targets", Name: "subscribe_response_received_count", Help: "Number of subscribe responses received", }, []string{"target", "subscription"}), droppedSubscribeResponses: prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "targets", Name: "dropped_subscribe_responses_count", Help: "Number of dropped subscribe responses", }, []string{"target", "subscription"}), subscriptionFailedCount: prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "subscribe", Name: "number_of_failed_subscribe_request_messages_total", Help: "Total number of failed subscribe requests", }, []string{"target", "subscription", "error_type"}), targetUPMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "target", Name: "up", Help: "Has value 1 if the gNMI target is configured; otherwise, 0.", }, []string{"name"}), targetConnStateMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "target", Name: "connection_state", Help: "The current gRPC connection state to the target. The value can be one of the following: 0(UNKNOWN), 1 (IDLE), 2 (CONNECTING), 3 (READY), 4 (TRANSIENT_FAILURE), or 5 (SHUTDOWN).", }, []string{"name"}), } } func (tm *TargetsManager) registerMetrics() { tm.reg.MustRegister(tm.stats.targetUPMetric) tm.reg.MustRegister(tm.stats.targetConnStateMetric) tm.reg.MustRegister(tm.stats.subscribeResponseReceived) tm.reg.MustRegister(tm.stats.droppedSubscribeResponses) tm.reg.MustRegister(tm.stats.subscriptionFailedCount) tm.mu.RLock() for _, mt := range tm.targets { tm.updateTargetMetrics(mt) } tm.mu.RUnlock() go func() { ticker := time.NewTicker(targetMetricsUpdatePeriod) defer ticker.Stop() for { select { case <-tm.ctx.Done(): return case <-ticker.C: tm.mu.RLock() for _, mt := range tm.targets { tm.updateTargetMetrics(mt) } tm.mu.RUnlock() } } }() } func (tm *TargetsManager) updateTargetMetrics(mt *ManagedTarget) { if mt.T == nil { tm.stats.targetUPMetric.WithLabelValues(mt.Name).Set(0) tm.stats.targetConnStateMetric.WithLabelValues(mt.Name).Set(0) return } tm.stats.targetUPMetric.WithLabelValues(mt.Name).Set(1) targetConnState := targetConnectionStateFromStr(mt.T.ConnState()) tm.stats.targetConnStateMetric.WithLabelValues(mt.Name).Set(float64(targetConnState)) } ================================================ FILE: pkg/collector/managers/targets/targets_manager.go ================================================ package targets_manager import ( "context" "encoding/json" "fmt" "hash/fnv" "log" "log/slog" "maps" "net" "os" "reflect" "slices" "sort" "sync" "time" "github.com/openconfig/gnmic/pkg/api/target" "github.com/openconfig/gnmic/pkg/api/types" apiutils "github.com/openconfig/gnmic/pkg/api/utils" collstore "github.com/openconfig/gnmic/pkg/collector/store" "github.com/openconfig/gnmic/pkg/config" "github.com/openconfig/gnmic/pkg/loaders" "github.com/openconfig/gnmic/pkg/lockers" "github.com/openconfig/gnmic/pkg/logging" "github.com/openconfig/gnmic/pkg/outputs" "github.com/openconfig/gnmic/pkg/pipeline" "github.com/openconfig/gnmic/pkg/utils" "github.com/openconfig/grpctunnel/tunnel" "github.com/prometheus/client_golang/prometheus" "github.com/zestor-dev/zestor/store" "google.golang.org/grpc" ) type ManagedTarget struct { sync.RWMutex Name string cfg *types.TargetConfig T *target.Target tunServer *tunnel.Server // reader readerCtx context.Context readerCancel context.CancelFunc mu *sync.Mutex readersCfn map[string]context.CancelFunc readerWG sync.WaitGroup lastError string // last error message, protected by mu outputs map[string]struct{} appliedSubscriptions []string } func (mt *ManagedTarget) setLastError(msg string) { mt.mu.Lock() mt.lastError = msg mt.mu.Unlock() } func (mt *ManagedTarget) getLastError() string { mt.mu.Lock() defer mt.mu.Unlock() return mt.lastError } func (mt *ManagedTarget) clearLastError() { mt.mu.Lock() mt.lastError = "" mt.mu.Unlock() } func newManagedTarget(name string, cfg *types.TargetConfig, tunServer *tunnel.Server) *ManagedTarget { nt := target.NewTarget(cfg) mt := &ManagedTarget{ Name: name, cfg: cfg, T: nt, tunServer: tunServer, outputs: make(map[string]struct{}, len(cfg.Outputs)), mu: new(sync.Mutex), readersCfn: make(map[string]context.CancelFunc), appliedSubscriptions: make([]string, 0, len(cfg.Subscriptions)), } for _, output := range cfg.Outputs { mt.outputs[output] = struct{}{} } return mt } // TargetsManager owns target lifecycle (connect/stop) and per-target subscriptions hookups (started by SubscriptionsManager). type TargetsManager struct { ctx context.Context cancel context.CancelFunc store *collstore.Store // pipe to outputsManager out chan *pipeline.Msg // target state mu sync.RWMutex targets map[string]*ManagedTarget // subscriptions subscriptions map[string]*types.SubscriptionConfig ts *tunnelServer logger *slog.Logger stats *targetsStats // clustring clustering *config.Clustering locker lockers.Locker incluster bool mas *sync.RWMutex assignments map[string]struct{} reg *prometheus.Registry } func NewTargetsManager(ctx context.Context, store *collstore.Store, pipeline chan *pipeline.Msg, reg *prometheus.Registry) *TargetsManager { ctx, cancel := context.WithCancel(ctx) ts := newTunnelServer(store.Config, reg) tm := &TargetsManager{ ctx: ctx, cancel: cancel, store: store, out: pipeline, targets: map[string]*ManagedTarget{}, subscriptions: map[string]*types.SubscriptionConfig{}, ts: ts, stats: newTargetsStats(), mas: new(sync.RWMutex), assignments: make(map[string]struct{}), reg: reg, } tm.registerMetrics() return tm } func (tm *TargetsManager) Start(locker lockers.Locker, wg *sync.WaitGroup) error { tm.logger = logging.NewLogger(tm.store.Config, "component", "targets-manager") tm.logger.Info("starting targets manager") tm.locker = locker clustering, ok, err := tm.isClustering() if err != nil { return err } tm.incluster = ok && clustering != nil if tm.incluster { tm.logger.Info("clustering is enabled", "clustering", clustering) tm.clustering = clustering } // start tunnel server go func() { err := tm.ts.startTunnelServer(tm.ctx) if err != nil { tm.logger.Error("failed to start tunnel server", "error", err) } }() tm.logger.Info("starting targets watcher") targetsCh, targetsCancel, err := tm.store.Config.Watch("targets", store.WithInitialReplay[any]()) if err != nil { return err } tm.logger.Info("starting subscriptions watcher") subscriptionsCh, subscriptionsCancel, err := tm.store.Config.Watch("subscriptions", store.WithInitialReplay[any]()) if err != nil { return err } cfg, ok, err := tm.store.Config.Get("loader", "loader") if err != nil { return fmt.Errorf("failed to get loader config: %w", err) } var loaderTargetOpCh <-chan *loaders.TargetOperation var loaderCfn context.CancelFunc if ok && cfg != nil { loaderCfg, ok := cfg.(map[string]any) if ok && len(loaderCfg) > 0 { loader, err := tm.initLoader(loaderCfg) if err != nil { return err } err = loader.Init(tm.ctx, loaderCfg, log.New(os.Stderr, "", apiutils.DefaultLoggingFlags), // TODO: use logger loaders.WithRegistry(tm.reg), loaders.WithTargetsDefaults(func(tc *types.TargetConfig) error { return config.SetTargetConfigDefaultsExpandEnv(tm.store.Config, tc) }), ) if err != nil { return err } tm.logger.Info("starting loader", "loader", loader) var ctx context.Context ctx, loaderCfn = context.WithCancel(tm.ctx) go tm.startLoader(ctx, loader) } } var assignmentsCancel func() var assignmentsCh <-chan *store.Event[any] if clustering != nil { tm.logger.Info("clustering is enabled", "clustering", clustering) // watch assignments assignmentsCh, assignmentsCancel, err = tm.store.Config.Watch("assignments", store.WithInitialReplay[any]()) // TODO: no initial replay ? if err != nil { if loaderCfn != nil { loaderCfn() } subscriptionsCancel() targetsCancel() return fmt.Errorf("failed to watch assignments: %w", err) } } wg.Add(1) go func() { defer wg.Done() defer targetsCancel() defer subscriptionsCancel() defer func() { if loaderCfn != nil { loaderCfn() } }() if clustering != nil { defer assignmentsCancel() } for { select { case <-tm.ctx.Done(): return case ev, ok := <-targetsCh: if !ok { return } tm.logger.Debug("got target event", "eventType", ev.EventType, "name", ev.Name) if !tm.amIAssigned(ev.Name) { tm.logger.Debug("target is not assigned to this instance", "target", ev.Name) continue } else { tm.logger.Debug("target is assigned to this instance", "target", ev.Name) } switch ev.EventType { case store.EventTypeCreate, store.EventTypeUpdate: cfg := ev.Object.(*types.TargetConfig) tm.apply(ev.Name, cfg) tm.stats.targetUPMetric.WithLabelValues(ev.Name).Set(1) case store.EventTypeDelete: tm.remove(ev.Name) tm.stats.targetUPMetric.WithLabelValues(ev.Name).Set(0) tm.stats.targetConnStateMetric.WithLabelValues(ev.Name).Set(0) } case op, ok := <-loaderTargetOpCh: if !ok { return } tm.logger.Info("got loader target operation", "operation", op) for _, add := range op.Add { _, err := tm.store.Config.Set("targets", add.Name, add) if err != nil { tm.logger.Error("failed to add target from loader", "error", err, "target", add.Name) } } for _, del := range op.Del { _, _, err := tm.store.Config.Delete("targets", del) if err != nil { tm.logger.Error("failed to delete target from loader", "error", err, "target", del) } } case ev, ok := <-subscriptionsCh: if !ok { return } tm.logger.Info("got subscription event", "event", ev, "objectType", reflect.TypeOf(ev.Object)) cfg, ok := ev.Object.(*types.SubscriptionConfig) if !ok { continue } switch ev.EventType { case store.EventTypeCreate: tm.applySubscription(ev.Name, *cfg) case store.EventTypeUpdate: tm.applySubscription(ev.Name, *cfg) case store.EventTypeDelete: tm.removeSubscription(ev.Name) } case ev, ok := <-assignmentsCh: if !ok { return } tm.logger.Info("got assignment event", "event", ev) switch ev.EventType { case store.EventTypeCreate: tm.setAssigned(ev.Name, true) case store.EventTypeUpdate: tm.setAssigned(ev.Name, true) // can this happen? yes if we add epoch/term to assignments case store.EventTypeDelete: tm.setAssigned(ev.Name, false) } go tm.reconcileAssignment(ev.Name) } } }() return nil } func (tm *TargetsManager) Stop() { if tm.cancel != nil { tm.cancel() tm.cancel = nil } } func (tm *TargetsManager) apply(name string, cfg *types.TargetConfig) { tm.logger.Info("applying target config", "name", name, "cfg", cfg) var mt *ManagedTarget created := false defer func() { tm.updateTargetMetrics(mt) }() tm.mu.Lock() mt = tm.targets[name] if mt == nil { mt = newManagedTarget(name, cfg.DeepCopy(), tm.ts.tunServer) tm.targets[name] = mt created = true } tm.mu.Unlock() if created { tm.logger.Info("starting created target", "name", name) mt.Lock() defer mt.Unlock() if err := tm.start(mt); err != nil { tm.logger.Error("failed to start target", "name", name, "error", err) mt.setLastError(err.Error()) tm.setTargetState(name, collstore.StateFailed) return } mt.clearLastError() tm.setTargetState(name, collstore.StateRunning) return } mt.Lock() defer mt.Unlock() if mt.T.Config.Equal(cfg) { return } tm.logger.Info("target config changed", "name", name, "old", mt.T.Config, "new", cfg) if !shouldReconnect(mt.T.Config, cfg) { // subscriptions // compare applied subscriptions with new subscriptions. // !Do not mutate the current config subscriptions list!. if !reflect.DeepEqual(mt.appliedSubscriptions, cfg.Subscriptions) { tm.logger.Info("subscriptions changed", "name", name, "old", mt.T.Config.Subscriptions, "new", cfg.Subscriptions) if added, removed := tm.compareSubscriptions(mt.T.Config.Subscriptions, cfg.Subscriptions); len(added) > 0 || len(removed) > 0 { tm.logger.Info("subscriptions added", "name", name, "added", added) tm.logger.Info("subscriptions removed", "name", name, "removed", removed) for _, sub := range added { tm.logger.Info("starting target subscription", "name", sub, "target", name) cfg, exists, err := tm.store.Config.Get("subscriptions", sub) if err != nil { tm.logger.Error("failed to get subscription", "name", sub, "target", name, "error", err) continue } if !exists { tm.logger.Error("subscription not found", "name", sub, "target", name) continue } scfg := cfg.(*types.SubscriptionConfig) scfg.Name = sub mt.appliedSubscriptions = append(mt.appliedSubscriptions, sub) err = tm.startTargetSubscription(mt, scfg) if err != nil { tm.logger.Error("failed to start target subscription", "name", sub, "target", name, "error", err) continue } } for _, sub := range removed { mt.mu.Lock() cfn, exists := mt.readersCfn[sub] if exists { cfn() delete(mt.readersCfn, sub) } mt.mu.Unlock() tm.logger.Info("stopping target subscription", "name", sub, "target", name) mt.T.StopSubscription(sub) delete(mt.T.Subscriptions, sub) mt.appliedSubscriptions = slices.DeleteFunc(mt.appliedSubscriptions, func(s string) bool { return s == sub }) tm.logger.Info("target subscription stopped", "name", sub, "target", name) } } else { tm.logger.Info("subscriptions unchanged", "name", name, "old", mt.T.Config.Subscriptions, "new", cfg.Subscriptions) } mt.T.Config.Subscriptions = cfg.Subscriptions } else { tm.logger.Info("subscriptions unchanged", "name", name, "old", mt.T.Config.Subscriptions, "new", cfg.Subscriptions) } // outputs if !reflect.DeepEqual(mt.T.Config.Outputs, cfg.Outputs) { tm.logger.Info("outputs changed", "name", name, "old", mt.T.Config.Outputs, "new", cfg.Outputs) if added, removed := tm.compareOutputs(mt.T.Config, cfg); len(added) > 0 || len(removed) > 0 { tm.logger.Info("outputs added", "name", name, "added", added) tm.logger.Info("outputs removed", "name", name, "removed", removed) for _, output := range added { mt.outputs[output] = struct{}{} } for _, output := range removed { delete(mt.outputs, output) } } else { tm.logger.Info("outputs unchanged", "name", name, "old", mt.T.Config.Outputs, "new", cfg.Outputs) } mt.T.Config.Outputs = cfg.Outputs } else { tm.logger.Info("outputs unchanged", "name", name, "old", mt.T.Config.Outputs, "new", cfg.Outputs) } return } // simply reconnect err := tm.stop(mt) if err != nil { tm.logger.Error("failed to stop target", "name", name, "error", err) mt.setLastError(err.Error()) tm.setTargetState(name, collstore.StateFailed) } mt.T.Config = cfg err = tm.start(mt) if err != nil { tm.logger.Error("failed to start target", "name", name, "error", err) mt.setLastError(err.Error()) tm.setTargetState(name, collstore.StateFailed) } } // assumes the managed target is locked func (tm *TargetsManager) start(mt *ManagedTarget) error { tm.logger.Info("starting target", "name", mt.Name) if tm.getTargetStateStr(mt.Name) == collstore.StateRunning { return nil } mt.clearLastError() tm.setTargetState(mt.Name, collstore.StateStarting) ctx, cfn := context.WithCancel(tm.ctx) mt.T.Cfn = cfn tm.logger.Info("creating gNMI client", "name", mt.Name) err := mt.T.CreateGNMIClient(ctx, tm.targetGRPCOpts(ctx, mt)...) if err != nil { tm.logger.Error("failed to create gNMI client", "name", mt.Name, "error", err) mt.setLastError(err.Error()) tm.setTargetState(mt.Name, collstore.StateFailed) return err } if tm.locker != nil { tm.logger.Info("acquiring lock for target", "name", mt.Name) ok, err := tm.locker.Lock(ctx, tm.targetLockKey(mt.Name), []byte(tm.clustering.InstanceName)) if err != nil { tm.logger.Error("failed to acquire lock for target", "name", mt.Name, "error", err) mt.setLastError(err.Error()) tm.setTargetState(mt.Name, collstore.StateFailed) _ = tm.stop(mt) return err } if !ok { tm.logger.Error("failed to acquire lock for target", "name", mt.Name) mt.setLastError("lock not acquired") tm.setTargetState(mt.Name, collstore.StateFailed) _ = tm.stop(mt) return err } // keep lock go func() { doneCh, errCh := tm.locker.KeepLock(ctx, tm.targetLockKey(mt.Name)) for { select { case <-doneCh: tm.logger.Info("lock for target released", "name", mt.Name) return case err := <-errCh: tm.logger.Error("failed to maintain lock for target", "name", mt.Name, "error", err) _ = tm.stop(mt) mt.setLastError(err.Error()) tm.setTargetState(mt.Name, collstore.StateFailed) return case <-ctx.Done(): tm.logger.Info("lock for target released", "name", mt.Name) _ = tm.stop(mt) return } } }() } tm.logger.Info("gNMI client created", "name", mt.Name) tm.setTargetState(mt.Name, collstore.StateRunning) // Watch gRPC connectivity state changes and keep the state store current. go tm.watchConnState(ctx, mt) tm.logger.Info("target started", "name", mt.Name) _, err = mt.T.Capabilities(ctx) if err != nil { tm.logger.Error("failed capabilities request", "name", mt.Name, "error", err) mt.setLastError(err.Error()) tm.setTargetState(mt.Name, collstore.StateFailed) return err } tm.logger.Info("capabilities request successful", "name", mt.Name) // start subscriptions subs := mt.T.Config.Subscriptions if len(subs) == 0 { // if target has no explicit subs, attach all known subs tm.mu.RLock() subs = make([]string, 0, len(tm.subscriptions)) for name := range tm.subscriptions { subs = append(subs, name) } tm.mu.RUnlock() // reflect the effective subs into the target's config so future diffs see them mt.appliedSubscriptions = append(mt.appliedSubscriptions, subs...) } for _, sub := range subs { tm.logger.Info("starting target subscription", "name", sub, "target", mt.Name) tm.mu.RLock() cfg := tm.subscriptions[sub] tm.mu.RUnlock() if cfg == nil { obj, exists, err := tm.store.Config.Get("subscriptions", sub) if err != nil { tm.logger.Error("failed to get subscription", "name", sub, "target", mt.Name, "error", err) continue } if !exists { tm.logger.Error("subscription not found", "name", sub, "target", mt.Name) continue } c := obj.(*types.SubscriptionConfig) cfg = c } cfg.Name = sub err = tm.startTargetSubscription(mt, cfg) if err != nil { tm.logger.Error("failed to start target subscription", "name", sub, "target", mt.Name, "error", err) continue } } // Refresh state now that subscriptions have been kicked off. // Individual subscription goroutines will update state again // once their SubscribeClients are established. tm.setTargetState(mt.Name, collstore.StateRunning) return nil } func (tm *TargetsManager) targetGRPCOpts(ctx context.Context, mt *ManagedTarget) []grpc.DialOption { if mt.cfg.TunnelTargetType != "" { return []grpc.DialOption{grpc.WithContextDialer(tm.tunDialerFn(ctx, mt))} } return nil } func (tm *TargetsManager) tunDialerFn(ctx context.Context, mt *ManagedTarget) func(context.Context, string) (net.Conn, error) { return func(_ context.Context, _ string) (net.Conn, error) { tt := tunnel.Target{ID: mt.cfg.Name, Type: mt.cfg.TunnelTargetType} ctx, cancel := context.WithTimeout(ctx, mt.cfg.Timeout) defer cancel() conn, err := tunnel.ServerConn(ctx, tm.ts.tunServer, &tt) if err != nil { tm.logger.Error("failed dialing tunnel connection for target", "name", mt.Name, "error", err) return nil, err } return conn, nil } } func (tm *TargetsManager) stop(mt *ManagedTarget) error { if tm.getTargetStateStr(mt.Name) == collstore.StateStopped { return nil } mt.clearLastError() tm.setTargetState(mt.Name, collstore.StateStopping) // stop reader loop if mt.readerCancel != nil { mt.readerCancel() mt.readerWG.Wait() mt.readerCancel = nil } // stop all per-target subscriptions and locker if any if mt.T.Cfn != nil { mt.T.Cfn() } tm.logger.Info("closing target", "name", mt.Name) err := mt.T.Close() if err != nil { tm.logger.Error("failed to close target", "name", mt.Name, "error", err) } else { tm.logger.Info("closed target", "name", mt.Name) } tm.setTargetState(mt.Name, collstore.StateStopped) if tm.locker != nil { tm.logger.Info("releasing lock for target", "name", mt.Name) err := tm.locker.Unlock(tm.ctx, tm.targetLockKey(mt.Name)) if err != nil { tm.logger.Error("failed to release lock for target", "name", mt.Name, "error", err) } } return nil } func (tm *TargetsManager) remove(name string) { tm.mu.Lock() mt := tm.targets[name] delete(tm.targets, name) tm.mu.Unlock() if mt != nil { mt.Lock() _ = tm.stop(mt) mt.T = nil mt.outputs = nil mt.readerCtx = nil mt.readerCancel = nil mt.Unlock() } tm.store.State.Delete(collstore.KindTargets, name) } // apply subscription to all targets that reference it or to those that do not reference any subscription func (tm *TargetsManager) applySubscription(name string, cfg types.SubscriptionConfig) { tm.logger.Info("applying subscription", "name", name, "cfg", cfg) cfg.Name = name tm.mu.Lock() tm.subscriptions[name] = &cfg tm.logger.Info("subscriptions", "subscriptions", tm.subscriptions) for _, mt := range tm.targets { tm.logger.Info("target", "target", mt.Name, "subscriptions", mt.T.Config.Subscriptions) if len(mt.T.Config.Subscriptions) > 0 { if !slices.Contains(mt.T.Config.Subscriptions, name) { tm.logger.Info("subscription not in target's explicit list", "subscription", name, "target", mt.Name) continue } } tm.logger.Info("(re)starting target subscription", "name", name, "target", mt.Name) // Stop and WAIT for the old subscription to fully terminate mt.mu.Lock() cfn, exists := mt.readersCfn[name] if exists { tm.logger.Info("canceling subscription context", "name", name, "target", mt.Name) cfn() // Cancel the context tm.logger.Info("deleted subscription context", "name", name, "target", mt.Name) delete(mt.readersCfn, name) // Remove from map } mt.mu.Unlock() tm.logger.Info("stopping target subscription", "name", name, "target", mt.Name) mt.T.StopSubscription(name) tm.logger.Info("stopped target subscription", "name", name, "target", mt.Name) // Wait for the reader goroutine to finish mt.T.Subscriptions[name] = &cfg err := tm.startTargetSubscription(mt, &cfg) if err != nil { tm.logger.Error("failed to start target subscription", "subscription", name, "target", mt.Name, "error", err) } } tm.mu.Unlock() } // remove subscription from targets that already reference it and have it running func (tm *TargetsManager) removeSubscription(name string) { tm.mu.Lock() delete(tm.subscriptions, name) for _, mt := range tm.targets { mt.mu.Lock() cfn, exists := mt.readersCfn[name] if exists { cfn() delete(mt.readersCfn, name) } mt.mu.Unlock() mt.T.StopSubscription(name) delete(mt.T.Subscriptions, name) } tm.mu.Unlock() } func (tm *TargetsManager) reconcileAssignment(name string) { if !tm.amIAssigned(name) { if mt := tm.Lookup(name); mt != nil && tm.getTargetStateStr(name) == collstore.StateRunning { _ = tm.stop(mt) } return } // get targetConfig cfg, ok := tm.getConfig(name) if !ok { tm.logger.Info("assigned but config not present yet; will retry on next event", "target", name) return } // Ensure ManagedTarget exists tm.mu.Lock() mt := tm.targets[name] if mt == nil { mt = newManagedTarget(name, cfg, tm.ts.tunServer) tm.targets[name] = mt } tm.mu.Unlock() // lock managed target mt.Lock() defer mt.Unlock() // check if config has changed if reflect.DeepEqual(mt.T.Config, cfg) { return } // check if should reconnect shouldReconnect := shouldReconnect(mt.T.Config, cfg) if !shouldReconnect { return } // simply reconnect err := tm.stop(mt) if err != nil { tm.logger.Error("failed to stop target", "name", name, "error", err) mt.setLastError(err.Error()) tm.setTargetState(name, collstore.StateFailed) } mt.T.Config = cfg err = tm.start(mt) if err != nil { tm.logger.Error("failed to start target", "name", name, "error", err) mt.setLastError(err.Error()) tm.setTargetState(name, collstore.StateFailed) } } func (tm *TargetsManager) getConfig(name string) (*types.TargetConfig, bool) { v, ok, err := tm.store.Config.Get("targets", name) if err != nil || !ok || v == nil { return nil, false } cfg, ok := v.(*types.TargetConfig) return cfg, ok } func (tm *TargetsManager) Lookup(name string) *ManagedTarget { tm.mu.RLock() defer tm.mu.RUnlock() return tm.targets[name] } func (tm *TargetsManager) ForEach(fn func(*ManagedTarget)) { tm.mu.RLock() defer tm.mu.RUnlock() for _, mt := range tm.targets { fn(mt) } } func (tm *TargetsManager) SetIntendedState(name string, state string) bool { tm.mu.Lock() defer tm.mu.Unlock() mt := tm.targets[name] if mt == nil { return false } mt.Lock() defer mt.Unlock() currentState := tm.getTargetStateStr(name) switch state { case collstore.IntendedStateEnabled: if currentState == collstore.StateRunning || currentState == collstore.StateStarting { return false } _ = tm.start(mt) case collstore.IntendedStateDisabled: if currentState == collstore.StateStopped || currentState == collstore.StateStopping { return false } _ = tm.stop(mt) } return true } func (tm *TargetsManager) GetIntendedState(name string) string { ts := tm.GetTargetState(name) if ts == nil { return "" } return ts.IntendedState } func (tm *TargetsManager) startTargetSubscription(mt *ManagedTarget, cfg *types.SubscriptionConfig) error { var defaultEncoding = "json" defaultEncodingVal, exists, err := tm.store.Config.Get("globalConfig", "defaultEncoding") if err != nil { tm.logger.Error("failed to get default encoding", "error", err) return err } if exists { var ok bool defaultEncoding, ok = defaultEncodingVal.(string) if !ok { tm.logger.Error("default encoding is not a string", "defaultEncodingVal", defaultEncodingVal) } } subreq, err := utils.CreateSubscribeRequest(cfg, mt.T.Config, defaultEncoding) if err != nil { tm.stats.subscriptionFailedCount.WithLabelValues(mt.Name, cfg.Name, subscriptionRequestErrorTypeCONFIG).Inc() tm.logger.Error("failed to create subscribe request", "target", mt.Name, "subscription", cfg.Name, "error", err) return err } tm.logger.Info("starting target Subscribe RPC", "name", cfg.Name, "target", mt.Name) mt.T.Subscriptions[cfg.Name] = cfg mt.readerWG.Add(1) sctx, cfn := context.WithCancel(tm.ctx) mt.mu.Lock() mt.readersCfn[cfg.Name] = cfn mt.mu.Unlock() subscriptionOutputs := make(map[string]struct{}, len(cfg.Outputs)) for _, output := range cfg.Outputs { subscriptionOutputs[output] = struct{}{} } respCh, errCh := mt.T.SubscribeChan(sctx, subreq, cfg.Name) go func() { defer mt.readerWG.Done() // When the goroutine exits (subscription stopped/cancelled), refresh // the target state so the subscriptions map is up-to-date. defer func() { currentState := tm.getTargetStateStr(mt.Name) if currentState != "" { tm.setTargetState(mt.Name, currentState) } }() initialResponse := true for { select { case <-sctx.Done(): return case resp, ok := <-respCh: if !ok { return } // The first response confirms the subscription is connected. // Refresh target state so the subscriptions map shows "running". if initialResponse { initialResponse = false mt.clearLastError() tm.setTargetState(mt.Name, collstore.StateRunning) } tm.stats.subscribeResponseReceived.WithLabelValues(mt.Name, resp.SubscriptionName).Inc() outs := func() map[string]struct{} { if len(subscriptionOutputs) > 0 { cp := make(map[string]struct{}, len(subscriptionOutputs)) maps.Copy(cp, subscriptionOutputs) return cp } mt.RLock() defer mt.RUnlock() cp := make(map[string]struct{}, len(mt.outputs)) for k := range mt.outputs { cp[k] = struct{}{} } return cp }() select { case tm.out <- &pipeline.Msg{ Msg: resp.Response, Meta: outputs.Meta{ "source": mt.Name, "subscription-name": resp.SubscriptionName, }, Outputs: outs, }: default: tm.stats.droppedSubscribeResponses.WithLabelValues(mt.Name, resp.SubscriptionName).Inc() // If downstream is slow, you can drop, count, or block; here we drop to keep reader healthy. tm.logger.Warn("pipeline backpressure: dropping response", "target", mt.Name) } case err, ok := <-errCh: if !ok { return } // Reset so the next successful response after retry // triggers a state update back to "running". initialResponse = true mt.setLastError(err.Err.Error()) currentState := tm.getTargetStateStr(mt.Name) if currentState != "" { tm.setTargetState(mt.Name, currentState) } tm.stats.subscriptionFailedCount.WithLabelValues(mt.Name, err.SubscriptionName, subscriptionRequestErrorTypeGRPC).Inc() tm.logger.Error("subscription error", "error", err) } } }() return nil } func shouldReconnect(old, new *types.TargetConfig) bool { if old == nil && new != nil { return true } if new == nil && old != nil { return true } ho, _ := hashConnSpec(old) hn, _ := hashConnSpec(new) return ho != hn } // TODO: optimize this func (tm *TargetsManager) compareSubscriptions(old, new []string) (added, removed []string) { var subscriptionsList []string var err error if len(new) == 0 || len(old) == 0 { // get all subscriptions from the store subscriptionsList, err = tm.store.Config.Keys("subscriptions") if err != nil { tm.logger.Error("failed to get subscriptions from store", "error", err) return nil, nil } } if len(new) == 0 { new = subscriptionsList } if len(old) == 0 { old = subscriptionsList } oldSubs := make(map[string]struct{}, len(old)) newSubs := make(map[string]struct{}, len(new)) for _, sub := range old { oldSubs[sub] = struct{}{} } for _, sub := range new { newSubs[sub] = struct{}{} } for _, sub := range old { if _, ok := newSubs[sub]; !ok { removed = append(removed, sub) } } for _, sub := range new { if _, ok := oldSubs[sub]; !ok { added = append(added, sub) } } return added, removed } func (tm *TargetsManager) compareOutputs(old, new *types.TargetConfig) (added, removed []string) { if len(new.Outputs) == 0 { // get all outputs from the store outputs, err := tm.store.Config.List("outputs") if err != nil { tm.logger.Error("failed to get outputs", "error", err) return nil, nil } new.Outputs = keys(outputs) } if len(old.Outputs) == 0 { // get all outputs from the store outputs, err := tm.store.Config.List("outputs") if err != nil { tm.logger.Error("failed to get outputs", "error", err) return nil, nil } old.Outputs = keys(outputs) return nil, old.Outputs } oldOutputs := make(map[string]struct{}, len(old.Outputs)) newOutputs := make(map[string]struct{}, len(new.Outputs)) for _, output := range old.Outputs { oldOutputs[output] = struct{}{} } for _, output := range new.Outputs { newOutputs[output] = struct{}{} } for _, output := range old.Outputs { if _, ok := newOutputs[output]; !ok { removed = append(removed, output) } } for _, output := range new.Outputs { if _, ok := oldOutputs[output]; !ok { added = append(added, output) } } return added, removed } func keys[T any](m map[string]T) []string { keys := make([]string, 0, len(m)) for k := range m { keys = append(keys, k) } return keys } // connSpec is the set of target parameters that affect the connection type connSpec struct { Address string Username string Password string AuthScheme string Token string Proxy string Timeout time.Duration TCPKeepalive time.Duration GRPCKeepalive *types.ClientKeepalive // TLS Insecure bool TLSCA string TLSCert string TLSKey string SkipVerify bool TLSServerName string TLSMinVersion string TLSMaxVersion string TLSVersion string CipherSuites []string // Dial options that affect transport Encoding string Gzip bool } func hashConnSpec(cfg *types.TargetConfig) (uint64, error) { spec := connSpecFrom(cfg) b, err := json.Marshal(spec) if err != nil { return 0, err } h := fnv.New64a() _, _ = h.Write(b) return h.Sum64(), nil } func connSpecFrom(tc *types.TargetConfig) connSpec { cs := make([]string, len(tc.CipherSuites)) copy(cs, tc.CipherSuites) sort.Strings(cs) spec := connSpec{ Address: tc.Address, Username: val(tc.Username), Password: val(tc.Password), AuthScheme: tc.AuthScheme, Token: val(tc.Token), Proxy: tc.Proxy, Timeout: tc.Timeout, TCPKeepalive: tc.TCPKeepalive, GRPCKeepalive: tc.GRPCKeepalive, Insecure: val(tc.Insecure), TLSCA: val(tc.TLSCA), TLSCert: val(tc.TLSCert), TLSKey: val(tc.TLSKey), SkipVerify: val(tc.SkipVerify), TLSServerName: tc.TLSServerName, TLSMinVersion: tc.TLSMinVersion, TLSMaxVersion: tc.TLSMaxVersion, TLSVersion: tc.TLSVersion, CipherSuites: cs, Encoding: val(tc.Encoding), Gzip: val(tc.Gzip), } return spec } func val[T any](p *T) T { var z T if p == nil { return z } return *p } // watchConnState monitors the gRPC connectivity state of a target and updates // the state store whenever it changes. It runs until ctx is cancelled (i.e. // the target is stopped). func (tm *TargetsManager) watchConnState(ctx context.Context, mt *ManagedTarget) { for { currentState := mt.T.ConnectivityState() // Block until the gRPC connection transitions away from currentState // or the context is cancelled. changed := mt.T.WaitForConnStateChange(ctx, currentState) if !changed { // ctx was cancelled — target is shutting down return } newState := mt.T.ConnectivityState() tm.logger.Debug("target connectivity state changed", "name", mt.Name, "from", currentState.String(), "to", newState.String(), ) // Refresh the full target state in the store (picks up the new // ConnectionState via mt.T.ConnState()). targetState := tm.getTargetStateStr(mt.Name) if targetState != "" { tm.setTargetState(mt.Name, targetState) } } } // State store helpers // setTargetState writes the full TargetState (including connection state and // per-subscription states) to the state store. The failed reason is read from // the ManagedTarget's lastError field (protected by mt.mu), so callers that // want to set or clear an error must call mt.setLastError before this method. // When the ManagedTarget is not found (e.g. after removal), failedReason // defaults to empty. func (tm *TargetsManager) setTargetState(name, state string) { intended := collstore.IntendedStateEnabled if state == collstore.StateStopped { intended = collstore.IntendedStateDisabled } ts := &collstore.TargetState{ ComponentState: collstore.ComponentState{ // Name: name, IntendedState: intended, State: state, LastUpdated: time.Now(), }, } // Enrich with live target data when available. tm.mu.RLock() mt := tm.targets[name] tm.mu.RUnlock() if mt != nil && mt.T != nil { ts.FailedReason = mt.getLastError() // gRPC connection state ts.ConnectionState = mt.T.ConnState() // Per-subscription states (snapshot taken under Target's internal lock // to avoid racing with attemptSubscription/StopSubscription). if subStates := mt.T.SubscribeClientStates(); len(subStates) > 0 { ts.Subscriptions = make(map[string]string, len(subStates)) for subName, active := range subStates { if active { ts.Subscriptions[subName] = collstore.StateRunning } else { ts.Subscriptions[subName] = collstore.StateStopped } } } } tm.store.State.Set(collstore.KindTargets, name, ts) } func (tm *TargetsManager) getTargetStateStr(name string) string { ts := tm.GetTargetState(name) if ts == nil { return "" } return ts.State } // GetTargetState returns the runtime state of a target from the state store. func (tm *TargetsManager) GetTargetState(name string) *collstore.TargetState { v, ok, err := tm.store.State.Get(collstore.KindTargets, name) if err != nil || !ok { return nil } ts, ok := v.(*collstore.TargetState) if !ok { return nil } return ts } // ListTargetStates returns all target states from the state store. func (tm *TargetsManager) ListTargetStates() []*collstore.TargetState { states := make([]*collstore.TargetState, 0) tm.store.State.List(collstore.KindTargets, func(name string, v any) bool { if ts, ok := v.(*collstore.TargetState); ok { states = append(states, ts) } return false }) return states } ================================================ FILE: pkg/collector/managers/targets/tunnel_server.go ================================================ package targets_manager import ( "context" "fmt" "io" "log/slog" "net" "regexp" "sort" "strings" "sync" "time" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/config" "github.com/openconfig/gnmic/pkg/logging" tpb "github.com/openconfig/grpctunnel/proto/tunnel" "github.com/openconfig/grpctunnel/tunnel" "github.com/prometheus/client_golang/prometheus" "github.com/zestor-dev/zestor/store" "google.golang.org/grpc" "google.golang.org/grpc/credentials" ) // smaller scoped config than the one used when loading the config from the file type tunnelServerConfig struct { Address string `mapstructure:"address,omitempty" json:"address,omitempty"` TLS *types.TLSConfig `mapstructure:"tls,omitempty"` EnableMetrics bool `mapstructure:"enable-metrics,omitempty" json:"enable-metrics,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` } // tunnelServer watches tunnel server config and reconciles // the connected targets when the config changes. type tunnelServer struct { config *tunnelServerConfig grpcTunnelSrv *grpc.Server tunServer *tunnel.Server store store.Store[any] logger *slog.Logger reg *prometheus.Registry // track currently connected tunnel targets so we can reconcile when // tunnel-target-matches are created, updated, or deleted. mu sync.RWMutex connectedTargets map[string]tunnel.Target // key = target ID } func newTunnelServer(s store.Store[any], reg *prometheus.Registry) *tunnelServer { ts := &tunnelServer{ grpcTunnelSrv: grpc.NewServer(), store: s, reg: reg, connectedTargets: make(map[string]tunnel.Target), } return ts } func (ts *tunnelServer) gRPCTunnelServerOpts() ([]grpc.ServerOption, error) { opts := make([]grpc.ServerOption, 0) if ts.config == nil { return opts, nil } if ts.config.EnableMetrics && ts.reg != nil { grpcMetrics := grpc_prometheus.NewServerMetrics() opts = append(opts, grpc.StreamInterceptor(grpcMetrics.StreamServerInterceptor()), grpc.UnaryInterceptor(grpcMetrics.UnaryServerInterceptor()), ) ts.reg.MustRegister(grpcMetrics) } if ts.config.TLS == nil { return opts, nil } tlscfg, err := utils.NewTLSConfig( ts.config.TLS.CaFile, ts.config.TLS.CertFile, ts.config.TLS.KeyFile, ts.config.TLS.ClientAuth, false, true, ) if err != nil { return nil, err } if tlscfg != nil { opts = append(opts, grpc.Creds(credentials.NewTLS(tlscfg))) } return opts, nil } func (ts *tunnelServer) startTunnelServer(ctx context.Context) error { tscfg, found, err := ts.store.Get("tunnel-server", "tunnel-server") if err != nil { return err } if !found { return nil } if tscfg == nil { return nil } logger := logging.NewLogger(ts.store, "component", "tunnel-server") ts.logger = logger originalConfig, ok := tscfg.(*config.TunnelServer) if !ok { return fmt.Errorf("tunnel-server config is malfomatted") } if originalConfig == nil { return nil } ts.config = &tunnelServerConfig{ Address: originalConfig.Address, TLS: originalConfig.TLS, EnableMetrics: originalConfig.EnableMetrics, Debug: originalConfig.Debug, } ts.logger.Info("building tunnel server") ts.tunServer, err = tunnel.NewServer(tunnel.ServerConfig{ AddTargetHandler: ts.addTargetHandler, DeleteTargetHandler: ts.deleteTargetHandler, RegisterHandler: ts.registerHandler, Handler: ts.serverHandler, }) if err != nil { return err } opts, err := ts.gRPCTunnelServerOpts() if err != nil { return err } ts.grpcTunnelSrv = grpc.NewServer(opts...) tpb.RegisterTunnelServer(ts.grpcTunnelSrv, ts.tunServer) var l net.Listener network := "tcp" addr := ts.config.Address if strings.HasPrefix(ts.config.Address, "unix://") { network = "unix" addr = strings.TrimPrefix(addr, "unix://") } ctx, cancel := context.WithCancel(ctx) for { var err error l, err = net.Listen(network, addr) if err != nil { ts.logger.Error("failed to start gRPC tunnel server listener", "error", err) time.Sleep(time.Second) continue } break } // watch tunnel-target-matches for CRUD operations and reconcile connected targets var matchesCh <-chan *store.Event[any] var matchesCancel func() for { var err error matchesCh, matchesCancel, err = ts.store.Watch("tunnel-target-matches") if err != nil { ts.logger.Error("failed to watch tunnel-target-matches", "error", err) time.Sleep(time.Second) continue } break } go ts.watchTunnelTargetMatches(ctx, matchesCh, matchesCancel) go func() { ts.logger.Info("starting gRPC tunnel server") err := ts.grpcTunnelSrv.Serve(l) if err != nil { ts.logger.Error("gRPC tunnel server shutdown", "error", err) } cancel() }() defer ts.grpcTunnelSrv.Stop() for range ctx.Done() { } return ctx.Err() } // Tunnel Server handlers // addTargetHandler is called when a tunnel target connects (registers) func (ts *tunnelServer) addTargetHandler(tt tunnel.Target) error { ts.logger.Info("tunnel server target register request", "target", tt) // track the connected target so we can reconcile when matches change ts.mu.Lock() ts.connectedTargets[tt.ID] = tt ts.mu.Unlock() tc := ts.getTunnelTargetMatch(tt) if tc == nil { ts.logger.Info("target ignored, not matching any rule", "target", tt) return nil } ts.logger.Info("target matched", "target", tc) _, err := ts.store.Set("targets", tc.Name, tc) if err != nil { return err } return nil } // deleteTargetHandler is called when a tunnel target disconnects (deregisters) func (ts *tunnelServer) deleteTargetHandler(tt tunnel.Target) error { ts.logger.Info("tunnel server target deregister request", "target", tt) // remove from connected targets tracking ts.mu.Lock() delete(ts.connectedTargets, tt.ID) ts.mu.Unlock() _, _, err := ts.store.Delete("targets", tt.ID) if err != nil { ts.logger.Error("failed to delete tunnel target from configStore", "error", err) } return nil } func (ts *tunnelServer) registerHandler(ss tunnel.ServerSession) error { return nil } func (ts *tunnelServer) serverHandler(ss tunnel.ServerSession, rwc io.ReadWriteCloser) error { return nil } func (ts *tunnelServer) getTunnelTargetMatch(tt tunnel.Target) *types.TargetConfig { matchingConfigs, err := ts.store.List("tunnel-target-matches", func(key string, value any) bool { switch tm := value.(type) { case *config.TunnelTargetMatch: // check if the registering target matches corresponding ID ok, err := regexp.MatchString(tm.ID, tt.ID) if err != nil { ts.logger.Error("regex eval failed with string", "error", err, "id", tm.ID, "target", tt.ID) return false } if !ok { return false } // check if the registering target matches corresponding type ok, err = regexp.MatchString(tm.Type, tt.Type) if err != nil { ts.logger.Error("regex eval failed with string", "error", err, "type", tm.Type, "target", tt.Type) return false } if !ok { return false } // target has a match, tc := new(types.TargetConfig) *tc = tm.Config tc.Name = tt.ID tc.TunnelTargetType = tt.Type err = config.SetTargetConfigDefaults(ts.store, tc) if err != nil { ts.logger.Error("failed to set target config defaults", "error", err, "id", tt.ID, "type", tt.Type) return false } } return false }) if err != nil { ts.logger.Error("failed to list tunnel target matches", "error", err) return nil } if len(matchingConfigs) == 0 { return nil } // get keys and sort them keys := make([]string, 0, len(matchingConfigs)) for key := range matchingConfigs { keys = append(keys, key) } sort.Strings(keys) // take the first match and set the target config defaults mconfig := matchingConfigs[keys[0]].(*config.TunnelTargetMatch) tc := new(types.TargetConfig) *tc = mconfig.Config tc.Name = tt.ID tc.TunnelTargetType = tt.Type err = config.SetTargetConfigDefaults(ts.store, tc) if err != nil { ts.logger.Error("failed to set target config defaults", "error", err, "id", tt.ID, "type", tt.Type) return nil } return tc } // watchTunnelTargetMatches watches for changes to tunnel-target-matches and // reconciles all connected tunnel targets when a match is created, updated, or deleted. func (ts *tunnelServer) watchTunnelTargetMatches(ctx context.Context, ch <-chan *store.Event[any], cancel func()) { defer cancel() ts.logger.Info("starting tunnel-target-matches watcher") for { select { case <-ctx.Done(): ts.logger.Info("tunnel-target-matches watcher stopped") return case ev, ok := <-ch: if !ok { ts.logger.Info("tunnel-target-matches watch channel closed") return } ts.logger.Info("tunnel-target-match changed, reconciling connected targets", "eventType", ev.EventType, "matchID", ev.Name, ) ts.reconcileConnectedTargets() } } } // reconcileConnectedTargets re-evaluates all connected tunnel targets against // the current set of tunnel-target-matches. This is called when a match rule // is created, updated, or deleted. // // For each connected target: // - If it matches a rule: upsert the target config (create or update) // - If it doesn't match any rule: delete the target config // // We hold the lock for the entire reconciliation to prevent a race where a target // deregisters (and gets deleted from the store) while we're processing it, which // would cause us to recreate an orphaned target config. func (ts *tunnelServer) reconcileConnectedTargets() { ts.mu.Lock() defer ts.mu.Unlock() ts.logger.Info("reconciling connected tunnel targets", "count", len(ts.connectedTargets)) for _, tt := range ts.connectedTargets { tc := ts.getTunnelTargetMatch(tt) if tc != nil { // target matches a rule ==> upsert the target config ts.logger.Debug("tunnel target matches rule, upserting config", "targetID", tt.ID, "targetType", tt.Type, ) _, err := ts.store.Set("targets", tc.Name, tc) if err != nil { ts.logger.Error("failed to upsert tunnel target config", "targetID", tt.ID, "error", err, ) } } else { // target no longer matches any rule ==> delete the target config ts.logger.Debug("tunnel target no longer matches any rule, deleting config", "targetID", tt.ID, "targetType", tt.Type, ) _, _, err := ts.store.Delete("targets", tt.ID) if err != nil { ts.logger.Error("failed to delete tunnel target config", "targetID", tt.ID, "error", err, ) } } } ts.logger.Info("tunnel target reconciliation complete", "count", len(ts.connectedTargets)) } ================================================ FILE: pkg/collector/store/store.go ================================================ // © 2026 Nokia. // // This code is a Contribution to the gNMIc project ("Work") made under the Google Software Grant and Corporate Contributor License Agreement ("CLA") and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose. // This code is provided on an "as is" basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package store import ( zstore "github.com/zestor-dev/zestor/store" "github.com/zestor-dev/zestor/store/gomap" ) // Store wraps both the config store and the state store. // The config store holds user-defined configuration (targets, subscriptions, outputs, inputs, etc.). // The state store holds runtime state for each component (running, stopped, failed, etc.). type Store struct { Config zstore.Store[any] State zstore.Store[any] } // NewStore creates a new Store with the given config store and a fresh // in-memory state store. func NewStore(configStore zstore.Store[any]) *Store { return &Store{ Config: configStore, State: gomap.NewMemStore(zstore.StoreOptions[any]{}), } } ================================================ FILE: pkg/collector/store/types.go ================================================ // © 2026 Nokia. // // This code is a Contribution to the gNMIc project ("Work") made under the Google Software Grant and Corporate Contributor License Agreement ("CLA") and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose. // This code is provided on an "as is" basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package store import "time" // State constants shared across all component types. const ( IntendedStateEnabled = "enabled" IntendedStateDisabled = "disabled" StateRunning = "running" StateStopped = "stopped" StateStarting = "starting" StateFailed = "failed" StatePaused = "paused" StateStopping = "stopping" ) // Kind names used in the state store. const ( KindTargets = "targets" KindOutputs = "outputs" KindInputs = "inputs" KindSubscriptions = "subscriptions" KindProcessors = "processors" KindAssignments = "assignments" KindTunnelTargetMatches = "tunnel-target-matches" ) // ComponentState is the base state shared by all managed components. type ComponentState struct { // Name string `json:"name"` IntendedState string `json:"intended-state"` // enabled|disabled State string `json:"state"` // running|stopped|starting|failed|paused|stopping FailedReason string `json:"failed-reason,omitempty"` // last error message LastUpdated time.Time `json:"last-updated"` // timestamp of last state transition } // TargetState extends ComponentState with target-specific fields. type TargetState struct { ComponentState ConnectionState string `json:"connection-state,omitempty"` // gRPC connectivity: READY|CONNECTING|TRANSIENT_FAILURE|... Subscriptions map[string]string `json:"subscriptions,omitempty"` // subscription_name -> running|stopped } // OutputState extends ComponentState with output-specific fields. type OutputState struct { ComponentState } // InputState extends ComponentState with input-specific fields. type InputState struct { ComponentState } // SubscriptionState tracks a subscription's aggregate state across targets. type SubscriptionState struct { ComponentState Targets map[string]string `json:"targets,omitempty"` // target_name -> running|stopped|starting|failed|paused|stopping } ================================================ FILE: pkg/config/actions.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "fmt" "github.com/openconfig/gnmic/pkg/actions" ) func (c *Config) GetActions() (map[string]map[string]interface{}, error) { for name, actc := range c.FileConfig.GetStringMap("actions") { switch actc := actc.(type) { case map[string]interface{}: c.logger.Printf("validating action %q config", name) err := c.validateActionsConfig(actc) if err != nil { return nil, err } // set action name if not configured if cname, ok := actc["name"]; !ok || cname == "" { actc["name"] = name } for nn, a := range actc { actc[nn] = convert(a) } c.Actions[name] = actc case nil: return nil, fmt.Errorf("empty action %q config", name) default: c.logger.Printf("malformed action config, %+v", actc) return nil, fmt.Errorf("malformed action config, got %T", actc) } } for n := range c.Actions { expandMapEnv(c.Actions[n], expandExcept( "target", "paths", "values", // gnmi action templates "url", "body", // http action templates "template", // template action templates )) } if c.Debug { c.logger.Printf("actions: %+v", c.Actions) } return c.Actions, nil } func (c *Config) validateActionsConfig(acfg map[string]interface{}) error { if aType, ok := acfg["type"]; ok { switch aType := aType.(type) { case string: if !strInlist(aType, actions.ActionTypes) { return fmt.Errorf("unknown action type: %s, must be one of %q", aType, actions.ActionTypes) } default: return fmt.Errorf("unexpected action type variable type, expecting string, got %T", aType) } return nil } return fmt.Errorf("missing action type under %+v", acfg) } ================================================ FILE: pkg/config/api_server.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "fmt" "os" "time" "github.com/openconfig/gnmic/pkg/api/types" ) const ( defaultAPIServerAddress = ":7890" defaultAPIServerTimeout = 10 * time.Second trueString = "true" ) type APIServer struct { Address string `mapstructure:"address,omitempty" json:"address,omitempty"` Timeout time.Duration `mapstructure:"timeout,omitempty" json:"timeout,omitempty"` TLS *types.TLSConfig `mapstructure:"tls,omitempty" json:"tls,omitempty"` EnableMetrics bool `mapstructure:"enable-metrics,omitempty" json:"enable-metrics,omitempty"` EnableProfiling bool `mapstructure:"enable-profiling,omitempty" json:"enable-profiling,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` HealthzDisableLogging bool `mapstructure:"healthz-disable-logging,omitempty" json:"healthz-disable-logging,omitempty"` } func (c *Config) GetAPIServer() error { if !c.FileConfig.IsSet("api-server") && c.API == "" { return nil } c.APIServer = new(APIServer) c.APIServer.Address = os.ExpandEnv(c.FileConfig.GetString("api-server/address")) if c.APIServer.Address == "" { c.APIServer.Address = os.ExpandEnv(c.FileConfig.GetString("api")) } c.APIServer.Timeout = c.FileConfig.GetDuration("api-server/timeout") if c.FileConfig.IsSet("api-server/tls") { c.APIServer.TLS = new(types.TLSConfig) c.APIServer.TLS.CaFile = os.ExpandEnv(c.FileConfig.GetString("api-server/tls/ca-file")) c.APIServer.TLS.CertFile = os.ExpandEnv(c.FileConfig.GetString("api-server/tls/cert-file")) c.APIServer.TLS.KeyFile = os.ExpandEnv(c.FileConfig.GetString("api-server/tls/key-file")) c.APIServer.TLS.ClientAuth = os.ExpandEnv(c.FileConfig.GetString("api-server/tls/client-auth")) if err := c.APIServer.TLS.Validate(); err != nil { return fmt.Errorf("api-server TLS config error: %w", err) } } c.APIServer.EnableMetrics = os.ExpandEnv(c.FileConfig.GetString("api-server/enable-metrics")) == trueString c.APIServer.EnableProfiling = os.ExpandEnv(c.FileConfig.GetString("api-server/enable-profiling")) == trueString c.APIServer.Debug = os.ExpandEnv(c.FileConfig.GetString("api-server/debug")) == trueString c.APIServer.HealthzDisableLogging = os.ExpandEnv(c.FileConfig.GetString("api-server/healthz-disable-logging")) == trueString c.setAPIServerDefaults() return nil } func (c *Config) setAPIServerDefaults() { if c.APIServer.Address == "" { c.APIServer.Address = defaultAPIServerAddress } if c.APIServer.Timeout <= 0 { c.APIServer.Timeout = defaultAPIServerTimeout } } ================================================ FILE: pkg/config/clustering.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "fmt" "os" "time" "github.com/google/uuid" "github.com/openconfig/gnmic/pkg/api/types" ) const ( minTargetWatchTimer = 20 * time.Second defaultTargetAssignmentTimeout = 10 * time.Second defaultServicesWatchTimer = 1 * time.Minute defaultLeaderWaitTimer = 5 * time.Second ) type Clustering struct { ClusterName string `mapstructure:"cluster-name,omitempty" json:"cluster-name,omitempty" yaml:"cluster-name,omitempty"` InstanceName string `mapstructure:"instance-name,omitempty" json:"instance-name,omitempty" yaml:"instance-name,omitempty"` ServiceAddress string `mapstructure:"service-address,omitempty" json:"service-address,omitempty" yaml:"service-address,omitempty"` ServicesWatchTimer time.Duration `mapstructure:"services-watch-timer,omitempty" json:"services-watch-timer,omitempty" yaml:"services-watch-timer,omitempty"` TargetsWatchTimer time.Duration `mapstructure:"targets-watch-timer,omitempty" json:"targets-watch-timer,omitempty" yaml:"targets-watch-timer,omitempty"` TargetAssignmentTimeout time.Duration `mapstructure:"target-assignment-timeout,omitempty" json:"target-assignment-timeout,omitempty" yaml:"target-assignment-timeout,omitempty"` LeaderWaitTimer time.Duration `mapstructure:"leader-wait-timer,omitempty" json:"leader-wait-timer,omitempty" yaml:"leader-wait-timer,omitempty"` Tags []string `mapstructure:"tags,omitempty" json:"tags,omitempty" yaml:"tags,omitempty"` Locker map[string]interface{} `mapstructure:"locker,omitempty" json:"locker,omitempty" yaml:"locker,omitempty"` TLS *types.TLSConfig `mapstructure:"tls,omitempty" json:"tls,omitempty" yaml:"tls,omitempty"` } func (c *Config) GetClustering() error { if !c.FileConfig.IsSet("clustering") { return nil } c.Clustering = new(Clustering) c.Clustering.ClusterName = os.ExpandEnv(c.FileConfig.GetString("clustering/cluster-name")) c.Clustering.InstanceName = os.ExpandEnv(c.FileConfig.GetString("clustering/instance-name")) c.Clustering.ServiceAddress = os.ExpandEnv(c.FileConfig.GetString("clustering/service-address")) c.Clustering.TargetsWatchTimer = c.FileConfig.GetDuration("clustering/targets-watch-timer") c.Clustering.TargetAssignmentTimeout = c.FileConfig.GetDuration("clustering/target-assignment-timeout") c.Clustering.ServicesWatchTimer = c.FileConfig.GetDuration("clustering/services-watch-timer") c.Clustering.LeaderWaitTimer = c.FileConfig.GetDuration("clustering/leader-wait-timer") c.Clustering.Tags = c.FileConfig.GetStringSlice("clustering/tags") for i := range c.Clustering.Tags { c.Clustering.Tags[i] = os.ExpandEnv(c.Clustering.Tags[i]) } if c.FileConfig.IsSet("clustering/tls") { c.Clustering.TLS = new(types.TLSConfig) c.Clustering.TLS.CaFile = os.ExpandEnv(c.FileConfig.GetString("clustering/tls/ca-file")) c.Clustering.TLS.CertFile = os.ExpandEnv(c.FileConfig.GetString("clustering/tls/cert-file")) c.Clustering.TLS.KeyFile = os.ExpandEnv(c.FileConfig.GetString("clustering/tls/key-file")) c.Clustering.TLS.SkipVerify = os.ExpandEnv(c.FileConfig.GetString("clustering/tls/skip-verify")) == trueString if err := c.APIServer.TLS.Validate(); err != nil { return fmt.Errorf("clustering TLS config error: %w", err) } } c.setClusteringDefaults() return c.getLocker() } func (c *Config) setClusteringDefaults() { // set $clustering.cluster-name to $cluster-name if it's empty string if c.Clustering.ClusterName == "" { c.Clustering.ClusterName = c.ClusterName // otherwise, set $cluster-name to $clustering.cluster-name } else { c.ClusterName = c.Clustering.ClusterName } // set clustering.instance-name to instance-name if c.Clustering.InstanceName == "" { if c.InstanceName != "" { c.Clustering.InstanceName = c.InstanceName } else { c.Clustering.InstanceName = "gnmic-" + uuid.New().String() } } else { c.InstanceName = c.Clustering.InstanceName } // the timers are set to less than the min allowed value, // make them default to that min value. if c.Clustering.TargetsWatchTimer < minTargetWatchTimer { c.Clustering.TargetsWatchTimer = minTargetWatchTimer } if c.Clustering.TargetAssignmentTimeout < defaultTargetAssignmentTimeout { c.Clustering.TargetAssignmentTimeout = defaultTargetAssignmentTimeout } if c.Clustering.ServicesWatchTimer <= defaultServicesWatchTimer { c.Clustering.ServicesWatchTimer = defaultServicesWatchTimer } if c.Clustering.LeaderWaitTimer <= defaultLeaderWaitTimer { c.Clustering.LeaderWaitTimer = defaultLeaderWaitTimer } } ================================================ FILE: pkg/config/config.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "log" "os" "path/filepath" "strings" "text/template" "time" "github.com/adrg/xdg" "github.com/itchyny/gojq" "github.com/mitchellh/go-homedir" "github.com/openconfig/gnmi/proto/gnmi" "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/spf13/viper" "gopkg.in/natefinch/lumberjack.v2" yaml "gopkg.in/yaml.v2" "github.com/openconfig/gnmic/pkg/api" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" gfile "github.com/openconfig/gnmic/pkg/file" "github.com/zestor-dev/zestor/store" ) const ( configName = ".gnmic" configLogPrefix = "[config] " envPrefix = "GNMIC" trimChars = " \r\n\t" ) var ErrInvalidConfig = errors.New("invalid configuration") var osPathFlags = []string{"tls-ca", "tls-cert", "tls-key"} type Config struct { GlobalFlags `mapstructure:",squash"` LocalFlags `mapstructure:",squash"` FileConfig *viper.Viper `mapstructure:"-" json:"-" yaml:"-" ` Targets map[string]*types.TargetConfig `mapstructure:"targets,omitempty" json:"targets,omitempty" yaml:"targets,omitempty"` Subscriptions map[string]*types.SubscriptionConfig `mapstructure:"subscriptions,omitempty" json:"subscriptions,omitempty" yaml:"subscriptions,omitempty"` Outputs map[string]map[string]any `mapstructure:"outputs,omitempty" json:"outputs,omitempty" yaml:"outputs,omitempty"` Inputs map[string]map[string]any `mapstructure:"inputs,omitempty" json:"inputs,omitempty" yaml:"inputs,omitempty"` Processors map[string]map[string]any `mapstructure:"processors,omitempty" json:"processors,omitempty" yaml:"processors,omitempty"` Clustering *Clustering `mapstructure:"clustering,omitempty" json:"clustering,omitempty" yaml:"clustering,omitempty"` GnmiServer *GNMIServer `mapstructure:"gnmi-server,omitempty" json:"gnmi-server,omitempty" yaml:"gnmi-server,omitempty"` APIServer *APIServer `mapstructure:"api-server,omitempty" json:"api-server,omitempty" yaml:"api-server,omitempty"` Loader map[string]any `mapstructure:"loader,omitempty" json:"loader,omitempty" yaml:"loader,omitempty"` Actions map[string]map[string]any `mapstructure:"actions,omitempty" json:"actions,omitempty" yaml:"actions,omitempty"` TunnelServer *TunnelServer `mapstructure:"tunnel-server,omitempty" json:"tunnel-server,omitempty" yaml:"tunnel-server,omitempty"` // logger *log.Logger setRequestTemplate []*template.Template setRequestVars map[string]any } var ValueTypes = []string{"json", "json_ietf", "string", "int", "uint", "bool", "decimal", "float", "bytes", "ascii"} type GlobalFlags struct { CfgFile string Address []string `mapstructure:"address,omitempty" json:"address,omitempty" yaml:"address,omitempty"` Username string `mapstructure:"username,omitempty" json:"username,omitempty" yaml:"username,omitempty"` Password string `mapstructure:"password,omitempty" json:"password,omitempty" yaml:"password,omitempty"` Port string `mapstructure:"port,omitempty" json:"port,omitempty" yaml:"port,omitempty"` Encoding string `mapstructure:"encoding,omitempty" json:"encoding,omitempty" yaml:"encoding,omitempty"` Insecure bool `mapstructure:"insecure,omitempty" json:"insecure,omitempty" yaml:"insecure,omitempty"` TLSCa string `mapstructure:"tls-ca,omitempty" json:"tls-ca,omitempty" yaml:"tls-ca,omitempty"` TLSCert string `mapstructure:"tls-cert,omitempty" json:"tls-cert,omitempty" yaml:"tls-cert,omitempty"` TLSKey string `mapstructure:"tls-key,omitempty" json:"tls-key,omitempty" yaml:"tls-key,omitempty"` TLSMinVersion string `mapstructure:"tls-min-version,omitempty" json:"tls-min-version,omitempty" yaml:"tls-min-version,omitempty"` TLSMaxVersion string `mapstructure:"tls-max-version,omitempty" json:"tls-max-version,omitempty" yaml:"tls-max-version,omitempty"` TLSVersion string `mapstructure:"tls-version,omitempty" json:"tls-version,omitempty" yaml:"tls-version,omitempty"` TLSServerName string `mapstructure:"tls-server-name,omitempty" json:"tls-server-name,omitempty" yaml:"tls-server-name,omitempty"` LogTLSSecret bool `mapstructure:"log-tls-secret,omitempty" json:"log-tls-secret,omitempty" yaml:"log-tls-secret,omitempty"` Timeout time.Duration `mapstructure:"timeout,omitempty" json:"timeout,omitempty" yaml:"timeout,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty" yaml:"debug,omitempty"` EnablePprof bool `mapstructure:"enable-pprof,omitempty" json:"enable-pprof,omitempty" yaml:"enable-pprof,omitempty"` PprofAddr string `mapstructure:"pprof-addr,omitempty" json:"pprof-addr,omitempty" yaml:"pprof-addr,omitempty"` SkipVerify bool `mapstructure:"skip-verify,omitempty" json:"skip-verify,omitempty" yaml:"skip-verify,omitempty"` NoPrefix bool `mapstructure:"no-prefix,omitempty" json:"no-prefix,omitempty" yaml:"no-prefix,omitempty"` ProxyFromEnv bool `mapstructure:"proxy-from-env,omitempty" json:"proxy-from-env,omitempty" yaml:"proxy-from-env,omitempty"` Format string `mapstructure:"format,omitempty" json:"format,omitempty" yaml:"format,omitempty"` LogFile string `mapstructure:"log-file,omitempty" json:"log-file,omitempty" yaml:"log-file,omitempty"` Log bool `mapstructure:"log,omitempty" json:"log,omitempty" yaml:"log,omitempty"` LogMaxSize int `mapstructure:"log-max-size,omitempty" json:"log-max-size,omitempty" yaml:"log-max-size,omitempty"` LogMaxBackups int `mapstructure:"log-max-backups,omitempty" json:"log-max-backups,omitempty" yaml:"log-max-backups,omitempty"` LogCompress bool `mapstructure:"log-compress,omitempty" json:"log-compress,omitempty" yaml:"log-compress,omitempty"` MaxMsgSize int `mapstructure:"max-msg-size,omitempty" json:"max-msg-size,omitempty" yaml:"max-msg-size,omitempty"` //PrometheusAddress string `mapstructure:"prometheus-address,omitempty" json:"prometheus-address,omitempty" yaml:"prometheus-address,omitempty"` PrintRequest bool `mapstructure:"print-request,omitempty" json:"print-request,omitempty" yaml:"print-request,omitempty"` Retry time.Duration `mapstructure:"retry,omitempty" json:"retry,omitempty" yaml:"retry,omitempty"` TargetBufferSize uint `mapstructure:"target-buffer-size,omitempty" json:"target-buffer-size,omitempty" yaml:"target-buffer-size,omitempty"` ClusterName string `mapstructure:"cluster-name,omitempty" json:"cluster-name,omitempty" yaml:"cluster-name,omitempty"` InstanceName string `mapstructure:"instance-name,omitempty" json:"instance-name,omitempty" yaml:"instance-name,omitempty"` API string `mapstructure:"api,omitempty" json:"api,omitempty" yaml:"api,omitempty"` ProtoFile []string `mapstructure:"proto-file,omitempty" json:"proto-file,omitempty" yaml:"proto-file,omitempty"` ProtoDir []string `mapstructure:"proto-dir,omitempty" json:"proto-dir,omitempty" yaml:"proto-dir,omitempty"` RegisteredExtensions []string `mapstructure:"registered-extensions,omitempty" json:"registered-extensions,omitempty" yaml:"registered-extensions,omitempty"` RequestExtensions string `mapstructure:"request-extensions,omitempty" json:"request-extensions,omitempty" yaml:"request-extensions,omitempty"` TargetsFile string `mapstructure:"targets-file,omitempty" json:"targets-file,omitempty" yaml:"targets-file,omitempty"` Gzip bool `mapstructure:"gzip,omitempty" json:"gzip,omitempty" yaml:"gzip,omitempty"` File []string `mapstructure:"file,omitempty" json:"file,omitempty" yaml:"file,omitempty"` Dir []string `mapstructure:"dir,omitempty" json:"dir,omitempty" yaml:"dir,omitempty"` Exclude []string `mapstructure:"exclude,omitempty" json:"exclude,omitempty" yaml:"exclude,omitempty"` Token string `mapstructure:"token,omitempty" json:"token,omitempty" yaml:"token,omitempty"` UseTunnelServer bool `mapstructure:"use-tunnel-server,omitempty" json:"use-tunnel-server,omitempty" yaml:"use-tunnel-server,omitempty"` AuthScheme string `mapstructure:"auth-scheme,omitempty" json:"auth-scheme,omitempty" yaml:"auth-scheme,omitempty"` CalculateLatency bool `mapstructure:"calculate-latency,omitempty" json:"calculate-latency,omitempty" yaml:"calculate-latency,omitempty"` Metadata map[string]string `mapstructure:"metadata,omitempty" json:"metadata,omitempty" yaml:"metadata,omitempty"` PluginProcessorsPath string `mapstructure:"plugin-processors-path,omitempty" yaml:"plugin-processors-path,omitempty" json:"plugin-processors-path,omitempty"` } type LocalFlags struct { // Capabilities CapabilitiesVersion bool `mapstructure:"capabilities-version,omitempty" json:"capabilities-version,omitempty" yaml:"capabilities-version,omitempty"` // Get GetPath []string `mapstructure:"get-path,omitempty" json:"get-path,omitempty" yaml:"get-path,omitempty"` GetPrefix string `mapstructure:"get-prefix,omitempty" json:"get-prefix,omitempty" yaml:"get-prefix,omitempty"` GetModel []string `mapstructure:"get-model,omitempty" json:"get-model,omitempty" yaml:"get-model,omitempty"` GetType string `mapstructure:"get-type,omitempty" json:"get-type,omitempty" yaml:"get-type,omitempty"` GetTarget string `mapstructure:"get-target,omitempty" json:"get-target,omitempty" yaml:"get-target,omitempty"` GetValuesOnly bool `mapstructure:"get-values-only,omitempty" json:"get-values-only,omitempty" yaml:"get-values-only,omitempty"` GetProcessor []string `mapstructure:"get-processor,omitempty" json:"get-processor,omitempty" yaml:"get-processor,omitempty"` GetDepth uint32 `mapstructure:"get-depth,omitempty" yaml:"get-depth,omitempty" json:"get-depth,omitempty"` GetDryRun bool `mapstructure:"get-dry-run,omitempty" json:"get-dry-run,omitempty" yaml:"get-dry-run,omitempty"` // Set SetPrefix string `mapstructure:"set-prefix,omitempty" json:"set-prefix,omitempty" yaml:"set-prefix,omitempty"` SetDelete []string `mapstructure:"set-delete,omitempty" json:"set-delete,omitempty" yaml:"set-delete,omitempty"` SetReplace []string `mapstructure:"set-replace,omitempty" json:"set-replace,omitempty" yaml:"set-replace,omitempty"` SetUnionReplace []string `mapstructure:"set-union-replace,omitempty" json:"set-union-replace,omitempty" yaml:"set-union-replace,omitempty"` SetUpdate []string `mapstructure:"set-update,omitempty" json:"set-update,omitempty" yaml:"set-update,omitempty"` SetReplacePath []string `mapstructure:"set-replace-path,omitempty" json:"set-replace-path,omitempty" yaml:"set-replace-path,omitempty"` SetUpdatePath []string `mapstructure:"set-update-path,omitempty" json:"set-update-path,omitempty" yaml:"set-update-path,omitempty"` SetReplaceFile []string `mapstructure:"set-replace-file,omitempty" json:"set-replace-file,omitempty" yaml:"set-replace-file,omitempty"` SetUpdateFile []string `mapstructure:"set-update-file,omitempty" json:"set-update-file,omitempty" yaml:"set-update-file,omitempty"` SetReplaceValue []string `mapstructure:"set-replace-value,omitempty" json:"set-replace-value,omitempty" yaml:"set-replace-value,omitempty"` SetUpdateValue []string `mapstructure:"set-update-value,omitempty" json:"set-update-value,omitempty" yaml:"set-update-value,omitempty"` SetUnionReplacePath []string `mapstructure:"set-union-replace-path,omitempty" yaml:"set-union-replace-path,omitempty" json:"set-union-replace-path,omitempty"` SetUnionReplaceValue []string `mapstructure:"set-union-replace-value,omitempty" yaml:"set-union-replace-value,omitempty" json:"set-union-replace-value,omitempty"` SetUnionReplaceFile []string `mapstructure:"set-union-replace-file,omitempty" yaml:"set-union-replace-file,omitempty" json:"set-union-replace-file,omitempty"` SetDelimiter string `mapstructure:"set-delimiter,omitempty" json:"set-delimiter,omitempty" yaml:"set-delimiter,omitempty"` SetTarget string `mapstructure:"set-target,omitempty" json:"set-target,omitempty" yaml:"set-target,omitempty"` SetRequestFile []string `mapstructure:"set-request-file,omitempty" json:"set-request-file,omitempty" yaml:"set-request-file,omitempty"` SetRequestVars string `mapstructure:"set-request-vars,omitempty" json:"set-request-vars,omitempty" yaml:"set-request-vars,omitempty"` SetRequestProtoFile []string `mapstructure:"set-proto-request-file,omitempty" yaml:"set-proto-request-file,omitempty" json:"set-proto-request-file,omitempty"` SetDryRun bool `mapstructure:"set-dry-run,omitempty" json:"set-dry-run,omitempty" yaml:"set-dry-run,omitempty"` SetNoTrim bool `mapstructure:"set-no-trim,omitempty" json:"set-no-trim,omitempty" yaml:"set-no-trim,omitempty"` SetReplaceCli []string `mapstructure:"set-replace-cli,omitempty" yaml:"set-replace-cli,omitempty" json:"set-replace-cli,omitempty"` SetReplaceCliFile string `mapstructure:"set-replace-cli-file,omitempty" yaml:"set-replace-cli-file,omitempty" json:"set-replace-cli-file,omitempty"` SetUpdateCli []string `mapstructure:"set-update-cli,omitempty" yaml:"set-update-cli,omitempty" json:"set-update-cli,omitempty"` SetUpdateCliFile string `mapstructure:"set-update-cli-file,omitempty" yaml:"set-update-cli-file,omitempty" json:"set-update-cli-file,omitempty"` SetCommitId string `mapstructure:"set-commit-id,omitempty" yaml:"set-commit-id,omitempty" json:"set-commit-id,omitempty"` SetCommitRequest bool `mapstructure:"set-commit-request,omitempty" yaml:"set-commit-request,omitempty" json:"set-commit-request,omitempty"` SetCommitRollbackDuration time.Duration `mapstructure:"set-commit-rollback-duration,omitempty" yaml:"set-commit-rollback-duration,omitempty" json:"set-commit-rollback-duration,omitempty"` SetCommitCancel bool `mapstructure:"set-commit-cancel,omitempty" yaml:"set-commit-cancel,omitempty" json:"set-commit-cancel,omitempty"` SetCommitConfirm bool `mapstructure:"set-commit-confirm,omitempty" yaml:"set-commit-confirm,omitempty" json:"set-commit-confirm,omitempty"` // Sub SubscribePrefix string `mapstructure:"subscribe-prefix,omitempty" json:"subscribe-prefix,omitempty" yaml:"subscribe-prefix,omitempty"` SubscribePath []string `mapstructure:"subscribe-path,omitempty" json:"subscribe-path,omitempty" yaml:"subscribe-path,omitempty"` SubscribeQos uint32 `mapstructure:"subscribe-qos,omitempty" json:"subscribe-qos,omitempty" yaml:"subscribe-qos,omitempty"` SubscribeUpdatesOnly bool `mapstructure:"subscribe-updates-only,omitempty" json:"subscribe-updates-only,omitempty" yaml:"subscribe-updates-only,omitempty"` SubscribeMode string `mapstructure:"subscribe-mode,omitempty" json:"subscribe-mode,omitempty" yaml:"subscribe-mode,omitempty"` SubscribeStreamMode string `mapstructure:"subscribe-stream_mode,omitempty" json:"subscribe-stream-mode,omitempty" yaml:"subscribe-stream-mode,omitempty"` SubscribeSampleInterval time.Duration `mapstructure:"subscribe-sample-interval,omitempty" json:"subscribe-sample-interval,omitempty" yaml:"subscribe-sample-interval,omitempty"` SubscribeSuppressRedundant bool `mapstructure:"subscribe-suppress-redundant,omitempty" json:"subscribe-suppress-redundant,omitempty" yaml:"subscribe-suppress-redundant,omitempty"` SubscribeHeartbeatInterval time.Duration `mapstructure:"subscribe-heartbeat-interval,omitempty" json:"subscribe-heartbeat-interval,omitempty" yaml:"subscribe-heartbeat-interval,omitempty"` SubscribeModel []string `mapstructure:"subscribe-model,omitempty" json:"subscribe-model,omitempty" yaml:"subscribe-model,omitempty"` SubscribeQuiet bool `mapstructure:"subscribe-quiet,omitempty" json:"subscribe-quiet,omitempty" yaml:"subscribe-quiet,omitempty"` SubscribeTarget string `mapstructure:"subscribe-target,omitempty" json:"subscribe-target,omitempty" yaml:"subscribe-target,omitempty"` SubscribeSetTarget bool `mapstructure:"subscribe-set-target,omitempty" json:"subscribe-set-target,omitempty" yaml:"subscribe-set-target,omitempty"` SubscribeName []string `mapstructure:"subscribe-name,omitempty" json:"subscribe-name,omitempty" yaml:"subscribe-name,omitempty"` SubscribeOutput []string `mapstructure:"subscribe-output,omitempty" json:"subscribe-output,omitempty" yaml:"subscribe-output,omitempty"` SubscribeWatchConfig bool `mapstructure:"subscribe-watch-config,omitempty" json:"subscribe-watch-config,omitempty" yaml:"subscribe-watch-config,omitempty"` SubscribeBackoff time.Duration `mapstructure:"subscribe-backoff,omitempty" json:"subscribe-backoff,omitempty" yaml:"subscribe-backoff,omitempty"` SubscribeLockRetry time.Duration `mapstructure:"subscribe-lock-retry,omitempty" json:"subscribe-lock-retry,omitempty" yaml:"subscribe-lock-retry,omitempty"` SubscribeHistorySnapshot string `mapstructure:"subscribe-history-snapshot,omitempty" json:"subscribe-history-snapshot,omitempty" yaml:"subscribe-history-snapshot,omitempty"` SubscribeHistoryStart string `mapstructure:"subscribe-history-start,omitempty" json:"subscribe-history-start,omitempty" yaml:"subscribe-history-start,omitempty"` SubscribeHistoryEnd string `mapstructure:"subscribe-history-end,omitempty" json:"subscribe-history-end,omitempty" yaml:"subscribe-history-end,omitempty"` SubscribeDepth uint32 `mapstructure:"subscribe-depth,omitempty" yaml:"subscribe-depth,omitempty" json:"subscribe-depth,omitempty"` // Path PathPathType string `mapstructure:"path-path-type,omitempty" json:"path-path-type,omitempty" yaml:"path-path-type,omitempty"` PathWithDescr bool `mapstructure:"path-descr,omitempty" json:"path-descr,omitempty" yaml:"path-descr,omitempty"` PathWithPrefix bool `mapstructure:"path-with-prefix,omitempty" json:"path-with-prefix,omitempty" yaml:"path-with-prefix,omitempty"` PathWithTypes bool `mapstructure:"path-types,omitempty" json:"path-types,omitempty" yaml:"path-types,omitempty"` PathSearch bool `mapstructure:"path-search,omitempty" json:"path-search,omitempty" yaml:"path-search,omitempty"` PathState bool `mapstructure:"path-state,omitempty" json:"path-state,omitempty" yaml:"path-state,omitempty"` PathConfig bool `mapstructure:"path-config,omitempty" json:"path-config,omitempty" yaml:"path-config,omitempty"` // Prompt PromptFile []string `mapstructure:"prompt-file,omitempty" json:"prompt-file,omitempty" yaml:"prompt-file,omitempty"` PromptExclude []string `mapstructure:"prompt-exclude,omitempty" json:"prompt-exclude,omitempty" yaml:"prompt-exclude,omitempty"` PromptDir []string `mapstructure:"prompt-dir,omitempty" json:"prompt-dir,omitempty" yaml:"prompt-dir,omitempty"` PromptMaxSuggestions uint16 `mapstructure:"prompt-max-suggestions,omitempty" json:"prompt-max-suggestions,omitempty" yaml:"prompt-max-suggestions,omitempty"` PromptPrefixColor string `mapstructure:"prompt-prefix-color,omitempty" json:"prompt-prefix-color,omitempty" yaml:"prompt-prefix-color,omitempty"` PromptSuggestionsBGColor string `mapstructure:"prompt-suggestions-bg-color,omitempty" json:"prompt-suggestions-bg-color,omitempty" yaml:"prompt-suggestions-bg-color,omitempty"` PromptDescriptionBGColor string `mapstructure:"prompt-description-bg-color,omitempty" json:"prompt-description-bg-color,omitempty" yaml:"prompt-description-bg-color,omitempty"` PromptSuggestAllFlags bool `mapstructure:"prompt-suggest-all-flags,omitempty" json:"prompt-suggest-all-flags,omitempty" yaml:"prompt-suggest-all-flags,omitempty"` PromptDescriptionWithPrefix bool `mapstructure:"prompt-description-with-prefix,omitempty" json:"prompt-description-with-prefix,omitempty" yaml:"prompt-description-with-prefix,omitempty"` PromptDescriptionWithTypes bool `mapstructure:"prompt-description-with-types,omitempty" json:"prompt-description-with-types,omitempty" yaml:"prompt-description-with-types,omitempty"` PromptSuggestWithOrigin bool `mapstructure:"prompt-suggest-with-origin,omitempty" json:"prompt-suggest-with-origin,omitempty" yaml:"prompt-suggest-with-origin,omitempty"` // Listen ListenMaxConcurrentStreams uint32 `mapstructure:"listen-max-concurrent-streams,omitempty" json:"listen-max-concurrent-streams,omitempty" yaml:"listen-max-concurrent-streams,omitempty"` ListenPrometheusAddress string `mapstructure:"listen-prometheus-address,omitempty" json:"listen-prometheus-address,omitempty" yaml:"listen-prometheus-address,omitempty"` // VersionUpgrade UpgradeUsePkg bool `mapstructure:"upgrade-use-pkg" json:"upgrade-use-pkg,omitempty" yaml:"upgrade-use-pkg,omitempty"` // GetSet GetSetPrefix string `mapstructure:"getset-prefix,omitempty" json:"getset-prefix,omitempty" yaml:"getset-prefix,omitempty"` GetSetGet string `mapstructure:"getset-get,omitempty" json:"getset-get,omitempty" yaml:"getset-get,omitempty"` GetSetModel []string `mapstructure:"get-set-model,omitempty" yaml:"get-set-model,omitempty" json:"get-set-model,omitempty"` GetSetTarget string `mapstructure:"getset-target,omitempty" json:"getset-target,omitempty" yaml:"getset-target,omitempty"` GetSetType string `mapstructure:"getset-type,omitempty" json:"getset-type,omitempty" yaml:"getset-type,omitempty"` GetSetCondition string `mapstructure:"getset-condition,omitempty" json:"getset-condition,omitempty" yaml:"getset-condition,omitempty"` GetSetUpdate string `mapstructure:"getset-update,omitempty" json:"getset-update,omitempty" yaml:"getset-update,omitempty"` GetSetReplace string `mapstructure:"getset-replace,omitempty" json:"getset-replace,omitempty" yaml:"getset-replace,omitempty"` GetSetDelete string `mapstructure:"getset-delete,omitempty" json:"getset-delete,omitempty" yaml:"getset-delete,omitempty"` GetSetValue string `mapstructure:"getset-value,omitempty" json:"getset-value,omitempty" yaml:"getset-value,omitempty"` // Generate GenerateOutput string `mapstructure:"generate-output,omitempty" json:"generate-output,omitempty" yaml:"generate-output,omitempty"` GenerateJSON bool `mapstructure:"generate-json,omitempty" json:"generate-json,omitempty" yaml:"generate-json,omitempty"` GenerateConfigOnly bool `mapstructure:"generate-config-only,omitempty" json:"generate-config-only,omitempty" yaml:"generate-config-only,omitempty"` GeneratePath string `mapstructure:"generate-path,omitempty" json:"generate-path,omitempty" yaml:"generate-path,omitempty"` GenerateCamelCase bool `mapstructure:"generate-camel-case,omitempty" json:"generate-camel-case,omitempty" yaml:"generate-camel-case,omitempty"` GenerateSnakeCase bool `mapstructure:"generate-snake-case,omitempty" json:"generate-snake-case,omitempty" yaml:"generate-snake-case,omitempty"` // Generate Set Request GenerateSetRequestUpdatePath []string `mapstructure:"generate-update-path,omitempty" json:"generate-update-path,omitempty" yaml:"generate-update-path,omitempty"` GenerateSetRequestReplacePath []string `mapstructure:"generate-replace-path,omitempty" json:"generate-replace-path,omitempty" yaml:"generate-replace-path,omitempty"` // Generate path GeneratePathWithDescr bool `mapstructure:"generate-descr,omitempty" json:"generate-descr,omitempty" yaml:"generate-descr,omitempty"` GeneratePathWithPrefix bool `mapstructure:"generate-with-prefix,omitempty" json:"generate-with-prefix,omitempty" yaml:"generate-with-prefix,omitempty"` GeneratePathWithTypes bool `mapstructure:"generate-types,omitempty" json:"generate-types,omitempty" yaml:"generate-types,omitempty"` GeneratePathSearch bool `mapstructure:"generate-search,omitempty" json:"generate-search,omitempty" yaml:"generate-search,omitempty"` GeneratePathPathType string `mapstructure:"generate-path-path-type,omitempty" json:"generate-path-path-type,omitempty" yaml:"generate-path-path-type,omitempty"` GeneratePathState bool `mapstructure:"generate-path-state,omitempty" json:"generate-path-state,omitempty" yaml:"generate-path-state,omitempty"` GeneratePathConfig bool `mapstructure:"generate-path-config,omitempty" json:"generate-path-config,omitempty" yaml:"generate-path-config,omitempty"` GeneratePathWithNonLeaves bool `mapstructure:"generate-path-with-non-leaves,omitempty" json:"generate-path-with-non-leaves,omitempty" yaml:"generate-path-with-non-leaves,omitempty"` // DiffPath []string `mapstructure:"diff-path,omitempty" json:"diff-path,omitempty" yaml:"diff-path,omitempty"` DiffPrefix string `mapstructure:"diff-prefix,omitempty" json:"diff-prefix,omitempty" yaml:"diff-prefix,omitempty"` DiffModel []string `mapstructure:"diff-model,omitempty" json:"diff-model,omitempty" yaml:"diff-model,omitempty"` DiffType string `mapstructure:"diff-type,omitempty" json:"diff-type,omitempty" yaml:"diff-type,omitempty"` DiffTarget string `mapstructure:"diff-target,omitempty" json:"diff-target,omitempty" yaml:"diff-target,omitempty"` DiffSub bool `mapstructure:"diff-sub,omitempty" json:"diff-sub,omitempty" yaml:"diff-sub,omitempty"` DiffRef string `mapstructure:"diff-ref,omitempty" json:"diff-ref,omitempty" yaml:"diff-ref,omitempty"` DiffCompare []string `mapstructure:"diff-compare,omitempty" json:"diff-compare,omitempty" yaml:"diff-compare,omitempty"` DiffQos uint32 `mapstructure:"diff-qos,omitempty" json:"diff-qos,omitempty" yaml:"diff-qos,omitempty"` DiffSetRequestRef string `mapstructure:"diff-setrequest-ref,omitempty" json:"diff-setrequest-ref,omitempty" yaml:"diff-setrequest-ref,omitempty"` DiffSetRequestNew string `mapstructure:"diff-setrequest-new,omitempty" json:"diff-setrequest-new,omitempty" yaml:"diff-setrequest-new,omitempty"` DiffSetRequestFull bool `mapstructure:"diff-setrequest-full,omitempty" json:"diff-setrequest-full,omitempty" yaml:"diff-setrequest-full,omitempty"` DiffSetToNotifsSet string `mapstructure:"diff-set-to-notifs-set,omitempty" json:"diff-set-to-notifs-set,omitempty" yaml:"diff-set-to-notifs-set,omitempty"` DiffSetToNotifsResponse string `mapstructure:"diff-set-to-notifs-response,omitempty" json:"diff-set-to-notifs-response,omitempty" yaml:"diff-set-to-notifs-response,omitempty"` DiffSetToNotifsFull bool `mapstructure:"diff-set-to-notifs-full,omitempty" json:"diff-set-to-notifs-full,omitempty" yaml:"diff-set-to-notifs-full,omitempty"` // TunnelServerSubscribe bool `mapstructure:"tunnel-server-subscribe,omitempty" yaml:"tunnel-server-subscribe,omitempty" json:"tunnel-server-subscribe,omitempty"` // Processor ProcessorInput string `mapstructure:"processor-input,omitempty" yaml:"processor-input,omitempty" json:"processor-input,omitempty"` ProcessorInputDelimiter string `mapstructure:"processor-input-delimiter,omitempty" yaml:"processor-input-delimiter,omitempty" json:"processor-input-delimiter,omitempty"` ProcessorName []string `mapstructure:"processor-name,omitempty" yaml:"processor-name,omitempty" json:"processor-name,omitempty"` ProcessorOutput string `mapstructure:"processor-output,omitempty" yaml:"processor-output,omitempty" json:"processor-output,omitempty"` // Tree TreeFlat bool `mapstructure:"tree-flat,omitempty" yaml:"tree-flat,omitempty" json:"tree-flat,omitempty"` TreeDetails bool `mapstructure:"tree-details,omitempty" yaml:"tree-details,omitempty" json:"tree-details,omitempty"` } func New() *Config { return &Config{ GlobalFlags{}, LocalFlags{}, viper.NewWithOptions(viper.KeyDelimiter("/")), make(map[string]*types.TargetConfig), make(map[string]*types.SubscriptionConfig), make(map[string]map[string]interface{}), make(map[string]map[string]interface{}), make(map[string]map[string]interface{}), nil, nil, nil, nil, nil, nil, log.New(io.Discard, configLogPrefix, utils.DefaultLoggingFlags), nil, make(map[string]interface{}), } } func (c *Config) Load(ctx context.Context) error { c.FileConfig.SetEnvPrefix(envPrefix) c.FileConfig.SetEnvKeyReplacer(strings.NewReplacer("/", "_", "-", "_")) c.FileConfig.AutomaticEnv() if c.GlobalFlags.CfgFile != "" { // configuration file path is explicitly set c.FileConfig.SetConfigFile(c.GlobalFlags.CfgFile) configBytes, err := gfile.ReadFile(ctx, c.FileConfig.ConfigFileUsed()) if err != nil { return err } err = c.FileConfig.ReadConfig(bytes.NewBuffer(configBytes)) if err != nil { return err } } else { // discover gnmic config file home, err := homedir.Dir() if err != nil { return err } c.FileConfig.AddConfigPath(".") c.FileConfig.AddConfigPath(home) c.FileConfig.AddConfigPath(xdg.ConfigHome) c.FileConfig.AddConfigPath(xdg.ConfigHome + "/gnmic") c.FileConfig.SetConfigName(configName) err = c.FileConfig.ReadInConfig() if err != nil { if _, ok := err.(viper.ConfigFileNotFoundError); !ok { return err } } } err := c.FileConfig.Unmarshal(c) if err != nil { return err } c.mergeEnvVars() return c.expandOSPathFlagValues() } func (c *Config) ToStore(s store.Store[any]) error { targets := make(map[string]any) subscriptions := make(map[string]any) processors := make(map[string]any) outputs := make(map[string]any) inputs := make(map[string]any) actions := make(map[string]any) _, err := c.GetTargets() if err != nil { if !errors.Is(err, ErrNoTargetsFound) { return err } } // targets for n, t := range c.Targets { targets[n] = t } // subscriptions for n, s := range c.Subscriptions { subscriptions[n] = s } // processors for n, p := range c.Processors { processors[n] = p } // outputs for n, o := range c.Outputs { outputs[n] = o } // inputs for n, i := range c.Inputs { inputs[n] = i } // actions for n, a := range c.Actions { actions[n] = a } // set all err = s.SetAll("targets", targets) if err != nil { return err } err = s.SetAll("subscriptions", subscriptions) if err != nil { return err } // actions err = s.SetAll("actions", actions) if err != nil { return err } err = s.SetAll("processors", processors) if err != nil { return err } err = s.SetAll("outputs", outputs) if err != nil { return err } err = s.SetAll("inputs", inputs) if err != nil { return err } // _, err = s.Set("global-flags", "global-flags", c.GlobalFlags) if err != nil { return err } // clustering _, err = s.Set("clustering", "clustering", c.Clustering) if err != nil { return err } // gnmi server _, err = s.Set("gnmi-server", "gnmi-server", c.GnmiServer) if err != nil { return err } // api server _, err = s.Set("api-server", "api-server", c.APIServer) if err != nil { return err } // loader _, err = s.Set("loader", "loader", c.Loader) if err != nil { return err } // tunnel server _, err = s.Set("tunnel-server", "tunnel-server", c.TunnelServer) if err != nil { return err } return nil } func (c *Config) SetLogger() (io.Writer, int, error) { var f io.Writer = io.Discard var loggingFlags = c.logger.Flags() var err error if c.LogFile != "" { if c.LogMaxSize > 0 { f = &lumberjack.Logger{ Filename: c.LogFile, MaxSize: c.LogMaxSize, MaxBackups: c.LogMaxBackups, Compress: c.LogCompress, } } else { f, err = os.OpenFile(c.LogFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) if err != nil { return nil, 0, err } } } else { if c.Debug { c.Log = true } if c.Log { f = os.Stderr } } if c.Debug { loggingFlags |= log.Llongfile } c.logger.SetOutput(f) c.logger.SetFlags(loggingFlags) return f, loggingFlags, nil } func (c *Config) SetPersistentFlagsFromFile(cmd *cobra.Command) { // set debug and log values from file before other persistent flags cmd.PersistentFlags().VisitAll(func(f *pflag.Flag) { if f.Name == "debug" || f.Name == "log" { if !f.Changed && c.FileConfig.IsSet(f.Name) { c.setFlagValue(cmd, f.Name, c.FileConfig.Get(f.Name)) } } }) // cmd.PersistentFlags().VisitAll(func(f *pflag.Flag) { if f.Name == "debug" || f.Name == "log" { return } if c.Debug { c.logger.Printf("cmd=%s, flagName=%s, changed=%v, isSetInFile=%v", cmd.Name(), f.Name, f.Changed, c.FileConfig.IsSet(f.Name)) } if !f.Changed && c.FileConfig.IsSet(f.Name) { c.setFlagValue(cmd, f.Name, c.FileConfig.Get(f.Name)) } }) } func (c *Config) SetLocalFlagsFromFile(cmd *cobra.Command) { cmd.LocalFlags().VisitAll(func(f *pflag.Flag) { flagName := fmt.Sprintf("%s-%s", cmd.Name(), f.Name) if c.Debug { c.logger.Printf("cmd=%s, flagName=%s, changed=%v, isSetInFile=%v", cmd.Name(), f.Name, f.Changed, c.FileConfig.IsSet(flagName)) } if !f.Changed && c.FileConfig.IsSet(flagName) { c.setFlagValue(cmd, f.Name, c.FileConfig.Get(flagName)) } }) } func (c *Config) setFlagValue(cmd *cobra.Command, fName string, val interface{}) { switch val := val.(type) { case []interface{}: if c.Debug { c.logger.Printf("cmd=%s, flagName=%s, valueType=%T, length=%d, value=%#v", cmd.Name(), fName, val, len(val), val) } nVal := make([]string, 0, len(val)) for _, v := range val { nVal = append(nVal, fmt.Sprintf("%v", v)) } cmd.Flags().Set(fName, strings.Join(nVal, ",")) default: if c.Debug { c.logger.Printf("cmd=%s, flagName=%s, valueType=%T, value=%#v", cmd.Name(), fName, val, val) } cmd.Flags().Set(fName, fmt.Sprintf("%v", val)) } } func flagIsSet(cmd *cobra.Command, name string) bool { if cmd == nil { return false } var isSet bool cmd.Flags().VisitAll(func(f *pflag.Flag) { if f.Name == name && f.Changed { isSet = true return } }) return isSet } func (c *Config) CreateGetRequest(tc *types.TargetConfig) (*gnmi.GetRequest, error) { if c == nil { return nil, fmt.Errorf("%w", ErrInvalidConfig) } gnmiOpts := make([]api.GNMIOption, 0, 4+len(c.LocalFlags.GetPath)) enc := c.Encoding if tc.Encoding != nil { enc = *tc.Encoding } gnmiOpts = append(gnmiOpts, api.Encoding(enc), api.DataType(c.LocalFlags.GetType), api.Prefix(c.LocalFlags.GetPrefix), api.Target(c.LocalFlags.GetTarget), ) for _, p := range c.LocalFlags.GetPath { gnmiOpts = append(gnmiOpts, api.Path(strings.TrimSpace(p))) } if c.LocalFlags.GetDepth > 0 { gnmiOpts = append(gnmiOpts, api.Extension_Depth(c.LocalFlags.GetDepth)) } exts, err := c.parseAdditionalRequestExtensions() if err != nil { return nil, err } gnmiOpts = append(gnmiOpts, exts...) return api.NewGetRequest(gnmiOpts...) } func (c *Config) CreateGASGetRequest() (*gnmi.GetRequest, error) { if c == nil { return nil, fmt.Errorf("%w", ErrInvalidConfig) } return api.NewGetRequest( api.Encoding(c.Encoding), api.DataType(c.LocalFlags.GetSetType), api.Prefix(c.LocalFlags.GetSetPrefix), api.Target(c.LocalFlags.GetSetTarget), api.Path(strings.TrimSpace(c.LocalFlags.GetSetGet))) } func (c *Config) CreateGASSetRequest(input interface{}) (*gnmi.SetRequest, error) { gnmiOpts := make([]api.GNMIOption, 0, 3) gnmiOpts = append(gnmiOpts, api.Prefix(c.LocalFlags.GetSetPrefix)) gnmiOpts = append(gnmiOpts, api.Target(c.LocalFlags.GetSetTarget)) delPath, err := c.execPathTemplate(c.LocalFlags.GetSetDelete, input) if err != nil { return nil, err } if delPath != "" { gnmiOpts = append(gnmiOpts, api.Delete(delPath)) } // updatePath, err := c.execPathTemplate(c.LocalFlags.GetSetUpdate, input) if err != nil { return nil, err } replacePath, err := c.execPathTemplate(c.LocalFlags.GetSetReplace, input) if err != nil { return nil, err } val, err := c.execValueTemplate(c.LocalFlags.GetSetValue, input) if err != nil { return nil, err } if updatePath != "" { gnmiOpts = append(gnmiOpts, api.Update( api.Path(updatePath), api.Value(val, c.Encoding), )) } else if replacePath != "" { gnmiOpts = append(gnmiOpts, api.Replace( api.Path(replacePath), api.Value(val, c.Encoding), )) } return api.NewSetRequest(gnmiOpts...) } func (c *Config) execPathTemplate(tplString string, input interface{}) (string, error) { if tplString == "" { return "", nil } tplString = os.ExpandEnv(tplString) q, err := gojq.Parse(tplString) if err != nil { return "", err } code, err := gojq.Compile(q) if err != nil { return "", err } iter := code.Run(input) var res interface{} var ok bool res, ok = iter.Next() if !ok { if c.Debug { c.logger.Printf("jq input: %+v", input) c.logger.Printf("jq result: %+v", res) } return "", fmt.Errorf("unexpected jq result type: %T", res) } switch v := res.(type) { case error: return "", v case string: c.logger.Printf("path jq expression result: %s", v) return v, nil default: if c.Debug { c.logger.Printf("jq input: %+v", input) c.logger.Printf("jq result: %+v", v) } return "", fmt.Errorf("unexpected jq result type: %T", v) } } func (c *Config) execValueTemplate(tplString string, input interface{}) (string, error) { if tplString == "" { return "", nil } tplString = os.ExpandEnv(tplString) q, err := gojq.Parse(tplString) if err != nil { return "", err } code, err := gojq.Compile(q) if err != nil { return "", err } iter := code.Run(input) var res interface{} var ok bool res, ok = iter.Next() if !ok { if c.Debug { c.logger.Printf("jq input: %+v", input) c.logger.Printf("jq result: %+v", res) } return "", fmt.Errorf("unexpected jq result type: %T", res) } switch v := res.(type) { case error: return "", v case string: c.logger.Printf("path jq expression result: %s", v) return trimQuotes(v), nil default: if c.Debug { c.logger.Printf("jq input: %+v", input) c.logger.Printf("jq result: %+v", v) } return "", fmt.Errorf("unexpected jq result type: %T", v) } } func (c *Config) CreateSetRequest(targetName string) ([]*gnmi.SetRequest, error) { if len(c.SetRequestProtoFile) > 0 { return c.CreateSetRequestFromProtoFile() } if len(c.SetRequestFile) > 0 { return c.CreateSetRequestFromFile(targetName) } if c.Debug { c.logger.Printf("Set input delete: %+v", &c.LocalFlags.SetDelete) c.logger.Printf("Set input update: %+v", &c.LocalFlags.SetUpdate) c.logger.Printf("Set input update path(s): %+v", &c.LocalFlags.SetUpdatePath) c.logger.Printf("Set input update value(s): %+v", &c.LocalFlags.SetUpdateValue) c.logger.Printf("Set input update file(s): %+v", &c.LocalFlags.SetUpdateFile) c.logger.Printf("Set input replace: %+v", &c.LocalFlags.SetReplace) c.logger.Printf("Set input replace path(s): %+v", &c.LocalFlags.SetReplacePath) c.logger.Printf("Set input replace value(s): %+v", &c.LocalFlags.SetReplaceValue) c.logger.Printf("Set input replace file(s): %+v", &c.LocalFlags.SetReplaceFile) c.logger.Printf("Set input union replace path(s): %+v", &c.LocalFlags.SetUnionReplacePath) c.logger.Printf("Set input union replace value(s): %+v", &c.LocalFlags.SetUnionReplaceValue) c.logger.Printf("Set input union replace file(s): %+v", &c.LocalFlags.SetUnionReplaceFile) } gnmiOpts := make([]api.GNMIOption, 0, 2+ // prefix+target len(c.LocalFlags.SetDelete)+len(c.LocalFlags.SetUpdate)+len(c.LocalFlags.SetReplace)+len(c.LocalFlags.SetUnionReplace)+ len(c.LocalFlags.SetUpdatePath)+len(c.LocalFlags.SetReplacePath)+len(c.LocalFlags.SetUnionReplacePath)+ 1+1+ // updateCli + replaceCli 1+1, // updateCliFile + replaceCliFile ) gnmiOpts = append(gnmiOpts, api.Prefix(c.LocalFlags.SetPrefix), api.Target(c.LocalFlags.SetTarget), ) for _, p := range c.LocalFlags.SetDelete { gnmiOpts = append(gnmiOpts, api.Delete(strings.TrimSpace(p))) } for _, u := range c.LocalFlags.SetUpdate { singleUpdate := strings.SplitN(u, c.LocalFlags.SetDelimiter, 3) if len(singleUpdate) < 3 { return nil, fmt.Errorf("invalid inline update format: %s", c.LocalFlags.SetUpdate) } gnmiOpts = append(gnmiOpts, api.Update( api.Path(strings.TrimSpace(singleUpdate[0])), api.Value(singleUpdate[2], singleUpdate[1]), ), ) } for _, r := range c.LocalFlags.SetReplace { singleReplace := strings.SplitN(r, c.LocalFlags.SetDelimiter, 3) if len(singleReplace) < 3 { return nil, fmt.Errorf("invalid inline replace format: %s", c.LocalFlags.SetReplace) } gnmiOpts = append(gnmiOpts, api.Replace( api.Path(strings.TrimSpace(singleReplace[0])), api.Value(singleReplace[2], singleReplace[1]), ), ) } for _, r := range c.LocalFlags.SetUnionReplace { singleUnionReplace := strings.SplitN(r, c.LocalFlags.SetDelimiter, 3) if len(singleUnionReplace) < 3 { return nil, fmt.Errorf("invalid inline union-replace format: %s'", c.LocalFlags.SetReplace) } gnmiOpts = append(gnmiOpts, api.UnionReplace( api.Path(strings.TrimSpace(singleUnionReplace[0])), api.Value(singleUnionReplace[2], singleUnionReplace[1]), ), ) } useUpdateFiles := len(c.LocalFlags.SetUpdateFile) > 0 && len(c.LocalFlags.SetUpdateValue) == 0 useReplaceFiles := len(c.LocalFlags.SetReplaceFile) > 0 && len(c.LocalFlags.SetReplaceValue) == 0 useUnionReplaceFiles := len(c.LocalFlags.SetUnionReplaceFile) > 0 && len(c.LocalFlags.SetUnionReplaceValue) == 0 for i, p := range c.LocalFlags.SetUpdatePath { var updOpt api.GNMIOption if useUpdateFiles { updateData, err := readFile(c.LocalFlags.SetUpdateFile[i]) if err != nil { c.logger.Printf("error reading data from file '%s': %v", c.LocalFlags.SetUpdateFile[i], err) return nil, err } trim := "" if !c.LocalFlags.SetNoTrim { trim = trimChars } updOpt = api.Update( api.Path(strings.TrimSpace(p)), api.Value(string(bytes.Trim(updateData, trim)), c.Encoding), ) } else { updOpt = api.Update( api.Path(strings.TrimSpace(p)), api.Value(c.LocalFlags.SetUpdateValue[i], c.Encoding), ) } gnmiOpts = append(gnmiOpts, updOpt) } for i, p := range c.LocalFlags.SetReplacePath { var replaceOpt api.GNMIOption if useReplaceFiles { replaceData, err := readFile(c.LocalFlags.SetReplaceFile[i]) if err != nil { c.logger.Printf("error reading data from file '%s': %v", c.LocalFlags.SetReplaceFile[i], err) return nil, err } trim := "" if !c.LocalFlags.SetNoTrim { trim = trimChars } replaceOpt = api.Replace( api.Path(strings.TrimSpace(p)), api.Value(string(bytes.Trim(replaceData, trim)), c.Encoding), ) } else { replaceOpt = api.Replace( api.Path(strings.TrimSpace(p)), api.Value(c.LocalFlags.SetReplaceValue[i], c.Encoding), ) } gnmiOpts = append(gnmiOpts, replaceOpt) } for i, p := range c.LocalFlags.SetUnionReplacePath { var unionReplaceOpt api.GNMIOption if useUnionReplaceFiles { replaceData, err := readFile(c.LocalFlags.SetUnionReplaceFile[i]) if err != nil { c.logger.Printf("error reading data from file '%s': %v", c.LocalFlags.SetUnionReplaceFile[i], err) return nil, err } trim := "" if !c.LocalFlags.SetNoTrim { trim = trimChars } unionReplaceOpt = api.UnionReplace( api.Path(strings.TrimSpace(p)), api.Value(string(bytes.Trim(replaceData, trim)), c.Encoding), ) } else { unionReplaceOpt = api.UnionReplace( api.Path(strings.TrimSpace(p)), api.Value(c.LocalFlags.SetUnionReplaceValue[i], c.Encoding), ) } gnmiOpts = append(gnmiOpts, unionReplaceOpt) } if len(c.LocalFlags.SetUpdateCli) > 0 { gnmiOpts = append(gnmiOpts, api.Update( api.Path("cli:/"), api.Value(strings.Join(c.LocalFlags.SetUpdateCli, "\n"), "ascii"), ), ) } if len(c.LocalFlags.SetReplaceCli) > 0 { gnmiOpts = append(gnmiOpts, api.Replace( api.Path("cli:/"), api.Value(strings.Join(c.LocalFlags.SetReplaceCli, "\n"), "ascii"), ), ) } if c.LocalFlags.SetUpdateCliFile != "" { data, err := readFile(c.LocalFlags.SetUpdateCliFile) if err != nil { return nil, err } gnmiOpts = append(gnmiOpts, api.Update( api.Path("cli:/"), api.Value(string(data), "ascii"), ), ) } if c.LocalFlags.SetReplaceCliFile != "" { data, err := readFile(c.LocalFlags.SetReplaceCliFile) if err != nil { return nil, err } gnmiOpts = append(gnmiOpts, api.Replace( api.Path("cli:/"), api.Value(string(data), "ascii"), ), ) } if c.LocalFlags.SetCommitId != "" { if c.LocalFlags.SetCommitRequest { gnmiOpts = append(gnmiOpts, api.Extension_CommitRequest( c.LocalFlags.SetCommitId, c.LocalFlags.SetCommitRollbackDuration, )) } else if c.LocalFlags.SetCommitConfirm { gnmiOpts = append(gnmiOpts, api.Extension_CommitConfirm( c.LocalFlags.SetCommitId, )) } else if c.LocalFlags.SetCommitCancel { gnmiOpts = append(gnmiOpts, api.Extension_CommitCancel( c.LocalFlags.SetCommitId, )) } else { gnmiOpts = append(gnmiOpts, api.Extension_CommitSetRollbackDuration( c.LocalFlags.SetCommitId, c.LocalFlags.SetCommitRollbackDuration, )) } } exts, err := c.parseAdditionalRequestExtensions() if err != nil { return nil, err } gnmiOpts = append(gnmiOpts, exts...) // req, err := api.NewSetRequest(gnmiOpts...) return []*gnmi.SetRequest{req}, err } // readFile reads a json or yaml file. the the file is .yaml, converts it to json and returns []byte and an error func readFile(name string) ([]byte, error) { data, err := gfile.ReadFile(context.TODO(), name) if err != nil { return nil, err } switch filepath.Ext(name) { default: return data, nil case ".yaml", ".yml": return toJSONBytes(data) } } func toJSONBytes(data []byte) ([]byte, error) { var out interface{} var err error err = yaml.Unmarshal(data, &out) if err != nil { return nil, err } newStruct := convert(out) b := new(bytes.Buffer) enc := json.NewEncoder(b) enc.SetEscapeHTML(false) err = enc.Encode(newStruct) if err != nil { return nil, err } return b.Bytes(), nil } // SanitizeArrayFlagValue trims trailing and leading brackets ([]), // from each of ls elements only if both are present. func SanitizeArrayFlagValue(ls []string) []string { res := make([]string, 0, len(ls)) for i := range ls { if ls[i] == "[]" { continue } for strings.HasPrefix(ls[i], "[") && strings.HasSuffix(ls[i], "]") { ls[i] = ls[i][1 : len(ls[i])-1] } res = append(res, ls[i]) } return res } func ParseAddressField(addr []string) []string { res := make([]string, 0, len(addr)) for i := range addr { if addr[i] == "[]" { continue } for strings.HasPrefix(addr[i], "[") && strings.HasSuffix(addr[i], "]") { addr[i] = addr[i][1 : len(addr[i])-1] } res = append(res, strings.Split(addr[i], ",")...) } return res } func (c *Config) ValidateSetInput() error { var err error c.LocalFlags.SetDelete = SanitizeArrayFlagValue(c.LocalFlags.SetDelete) c.LocalFlags.SetUpdate = SanitizeArrayFlagValue(c.LocalFlags.SetUpdate) c.LocalFlags.SetReplace = SanitizeArrayFlagValue(c.LocalFlags.SetReplace) c.LocalFlags.SetUpdatePath = SanitizeArrayFlagValue(c.LocalFlags.SetUpdatePath) c.LocalFlags.SetReplacePath = SanitizeArrayFlagValue(c.LocalFlags.SetReplacePath) c.LocalFlags.SetUpdateValue = SanitizeArrayFlagValue(c.LocalFlags.SetUpdateValue) c.LocalFlags.SetReplaceValue = SanitizeArrayFlagValue(c.LocalFlags.SetReplaceValue) c.LocalFlags.SetUpdateFile = SanitizeArrayFlagValue(c.LocalFlags.SetUpdateFile) c.LocalFlags.SetReplaceFile = SanitizeArrayFlagValue(c.LocalFlags.SetReplaceFile) c.LocalFlags.SetRequestFile = SanitizeArrayFlagValue(c.LocalFlags.SetRequestFile) c.LocalFlags.SetUpdateFile, err = ExpandOSPaths(c.LocalFlags.SetUpdateFile) if err != nil { return err } c.LocalFlags.SetReplaceFile, err = ExpandOSPaths(c.LocalFlags.SetReplaceFile) if err != nil { return err } for i := range c.LocalFlags.SetRequestFile { c.LocalFlags.SetRequestFile[i], err = expandOSPath(c.LocalFlags.SetRequestFile[i]) if err != nil { return err } } c.LocalFlags.SetRequestVars, err = expandOSPath(c.LocalFlags.SetRequestVars) if err != nil { return err } if (len(c.LocalFlags.SetDelete)+len(c.LocalFlags.SetUpdate)+len(c.LocalFlags.SetReplace)+len(c.LocalFlags.SetUnionReplace)) == 0 && (len(c.LocalFlags.SetUpdatePath)+len(c.LocalFlags.SetReplacePath)+len(c.LocalFlags.SetUnionReplacePath)) == 0 && len(c.LocalFlags.SetRequestFile) == 0 && len(c.LocalFlags.SetReplaceCli) == 0 && len(c.LocalFlags.SetUpdateCli) == 0 && len(c.LocalFlags.SetReplaceCliFile) == 0 && len(c.LocalFlags.SetUpdateCliFile) == 0 && len(c.LocalFlags.SetRequestProtoFile) == 0 && c.LocalFlags.SetCommitId == "" { return errors.New("no paths or request file provided") } if len(c.LocalFlags.SetUpdateFile) > 0 && len(c.LocalFlags.SetUpdateValue) > 0 { return errors.New("set update from file and value are not supported in the same command") } if len(c.LocalFlags.SetReplaceFile) > 0 && len(c.LocalFlags.SetReplaceValue) > 0 { return errors.New("set replace from file and value are not supported in the same command") } if len(c.LocalFlags.SetUnionReplaceFile) > 0 && len(c.LocalFlags.SetUnionReplaceValue) > 0 { return errors.New("set union-replace from file and value are not supported in the same command") } if len(c.LocalFlags.SetUpdatePath) != len(c.LocalFlags.SetUpdateValue) && len(c.LocalFlags.SetUpdatePath) != len(c.LocalFlags.SetUpdateFile) { return errors.New("missing update value/file or path") } if len(c.LocalFlags.SetReplacePath) != len(c.LocalFlags.SetReplaceValue) && len(c.LocalFlags.SetReplacePath) != len(c.LocalFlags.SetReplaceFile) { return errors.New("missing replace value/file or path") } if len(c.LocalFlags.SetUnionReplacePath) != len(c.LocalFlags.SetUnionReplaceValue) && len(c.LocalFlags.SetUnionReplacePath) != len(c.LocalFlags.SetUnionReplaceFile) { return errors.New("missing union-replace value/file or path") } return nil } func ExpandOSPaths(paths []string) ([]string, error) { var err error for i := range paths { paths[i], err = expandOSPath(paths[i]) if err != nil { return nil, err } } return paths, nil } func expandOSPath(p string) (string, error) { if p == "-" || p == "" { return p, nil } if strings.HasPrefix(p, "http://") || strings.HasPrefix(p, "https://") || strings.HasPrefix(p, "sftp://") || strings.HasPrefix(p, "ftp://") { return p, nil } np, err := homedir.Expand(p) if err != nil { return "", fmt.Errorf("path %q: %v", p, err) } if !filepath.IsAbs(np) { cwd, err := os.Getwd() if err != nil { return "", fmt.Errorf("path %q: %v", p, err) } np = filepath.Join(cwd, np) } _, err = os.Stat(np) if err != nil { return "", err } return np, nil } func (c *Config) expandOSPathFlagValues() error { for _, flagName := range osPathFlags { if c.FileConfig.IsSet(flagName) { expandedPath, err := expandOSPath(c.FileConfig.GetString(flagName)) if err != nil { return err } c.FileConfig.Set(flagName, expandedPath) } } return nil } func trimQuotes(s string) string { if len(s) >= 2 { if s[0] == '"' && s[len(s)-1] == '"' { return s[1 : len(s)-1] } } return s } ================================================ FILE: pkg/config/config_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "errors" "log" "os" "strings" "testing" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/api" "github.com/openconfig/gnmic/pkg/api/testutils" "github.com/openconfig/gnmic/pkg/api/types" ) var createGetRequestTestSet = map[string]struct { in *Config out *gnmi.GetRequest err error }{ "nil_input": { in: nil, out: nil, err: ErrInvalidConfig, }, "unknown_encoding_type": { in: &Config{ GlobalFlags{ Encoding: "dummy", }, LocalFlags{}, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, }, out: nil, err: api.ErrInvalidValue, }, "invalid_prefix": { in: &Config{ GlobalFlags{ Encoding: "json", }, LocalFlags{ GetPrefix: "/invalid/]prefix", }, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, }, out: nil, err: api.ErrInvalidValue, }, "invalid_path": { in: &Config{ GlobalFlags{ Encoding: "json", }, LocalFlags{ GetPrefix: "/invalid/]path", }, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, }, out: nil, err: api.ErrInvalidValue, }, "unknown_data_type": { in: &Config{ GlobalFlags{ Encoding: "json", }, LocalFlags{ GetPrefix: "/valid/path", GetType: "dummy", }, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, }, out: nil, err: api.ErrInvalidValue, }, "basic_get_request": { in: &Config{ GlobalFlags{ Encoding: "json", }, LocalFlags{ GetPath: []string{"/valid/path"}, }, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, }, out: &gnmi.GetRequest{ Path: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path"}, }, }, }, }, err: nil, }, "get_request_with_type": { in: &Config{ GlobalFlags{ Encoding: "json", }, LocalFlags{ GetPath: []string{"/valid/path"}, GetType: "state", }, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, }, out: &gnmi.GetRequest{ Path: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path"}, }, }, }, Type: gnmi.GetRequest_STATE, }, err: nil, }, "get_request_with_encoding": { in: &Config{ GlobalFlags{ Encoding: "proto", }, LocalFlags{ GetPath: []string{"/valid/path"}, }, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, }, out: &gnmi.GetRequest{ Path: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path"}, }, }, }, Encoding: gnmi.Encoding_PROTO, }, err: nil, }, "get_request_with_prefix": { in: &Config{ GlobalFlags{ Encoding: "proto", }, LocalFlags{ GetPrefix: "/valid/prefix", GetPath: []string{"/valid/path"}, }, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, }, out: &gnmi.GetRequest{ Prefix: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "prefix"}, }, }, Path: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path"}, }, }, }, Encoding: gnmi.Encoding_PROTO, }, err: nil, }, "get_request_with_2_paths": { in: &Config{ GlobalFlags{ Encoding: "json", }, LocalFlags{ GetPath: []string{ "/valid/path1", "/valid/path2", }, }, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, }, out: &gnmi.GetRequest{ Path: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path1"}, }, }, { Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path2"}, }, }, }, }, err: nil, }, } var createSetRequestTestSet = map[string]struct { in *Config out *gnmi.SetRequest err error }{ "set_update_request": { in: &Config{ GlobalFlags{}, LocalFlags{ SetDelimiter: ":::", SetUpdate: []string{"/valid/path:::json:::value"}, }, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, }, out: &gnmi.SetRequest{ Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value\""), }, }, }, }, }, err: nil, }, "set_replace_request": { in: &Config{ GlobalFlags{}, LocalFlags{ SetDelimiter: ":::", SetReplace: []string{"/valid/path:::json:::value"}, }, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, }, out: &gnmi.SetRequest{ Replace: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value\""), }, }, }, }, }, err: nil, }, "set_delete_request": { in: &Config{ GlobalFlags{}, LocalFlags{ SetDelete: []string{"/valid/path"}, }, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, }, out: &gnmi.SetRequest{ Delete: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path"}, }, }, }, }, err: nil, }, "set_multiple_update_request": { in: &Config{ GlobalFlags{}, LocalFlags{ SetDelimiter: ":::", SetUpdate: []string{ "/valid/path1:::json:::value1", "/valid/path2:::json_ietf:::value2", }, }, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, }, out: &gnmi.SetRequest{ Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path1"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value1\""), }, }, }, { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path2"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{ JsonIetfVal: []byte("\"value2\""), }, }, }, }, }, err: nil, }, "set_multiple_replace_request": { in: &Config{ GlobalFlags{}, LocalFlags{ SetDelimiter: ":::", SetReplace: []string{ "/valid/path1:::json:::value1", "/valid/path2:::json_ietf:::value2", }, }, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, }, out: &gnmi.SetRequest{ Replace: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path1"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value1\""), }, }, }, { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path2"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{ JsonIetfVal: []byte("\"value2\""), }, }, }, }, }, err: nil, }, "set_multiple_delete_request": { in: &Config{ GlobalFlags{}, LocalFlags{ SetDelete: []string{ "/valid/path1", "/valid/path2", }, }, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, }, out: &gnmi.SetRequest{ Delete: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path1"}, }, }, { Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path2"}, }, }, }, }, err: nil, }, "set_combined_request": { in: &Config{ GlobalFlags{}, LocalFlags{ SetDelimiter: ":::", SetUpdate: []string{"/valid/path1:::json:::value1"}, SetReplace: []string{"/valid/path2:::json:::value2"}, SetDelete: []string{"/valid/path"}, }, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, }, out: &gnmi.SetRequest{ Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path1"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value1\""), }, }, }, }, Replace: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path2"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value2\""), }, }, }, }, Delete: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path"}, }, }, }, }, err: nil, }, "set_update_path_request": { in: &Config{ GlobalFlags{ Encoding: "json", }, LocalFlags{ SetUpdatePath: []string{"/valid/path"}, SetUpdateValue: []string{"value"}, }, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, }, out: &gnmi.SetRequest{ Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value\""), }, }, }, }, }, err: nil, }, "set_replace_path_request": { in: &Config{ GlobalFlags{ Encoding: "json", }, LocalFlags{ SetReplacePath: []string{"/valid/path"}, SetReplaceValue: []string{"value"}, }, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, }, out: &gnmi.SetRequest{ Replace: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value\""), }, }, }, }, }, err: nil, }, "set_union_replace_path_request": { in: &Config{ GlobalFlags{ Encoding: "json", }, LocalFlags{ SetUnionReplacePath: []string{"/valid/path"}, SetUnionReplaceValue: []string{"value"}, }, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, }, out: &gnmi.SetRequest{ UnionReplace: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value\""), }, }, }, }, }, err: nil, }, } var execPathTemplateTestSet = map[string]struct { tpl string input interface{} out string }{ "nil": { tpl: "", input: nil, out: "", }, "simple": { tpl: `"/path/"`, input: nil, out: "/path/", }, "with_an_expression": { tpl: `"/interfaces/" + .name`, input: map[string]interface{}{ "name": "interface", }, out: "/interfaces/interface", }, } func TestCreateGetRequest(t *testing.T) { for name, data := range createGetRequestTestSet { t.Run(name, func(t *testing.T) { getReq, err := data.in.CreateGetRequest(&types.TargetConfig{}) t.Logf("exp value: %+v", data.out) t.Logf("got value: %+v", getReq) t.Logf("exp error: %+v", data.err) t.Logf("got error: %+v", err) if err != nil { uerr := errors.Unwrap(err) if !errors.Is(uerr, data.err) { t.Fail() } } if !testutils.GetRequestsEqual(getReq, data.out) { t.Fail() } }) } } func TestCreateSetRequest(t *testing.T) { for name, data := range createSetRequestTestSet { t.Run(name, func(t *testing.T) { setReq, err := data.in.CreateSetRequest("") t.Logf("exp value: %+v", data.out) t.Logf("exp error: %+v", data.err) t.Logf("got value: %+v", setReq) t.Logf("got error: %+v", err) if err != nil { if !strings.HasPrefix(err.Error(), data.err.Error()) { t.Fail() } } if !testutils.SetRequestsEqual(setReq[0], data.out) { t.Fail() } }) } } func TestExecPathTemplate(t *testing.T) { c := New() c.Debug = true c.logger = log.New(os.Stderr, "", log.LstdFlags) for name, data := range execPathTemplateTestSet { t.Run(name, func(t *testing.T) { o, err := c.execPathTemplate(data.tpl, data.input) if err != nil { t.Logf("failed: %v", err) t.Fail() } t.Logf("exp value: %+v", data.out) t.Logf("got value: %+v", o) if data.out != o { t.Fail() } }) } } ================================================ FILE: pkg/config/diff.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "fmt" "strings" "github.com/openconfig/gnmi/proto/gnmi" "github.com/spf13/cobra" "github.com/openconfig/gnmic/pkg/api" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/utils" ) func (c *Config) CreateDiffSubscribeRequest(cmd *cobra.Command) (*gnmi.SubscribeRequest, error) { sc := &types.SubscriptionConfig{ Name: "diff-sub", Models: c.DiffModel, Prefix: c.DiffPrefix, Target: c.DiffTarget, Paths: c.DiffPath, Mode: "ONCE", Encoding: &c.Encoding, } if flagIsSet(cmd, "qos") { sc.Qos = &c.DiffQos } return utils.CreateSubscribeRequest(sc, nil, c.Encoding) } func (c *Config) CreateDiffGetRequest() (*gnmi.GetRequest, error) { if c == nil { return nil, fmt.Errorf("%w", ErrInvalidConfig) } gnmiOpts := make([]api.GNMIOption, 0, 4+len(c.LocalFlags.DiffPath)) gnmiOpts = append(gnmiOpts, api.Encoding(c.Encoding), api.DataType(c.LocalFlags.DiffType), api.Prefix(c.LocalFlags.DiffPrefix), api.Target(c.LocalFlags.DiffTarget), ) for _, p := range c.LocalFlags.DiffPath { gnmiOpts = append(gnmiOpts, api.Path(strings.TrimSpace(p))) } return api.NewGetRequest(gnmiOpts...) } ================================================ FILE: pkg/config/environment.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "os" "strings" "github.com/spf13/cobra" "github.com/spf13/pflag" ) func envToMap() map[string]any { m := map[string]any{} for _, e := range os.Environ() { if !strings.HasPrefix(e, envPrefix) { continue } pair := strings.SplitN(e, "=", 2) if len(pair) < 2 { continue // malformed env var } pair[0] = strings.ToLower(strings.TrimPrefix(pair[0], envPrefix+"_")) items := strings.Split(pair[0], "_") mergeMap(m, items, pair[1]) } return m } func mergeMap(m map[string]any, items []string, v any) { nItems := len(items) if nItems == 0 { return } if nItems > 1 { if _, ok := m[items[0]]; !ok { m[items[0]] = map[string]any{} } asMap, ok := m[items[0]].(map[string]any) if !ok { return } mergeMap(asMap, items[1:], v) v = asMap } m[items[0]] = v } func (c *Config) mergeEnvVars() { envs := envToMap() if c.GlobalFlags.Debug { c.logger.Printf("merging env vars: %+v", envs) } c.FileConfig.MergeConfigMap(envs) } func (c *Config) SetGlobalsFromEnv(cmd *cobra.Command) { cmd.PersistentFlags().VisitAll(func(f *pflag.Flag) { // expand password and token global attr only if they start with '$' if f.Name == "password" || f.Name == "token" { if !f.Changed && c.FileConfig.IsSet(f.Name) { val := c.FileConfig.GetString(f.Name) if strings.HasPrefix(val, "$") { c.setFlagValue(cmd, f.Name, val) } } return } // other global flags if !f.Changed && c.FileConfig.IsSet(f.Name) { if val := os.ExpandEnv(c.FileConfig.GetString(f.Name)); val != "" { c.setFlagValue(cmd, f.Name, val) } } }) } func expandMapEnv(m map[string]interface{}, fn func(string, string) string) { for f := range m { switch v := m[f].(type) { case string: m[f] = fn(f, v) case map[string]interface{}: expandMapEnv(v, fn) m[f] = v case []any: for i, item := range v { switch item := item.(type) { case string: v[i] = fn(f, item) case map[string]interface{}: expandMapEnv(item, fn) case []any: expandSliceEnv(f, item, fn) } } m[f] = v } } } func expandSliceEnv(parent string, s []any, fn func(string, string) string) { for i, item := range s { switch item := item.(type) { case string: s[i] = fn(parent, item) case map[string]interface{}: expandMapEnv(item, fn) case []any: expandSliceEnv("", item, fn) } } } func expandExcept(except ...string) func(string, string) string { return func(k, v string) string { for _, e := range except { if k == e { return v } } return os.ExpandEnv(v) } } func expandAll() func(string, string) string { return expandExcept() } ================================================ FILE: pkg/config/gnmi_ext.go ================================================ package config import ( "encoding/json" "fmt" "strconv" "github.com/fullstorydev/grpcurl" "github.com/jhump/protoreflect/dynamic" "github.com/openconfig/gnmi/proto/gnmi_ext" "github.com/openconfig/gnmic/pkg/api" "github.com/openconfig/gnmic/pkg/utils" pkgUtils "github.com/openconfig/gnmic/pkg/utils" ) func createAdditionalRequestExtensions( extensions string, protoDir, protoFiles []string, extensionDecodeMap utils.RegisteredExtensions, ) ([]*gnmi_ext.Extension, error) { var exts []*gnmi_ext.Extension if len(protoFiles) == 0 { return exts, nil } descSource, err := grpcurl.DescriptorSourceFromProtoFiles(protoDir, protoFiles...) if err != nil { return nil, err } var extensionsMap map[string]any if err := json.Unmarshal([]byte(extensions), &extensionsMap); err != nil { return nil, fmt.Errorf("extensions JSON decoding error: %w", err) } for idMsg, extMsg := range extensionsMap { id, err := strconv.ParseInt(idMsg, 10, 32) if err != nil { return nil, err } msg, exists := extensionDecodeMap[int32(id)] if !exists { return nil, fmt.Errorf("custom extension for the request was not found in the provided registered extensions") } desc, err := descSource.FindSymbol(msg) if err != nil { return nil, err } pm := dynamic.NewMessage(desc.GetFile().FindMessage(msg)) msgBytes, err := json.Marshal(extMsg) if err != nil { return nil, err } if err = pm.UnmarshalJSON(msgBytes); err != nil { return nil, err } extBytes, err := pm.Marshal() if err != nil { return nil, err } ext := gnmi_ext.Extension_RegisteredExt{ RegisteredExt: &gnmi_ext.RegisteredExtension{ Id: gnmi_ext.ExtensionID(id), Msg: extBytes, }, } exts = append(exts, &gnmi_ext.Extension{Ext: &ext}) } return exts, nil } func (c *Config) parseAdditionalRequestExtensions() ([]api.GNMIOption, error) { gnmiOpts := []api.GNMIOption{} if c.GlobalFlags.RequestExtensions == "" { return gnmiOpts, nil } registeredExtensions, err := pkgUtils.ParseRegisteredExtensions(c.GlobalFlags.RegisteredExtensions) if err != nil { return nil, err } exts, err := createAdditionalRequestExtensions( c.GlobalFlags.RequestExtensions, c.GlobalFlags.ProtoDir, c.GlobalFlags.ProtoFile, registeredExtensions, ) if err != nil { return nil, err } for _, ext := range exts { gnmiOpts = append(gnmiOpts, api.Extension(ext)) } return gnmiOpts, nil } ================================================ FILE: pkg/config/gnmi_server.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "fmt" "os" "strconv" "time" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/cache" "google.golang.org/grpc/keepalive" ) const ( defaultAddress = ":57400" defaultMaxSubscriptions = 64 defaultMaxUnaryRPC = 64 minimumSampleInterval = 1 * time.Millisecond defaultSampleInterval = 1 * time.Second minimumHeartbeatInterval = 1 * time.Second // defaultServiceRegistrationAddress = "localhost:8500" defaultRegistrationCheckInterval = 5 * time.Second defaultMaxServiceFail = 3 ) type GNMIServer struct { Address string `mapstructure:"address,omitempty" json:"address,omitempty"` MinSampleInterval time.Duration `mapstructure:"min-sample-interval,omitempty" json:"min-sample-interval,omitempty"` DefaultSampleInterval time.Duration `mapstructure:"default-sample-interval,omitempty" json:"default-sample-interval,omitempty"` MinHeartbeatInterval time.Duration `mapstructure:"min-heartbeat-interval,omitempty" json:"min-heartbeat-interval,omitempty"` MaxSubscriptions int64 `mapstructure:"max-subscriptions,omitempty" json:"max-subscriptions,omitempty"` MaxUnaryRPC int64 `mapstructure:"max-unary-rpc,omitempty" json:"max-unary-rpc,omitempty"` MaxRecvMsgSize int `mapstructure:"max-recv-msg-size,omitempty" json:"max-recv-msg-size,omitempty"` MaxSendMsgSize int `mapstructure:"max-send-msg-size,omitempty" json:"max-send-msg-size,omitempty"` MaxConcurrentStreams uint32 `mapstructure:"max-concurrent-streams,omitempty" json:"max-concurrent-streams,omitempty"` TCPKeepalive time.Duration `mapstructure:"tcp-keepalive,omitempty" json:"tcp-keepalive,omitempty"` GRPCKeepalive *grpcKeepaliveConfig `mapstructure:"grpc-keepalive,omitempty" json:"grpc-keepalive,omitempty"` RateLimit int64 `mapstructure:"rate-limit,omitempty" json:"rate-limit,omitempty"` Timeout time.Duration `mapstructure:"timeout,omitempty" json:"timeout,omitempty"` TLS *types.TLSConfig `mapstructure:"tls,omitempty" json:"tls,omitempty"` EnableMetrics bool `mapstructure:"enable-metrics,omitempty" json:"enable-metrics,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` // ServiceRegistration ServiceRegistration *serviceRegistration `mapstructure:"service-registration,omitempty" json:"service-registration,omitempty"` // cache config Cache *cache.Config `mapstructure:"cache,omitempty" json:"cache,omitempty"` } type serviceRegistration struct { Address string `mapstructure:"address,omitempty" json:"address,omitempty"` Datacenter string `mapstructure:"datacenter,omitempty" json:"datacenter,omitempty"` Username string `mapstructure:"username,omitempty" json:"username,omitempty"` Password string `mapstructure:"password,omitempty" json:"password,omitempty"` Token string `mapstructure:"token,omitempty" json:"token,omitempty"` Name string `mapstructure:"name,omitempty" json:"name,omitempty"` CheckInterval time.Duration `mapstructure:"check-interval,omitempty" json:"check-interval,omitempty"` MaxFail int `mapstructure:"max-fail,omitempty" json:"max-fail,omitempty"` Tags []string `mapstructure:"tags,omitempty" json:"tags,omitempty"` // DeregisterAfter string `mapstructure:"-" json:"-"` } // from keepalive.ServerParameters type grpcKeepaliveConfig struct { // MaxConnectionIdle is a duration for the amount of time after which an // idle connection would be closed by sending a GoAway. Idleness duration is // defined since the most recent time the number of outstanding RPCs became // zero or the connection establishment. MaxConnectionIdle time.Duration `mapstructure:"max-connection-idle,omitempty"` // The current default value is infinity. // MaxConnectionAge is a duration for the maximum amount of time a // connection may exist before it will be closed by sending a GoAway. A // random jitter of +/-10% will be added to MaxConnectionAge to spread out // connection storms. MaxConnectionAge time.Duration `mapstructure:"max-connection-age,omitempty"` // The current default value is infinity. // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after // which the connection will be forcibly closed. MaxConnectionAgeGrace time.Duration `mapstructure:"max-connection-age-grace,omitempty"` // The current default value is infinity. // After a duration of this time if the server doesn't see any activity it // pings the client to see if the transport is still alive. // If set below 1s, a minimum value of 1s will be used instead. Time time.Duration `mapstructure:"time,omitempty"` // The current default value is 2 hours. // After having pinged for keepalive check, the server waits for a duration // of Timeout and if no activity is seen even after that the connection is // closed. Timeout time.Duration `mapstructure:"timeout,omitempty"` // The current default value is 20 seconds. } func (gkc *grpcKeepaliveConfig) Convert() *keepalive.ServerParameters { if gkc == nil { return nil } return &keepalive.ServerParameters{ MaxConnectionIdle: gkc.MaxConnectionIdle, MaxConnectionAge: gkc.MaxConnectionAge, MaxConnectionAgeGrace: gkc.MaxConnectionAgeGrace, Time: gkc.Time, Timeout: gkc.Timeout, } } func (c *Config) GetGNMIServer() error { if !c.FileConfig.IsSet("gnmi-server") { return nil } c.GnmiServer = new(GNMIServer) c.GnmiServer.Address = os.ExpandEnv(c.FileConfig.GetString("gnmi-server/address")) maxSubVal := os.ExpandEnv(c.FileConfig.GetString("gnmi-server/max-subscriptions")) if maxSubVal != "" { maxSub, err := strconv.Atoi(maxSubVal) if err != nil { return err } c.GnmiServer.MaxSubscriptions = int64(maxSub) } maxRPCVal := os.ExpandEnv(c.FileConfig.GetString("gnmi-server/max-unary-rpc")) if maxRPCVal != "" { maxUnaryRPC, err := strconv.Atoi(os.ExpandEnv(c.FileConfig.GetString("gnmi-server/max-unary-rpc"))) if err != nil { return err } c.GnmiServer.MaxUnaryRPC = int64(maxUnaryRPC) } if c.FileConfig.IsSet("gnmi-server/tls") { c.GnmiServer.TLS = new(types.TLSConfig) c.GnmiServer.TLS.CaFile = os.ExpandEnv(c.FileConfig.GetString("gnmi-server/tls/ca-file")) c.GnmiServer.TLS.CertFile = os.ExpandEnv(c.FileConfig.GetString("gnmi-server/tls/cert-file")) c.GnmiServer.TLS.KeyFile = os.ExpandEnv(c.FileConfig.GetString("gnmi-server/tls/key-file")) c.GnmiServer.TLS.ClientAuth = os.ExpandEnv(c.FileConfig.GetString("gnmi-server/tls/client-auth")) if err := c.GnmiServer.TLS.Validate(); err != nil { return fmt.Errorf("gnmi-server TLS config error: %w", err) } } c.GnmiServer.EnableMetrics = os.ExpandEnv(c.FileConfig.GetString("gnmi-server/enable-metrics")) == trueString c.GnmiServer.Debug = os.ExpandEnv(c.FileConfig.GetString("gnmi-server/debug")) == trueString c.GnmiServer.Timeout = c.FileConfig.GetDuration("gnmi-server/timeout") c.setGnmiServerDefaults() if c.FileConfig.IsSet("gnmi-server/service-registration") { c.GnmiServer.ServiceRegistration = new(serviceRegistration) c.GnmiServer.ServiceRegistration.Address = os.ExpandEnv(c.FileConfig.GetString("gnmi-server/service-registration/address")) c.GnmiServer.ServiceRegistration.Datacenter = os.ExpandEnv(c.FileConfig.GetString("gnmi-server/service-registration/datacenter")) c.GnmiServer.ServiceRegistration.Username = os.ExpandEnv(c.FileConfig.GetString("gnmi-server/service-registration/username")) c.GnmiServer.ServiceRegistration.Password = os.ExpandEnv(c.FileConfig.GetString("gnmi-server/service-registration/password")) c.GnmiServer.ServiceRegistration.Token = os.ExpandEnv(c.FileConfig.GetString("gnmi-server/service-registration/token")) c.GnmiServer.ServiceRegistration.Name = os.ExpandEnv(c.FileConfig.GetString("gnmi-server/service-registration/name")) c.GnmiServer.ServiceRegistration.CheckInterval = c.FileConfig.GetDuration("gnmi-server/service-registration/check-interval") c.GnmiServer.ServiceRegistration.MaxFail = c.FileConfig.GetInt("gnmi-server/service-registration/max-fail") c.GnmiServer.ServiceRegistration.Tags = c.FileConfig.GetStringSlice("gnmi-server/service-registration/tags") c.setGnmiServerServiceRegistrationDefaults() } if c.FileConfig.IsSet("gnmi-server/cache") { c.GnmiServer.Cache = new(cache.Config) c.GnmiServer.Cache.Type = cache.CacheType(os.ExpandEnv(c.FileConfig.GetString("gnmi-server/cache/type"))) c.GnmiServer.Cache.Address = os.ExpandEnv(c.FileConfig.GetString("gnmi-server/cache/address")) c.GnmiServer.Cache.Timeout = c.FileConfig.GetDuration("gnmi-server/cache/timeout") c.GnmiServer.Cache.Expiration = c.FileConfig.GetDuration("gnmi-server/cache/expiration") c.GnmiServer.Cache.Debug = os.ExpandEnv(c.FileConfig.GetString("gnmi-server/cache/debug")) == trueString c.GnmiServer.Cache.Username = os.ExpandEnv(c.FileConfig.GetString("gnmi-server/cache/username")) c.GnmiServer.Cache.Password = os.ExpandEnv(c.FileConfig.GetString("gnmi-server/cache/password")) // c.GnmiServer.Cache.MaxBytes = c.FileConfig.GetInt64("gnmi-server/cache/max-bytes") c.GnmiServer.Cache.MaxMsgsPerSubscription = c.FileConfig.GetInt64("gnmi-server/cache/max-msgs-per-subscription") // c.GnmiServer.Cache.FetchBatchSize = c.FileConfig.GetInt("gnmi-server/cache/fetch-batch-size") c.GnmiServer.Cache.FetchWaitTime = c.FileConfig.GetDuration("gnmi-server/cache/fetch-wait-time") } return nil } func (c *Config) setGnmiServerDefaults() { if c.GnmiServer.Address == "" { c.GnmiServer.Address = defaultAddress } if c.GnmiServer.MaxSubscriptions <= 0 { c.GnmiServer.MaxSubscriptions = defaultMaxSubscriptions } if c.GnmiServer.MaxUnaryRPC <= 0 { c.GnmiServer.MaxUnaryRPC = defaultMaxUnaryRPC } if c.GnmiServer.MinSampleInterval <= 0 { c.GnmiServer.MinSampleInterval = minimumSampleInterval } if c.GnmiServer.DefaultSampleInterval <= 0 { c.GnmiServer.DefaultSampleInterval = defaultSampleInterval } if c.GnmiServer.MinHeartbeatInterval <= 0 { c.GnmiServer.MinHeartbeatInterval = minimumHeartbeatInterval } } func (c *Config) setGnmiServerServiceRegistrationDefaults() { if c.GnmiServer.ServiceRegistration.Address == "" { c.GnmiServer.ServiceRegistration.Address = defaultServiceRegistrationAddress } if c.GnmiServer.ServiceRegistration.CheckInterval <= 5*time.Second { c.GnmiServer.ServiceRegistration.CheckInterval = defaultRegistrationCheckInterval } if c.GnmiServer.ServiceRegistration.MaxFail <= 0 { c.GnmiServer.ServiceRegistration.MaxFail = defaultMaxServiceFail } deregisterTimer := c.GnmiServer.ServiceRegistration.CheckInterval * time.Duration(c.GnmiServer.ServiceRegistration.MaxFail) c.GnmiServer.ServiceRegistration.DeregisterAfter = deregisterTimer.String() } ================================================ FILE: pkg/config/inputs.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "fmt" "github.com/openconfig/gnmic/pkg/inputs" _ "github.com/openconfig/gnmic/pkg/inputs/all" ) func (c *Config) GetInputs() (map[string]map[string]interface{}, error) { errs := make([]error, 0) inputsDef := c.FileConfig.GetStringMap("inputs") for name, inputCfg := range inputsDef { inputCfgconv := convert(inputCfg) switch inputCfg := inputCfgconv.(type) { case map[string]interface{}: if outType, ok := inputCfg["type"]; ok { if !strInlist(outType.(string), inputs.InputTypes) { return nil, fmt.Errorf("unknown input type: %q", outType) } if _, ok := inputs.Inputs[outType.(string)]; ok { format, ok := inputCfg["format"] if !ok || (ok && format == "") { inputCfg["format"] = c.FileConfig.GetString("format") } c.Inputs[name] = inputCfg continue } err := fmt.Errorf("unknown input type '%s'", outType) c.logger.Print(err) errs = append(errs, err) continue } err := fmt.Errorf("missing input 'type' under %v", inputCfg) c.logger.Print(err) errs = append(errs, err) default: c.logger.Printf("unknown configuration format expecting a map[string]interface{}: got %T : %v", inputCfg, inputCfg) return nil, fmt.Errorf("unexpected inputs configuration format") } } if len(errs) > 0 { return nil, fmt.Errorf("there was %d error(s) when getting inputs configuration", len(errs)) } for n := range c.Inputs { expandMapEnv(c.Inputs[n], expandAll()) } if c.Debug { c.logger.Printf("inputs: %+v", c.Inputs) } return c.Inputs, nil } ================================================ FILE: pkg/config/loader.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "errors" "fmt" "os" "strings" "github.com/openconfig/gnmic/pkg/loaders" _ "github.com/openconfig/gnmic/pkg/loaders/all" ) func (c *Config) GetLoader() error { if c.GlobalFlags.TargetsFile != "" { c.Loader = map[string]interface{}{ "type": "file", "path": c.GlobalFlags.TargetsFile, } return nil } c.Loader = c.FileConfig.GetStringMap("loader") for k, v := range c.Loader { c.Loader[k] = convert(v) } if len(c.Loader) == 0 { return nil } if _, ok := c.Loader["type"]; !ok { return errors.New("missing type field under loader configuration") } if lds, ok := c.Loader["type"].(string); ok { for _, lt := range loaders.LoadersTypes { if lt == lds { expandMapEnv(c.Loader, func(k, v string) string { if k == "password" { if strings.HasPrefix(v, "${") && strings.HasSuffix(v, "}") { return os.ExpandEnv(v) } return v } return os.ExpandEnv(v) }) return nil } } return fmt.Errorf("unknown loader type %q", lds) } return fmt.Errorf("field 'type' not a string, found a %T", c.Loader["type"]) } ================================================ FILE: pkg/config/locker.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "errors" "github.com/openconfig/gnmic/pkg/lockers" _ "github.com/openconfig/gnmic/pkg/lockers/all" ) func (c *Config) getLocker() error { if !c.FileConfig.IsSet("clustering/locker") { return errors.New("missing locker config") } c.Clustering.Locker = c.FileConfig.GetStringMap("clustering/locker") if len(c.Clustering.Locker) == 0 { return errors.New("missing locker config") } if lockerType, ok := c.Clustering.Locker["type"]; ok { switch lockerType := lockerType.(type) { case string: if _, ok := lockers.Lockers[lockerType]; !ok { return errors.New("unknown locker type") } default: return errors.New("wrong locker type format") } expandMapEnv(c.Clustering.Locker, expandAll()) return nil } return errors.New("missing locker type") } ================================================ FILE: pkg/config/outputs.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "encoding/json" "fmt" "sort" "github.com/openconfig/gnmic/pkg/outputs" _ "github.com/openconfig/gnmic/pkg/outputs/all" ) func (c *Config) GetOutputs() (map[string]map[string]any, error) { outDef := c.FileConfig.GetStringMap("outputs") if len(outDef) == 0 && !c.FileConfig.GetBool("subscribe-quiet") { stdoutConfig := map[string]any{ "type": "file", "file-type": "stdout", "format": c.FileConfig.GetString("format"), "calculate-latency": c.FileConfig.GetBool("calculate-latency"), } outDef["default-stdout"] = stdoutConfig } for name, outputCfg := range outDef { outputCfgconv := convert(outputCfg) switch outCfg := outputCfgconv.(type) { case map[string]any: if outType, ok := outCfg["type"]; ok { switch outType := outType.(type) { case string: if _, ok := outputs.OutputTypes[outType]; !ok { return nil, fmt.Errorf("unknown output type: %q", outType) } default: return nil, fmt.Errorf("unknown output type: %T", outType) } format, ok := outCfg["format"] if !ok || (ok && format == "") { outCfg["format"] = c.FileConfig.GetString("format") } c.Outputs[name] = outCfg continue } c.logger.Printf("missing output 'type' under %v", outCfg) default: c.logger.Printf("unknown configuration format expecting a map[string]interface{}: got %T : %v", outCfg, outCfg) } } for n := range c.Outputs { expandMapEnv(c.Outputs[n], expandExcept("msg-template", "target-template")) } namedOutputs := c.FileConfig.GetStringSlice("subscribe-output") if len(namedOutputs) == 0 { if c.Debug { c.logger.Printf("outputs: %+v", c.Outputs) } return c.Outputs, nil } filteredOutputs := make(map[string]map[string]interface{}) notFound := make([]string, 0) for _, name := range namedOutputs { if o, ok := c.Outputs[name]; ok { filteredOutputs[name] = o } else { notFound = append(notFound, name) } } if len(notFound) > 0 { return nil, fmt.Errorf("named output(s) not found in config file: %v", notFound) } if c.Debug { c.logger.Printf("outputs: %+v", filteredOutputs) } return filteredOutputs, nil } func convert(i interface{}) interface{} { switch x := i.(type) { case map[interface{}]interface{}: nm := map[string]interface{}{} for k, v := range x { nm[k.(string)] = convert(v) } return nm case map[string]interface{}: for k, v := range x { x[k] = convert(v) } case []interface{}: for i, v := range x { x[i] = convert(v) } } return i } type outputSuggestion struct { Name string Types []string } func (c *Config) GetOutputsSuggestions() []outputSuggestion { outDef := c.FileConfig.GetStringMap("outputs") suggestions := make([]outputSuggestion, 0, len(outDef)) for name, d := range outDef { dl := convert(d) sug := outputSuggestion{Name: name, Types: make([]string, 0)} switch outs := dl.(type) { case []interface{}: for _, ou := range outs { switch ou := ou.(type) { case map[string]interface{}: if outType, ok := ou["type"]; ok { sug.Types = append(sug.Types, outType.(string)) } } } } suggestions = append(suggestions, sug) } sort.Slice(suggestions, func(i, j int) bool { return suggestions[i].Name < suggestions[j].Name }) return suggestions } func (c *Config) GetOutputsConfigs() [][]string { outDef := c.FileConfig.GetStringMap("outputs") if outDef == nil { return nil } outList := make([][]string, 0, len(outDef)) for name, outputCfg := range outDef { b, err := json.Marshal(outputCfg) if err != nil { c.logger.Printf("could not marshal output config: %v", err) return nil } outList = append(outList, []string{name, string(b)}) } return outList } ================================================ FILE: pkg/config/outputs_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "bytes" "os" "reflect" "strings" "testing" ) var getOutputsTestSet = map[string]struct { envs []string in []byte out map[string]map[string]interface{} }{ "basic_outputs": { in: []byte(` outputs: output1: type: file file-type: stdout output2: type: nats `), out: map[string]map[string]interface{}{ "output1": { "type": "file", "file-type": "stdout", "format": "", }, "output2": { "type": "nats", "format": "", }, }, }, "basic_outputs_env": { envs: []string{ "NATS_ADDRESS=1.1.1.1", }, in: []byte(` outputs: output1: type: file file-type: stdout output2: type: nats address: ${NATS_ADDRESS}:1123 `), out: map[string]map[string]interface{}{ "output1": { "type": "file", "file-type": "stdout", "format": "", }, "output2": { "type": "nats", "format": "", "address": "1.1.1.1:1123", }, }, }, } func TestGetOutputs(t *testing.T) { for name, data := range getOutputsTestSet { t.Run(name, func(t *testing.T) { for _, e := range data.envs { p := strings.SplitN(e, "=", 2) os.Setenv(p[0], p[1]) } cfg := New() cfg.Debug = true cfg.SetLogger() cfg.FileConfig.SetConfigType("yaml") err := cfg.FileConfig.ReadConfig(bytes.NewBuffer(data.in)) if err != nil { t.Logf("failed reading config: %v", err) t.Fail() } v := cfg.FileConfig.Get("outputs") t.Logf("raw interface outputs: %+v", v) outs, err := cfg.GetOutputs() t.Logf("exp value: %+v", data.out) t.Logf("got value: %+v", outs) if err != nil { t.Logf("failed getting outputs: %v", err) t.Fail() } if !reflect.DeepEqual(outs, data.out) { t.Log("maps not equal") t.Fail() } }) } } ================================================ FILE: pkg/config/plugins.go ================================================ package config import ( "time" ) type PluginsConfig struct { Path string `mapstructure:"path,omitempty" json:"path,omitempty"` Glob string `mapstructure:"glob,omitempty" json:"glob,omitempty"` StartTimeout time.Duration `mapstructure:"start-timeout,omitempty" json:"start-timeout,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` } func (c *Config) GetPluginsConfig() (*PluginsConfig, error) { if !c.FileConfig.IsSet("plugins") && c.GlobalFlags.PluginProcessorsPath == "" { return nil, nil } pc := &PluginsConfig{} pc.Path = c.GlobalFlags.PluginProcessorsPath if pc.Path == "" { pc.Path = c.FileConfig.GetString("plugins/path") } pc.Glob = c.FileConfig.GetString("plugins/glob") if pc.Glob == "" { pc.Glob = "*" } pc.StartTimeout = c.FileConfig.GetDuration("plugins/start-timeout") pc.Debug = c.FileConfig.GetBool("plugins/debug") return pc, nil } ================================================ FILE: pkg/config/processors.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "fmt" "github.com/openconfig/gnmic/pkg/formatters" ) func (c *Config) GetEventProcessors() (map[string]map[string]interface{}, error) { eps := c.FileConfig.GetStringMap("processors") for name, epc := range eps { switch epc := epc.(type) { case map[string]interface{}: c.logger.Printf("validating processor %q config", name) err := c.validateProcessorConfig(epc) if err != nil { return nil, err } c.Processors[name] = epc case nil: return nil, fmt.Errorf("empty processor %q config", name) default: c.logger.Printf("malformed processors config, %+v", epc) return nil, fmt.Errorf("malformed processors config, got %T", epc) } } for n, es := range c.Processors { for nn, p := range es { es[nn] = convert(p) } c.Processors[n] = es } for n := range c.Processors { expandMapEnv(c.Processors[n], expandExcept( "expression", "condition", "value-names", "values", "tag-names", "tags", "old", "new", // strings.replace "source", // starlark )) } if c.Debug { c.logger.Printf("processors: %+v", c.Processors) } return c.Processors, nil } func (c *Config) validateProcessorConfig(pcfg map[string]interface{}) error { for epType := range pcfg { if !strInlist(epType, formatters.EventProcessorTypes) { return fmt.Errorf("unknown processors type: %s", epType) } } return nil } func strInlist(s string, ls []string) bool { for _, ss := range ls { if ss == s { return true } } return false } ================================================ FILE: pkg/config/processors_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "bytes" "os" "reflect" "strings" "testing" ) var getProcessorsTestSet = map[string]struct { envs []string in []byte out map[string]map[string]interface{} }{ "basic_processors": { in: []byte(` processors: proc-convert-integer: event-convert: value-names: - ".*" type: int proc-delete-tag-name: event-delete: tag-names: - "^subscription-name" proc-delete-value-name: event-delete: value-names: - ".*out-unicast-packets" `), out: map[string]map[string]interface{}{ "proc-convert-integer": { "event-convert": map[string]interface{}{ "value-names": []interface{}{".*"}, "type": "int", }, }, "proc-delete-tag-name": { "event-delete": map[string]interface{}{ "tag-names": []interface{}{"^subscription-name"}, }, }, "proc-delete-value-name": { "event-delete": map[string]interface{}{ "value-names": []interface{}{".*out-unicast-packets"}, }, }, }, }, "basic_processors_with_env": { envs: []string{ "PROC_CONVERT_TYPE=int", }, in: []byte(` processors: proc-convert-integer: event-convert: value-names: - ".*" type: ${PROC_CONVERT_TYPE} proc-delete-tag-name: event-delete: tag-names: - "^subscription-name" proc-delete-value-name: event-delete: value-names: - ".*out-unicast-packets" `), out: map[string]map[string]interface{}{ "proc-convert-integer": { "event-convert": map[string]interface{}{ "value-names": []interface{}{".*"}, "type": "int", }, }, "proc-delete-tag-name": { "event-delete": map[string]interface{}{ "tag-names": []interface{}{"^subscription-name"}, }, }, "proc-delete-value-name": { "event-delete": map[string]interface{}{ "value-names": []interface{}{".*out-unicast-packets"}, }, }, }, }, } func TestGetProcessors(t *testing.T) { for name, data := range getProcessorsTestSet { t.Run(name, func(t *testing.T) { for _, e := range data.envs { p := strings.SplitN(e, "=", 2) os.Setenv(p[0], p[1]) } cfg := New() cfg.Debug = true cfg.SetLogger() cfg.FileConfig.SetConfigType("yaml") err := cfg.FileConfig.ReadConfig(bytes.NewBuffer(data.in)) if err != nil { t.Logf("failed reading config: %v", err) t.Fail() } v := cfg.FileConfig.Get("processors") t.Logf("raw interface processors: %+v", v) outs, err := cfg.GetEventProcessors() t.Logf("exp value: %+v", data.out) t.Logf("got value: %+v", outs) if err != nil { t.Logf("failed getting processors: %v", err) t.Fail() } //assert.EqualValues(t, data.out, outs) if !reflect.DeepEqual(outs, data.out) { t.Log("maps not equal") t.Fail() } }) } } ================================================ FILE: pkg/config/set.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "bytes" "context" "encoding/json" "errors" "fmt" "os" "path/filepath" "strings" "text/template" "time" "google.golang.org/protobuf/encoding/prototext" "gopkg.in/yaml.v2" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/api" gfile "github.com/openconfig/gnmic/pkg/file" "github.com/openconfig/gnmic/pkg/gtemplate" ) const ( varFileSuffix = "_vars" ) type UpdateItem struct { Path string `json:"path,omitempty" yaml:"path,omitempty"` Value interface{} `json:"value,omitempty" yaml:"value,omitempty"` Encoding string `json:"encoding,omitempty" yaml:"encoding,omitempty"` } type SetRequestFile struct { Updates []*UpdateItem `json:"updates,omitempty" yaml:"updates,omitempty"` Replaces []*UpdateItem `json:"replaces,omitempty" yaml:"replaces,omitempty"` UnionReplaces []*UpdateItem `json:"union-replaces,omitempty" yaml:"union-replaces,omitempty"` Deletes []string `json:"deletes,omitempty" yaml:"deletes,omitempty"` CommitID string `yaml:"commit-id,omitempty" json:"commit-id,omitempty"` CommitAction commitAction `yaml:"commit-action,omitempty" json:"commit-action,omitempty"` RollbackDuration time.Duration `yaml:"rollback-duration,omitempty" json:"rollback-duration,omitempty"` } type commitAction string const ( commitActionRequest commitAction = "request" commitActionCancel commitAction = "cancel" commitActionConfirm commitAction = "confirm" commitActionSetRollbackDuration commitAction = "set-rollback-duration" ) func (c *Config) ReadSetRequestTemplate() error { if len(c.SetRequestFile) == 0 { return nil } c.setRequestTemplate = make([]*template.Template, len(c.SetRequestFile)) for i, srf := range c.SetRequestFile { b, err := gfile.ReadFile(context.TODO(), srf) if err != nil { return err } if c.Debug { c.logger.Printf("set request file %d content: %s", i, string(b)) } // read template c.setRequestTemplate[i], err = gtemplate.CreateTemplate(fmt.Sprintf("set-request-%d", i), string(b)) if err != nil { return err } } return c.readTemplateVarsFile() } func (c *Config) readTemplateVarsFile() error { if c.SetRequestVars == "" { ext := filepath.Ext(c.SetRequestFile[0]) c.SetRequestVars = fmt.Sprintf("%s%s%s", c.SetRequestFile[0][0:len(c.SetRequestFile[0])-len(ext)], varFileSuffix, ext) c.logger.Printf("trying to find variable file %q", c.SetRequestVars) _, err := os.Stat(c.SetRequestVars) if os.IsNotExist(err) { c.SetRequestVars = "" return nil } else if err != nil { return err } } b, err := readFile(c.SetRequestVars) if err != nil { return err } if c.setRequestVars == nil { c.setRequestVars = make(map[string]interface{}) } err = yaml.Unmarshal(b, &c.setRequestVars) if err != nil { return err } tempInterface := convert(c.setRequestVars) switch t := tempInterface.(type) { case map[string]interface{}: c.setRequestVars = t default: return errors.New("unexpected variables file format") } if c.Debug { c.logger.Printf("request vars content: %v", c.setRequestVars) } return nil } func (c *Config) CreateSetRequestFromFile(targetName string) ([]*gnmi.SetRequest, error) { if len(c.setRequestTemplate) == 0 { return nil, errors.New("missing set request template") } reqs := make([]*gnmi.SetRequest, 0, len(c.setRequestTemplate)) buf := new(bytes.Buffer) for _, srf := range c.setRequestTemplate { buf.Reset() err := srf.Execute(buf, templateInput{ TargetName: targetName, Vars: c.setRequestVars, }) if err != nil { return nil, err } if c.Debug { c.logger.Printf("target %q template result:\n%s", targetName, buf.String()) } // reqFile := new(SetRequestFile) err = yaml.Unmarshal(buf.Bytes(), reqFile) if err != nil { return nil, err } gnmiOpts := make([]api.GNMIOption, 0) buf.Reset() for _, upd := range reqFile.Updates { if upd.Path == "" { upd.Path = "/" } enc := upd.Encoding if enc == "" { enc = c.GlobalFlags.Encoding } buf.Reset() switch { case strings.HasPrefix(upd.Path, "cli:/"): val, ok := upd.Value.(string) if !ok { return nil, fmt.Errorf("value %v is not a string", upd.Value) } buf.WriteString(val) default: err = json.NewEncoder(buf).Encode(convert(upd.Value)) if err != nil { return nil, err } } gnmiOpts = append(gnmiOpts, api.Update( api.Path(strings.TrimSpace(upd.Path)), api.Value(strings.TrimSpace(buf.String()), enc), ), ) } for _, upd := range reqFile.Replaces { if upd.Path == "" { upd.Path = "/" } enc := upd.Encoding if enc == "" { enc = c.GlobalFlags.Encoding } buf.Reset() switch { case upd.Path == "cli:/": val, ok := upd.Value.(string) if !ok { return nil, fmt.Errorf("value %v is not a string", upd.Value) } buf.WriteString(val) default: err = json.NewEncoder(buf).Encode(convert(upd.Value)) if err != nil { return nil, err } } gnmiOpts = append(gnmiOpts, api.Replace( api.Path(strings.TrimSpace(upd.Path)), api.Value(strings.TrimSpace(buf.String()), enc), ), ) } for _, upd := range reqFile.UnionReplaces { if upd.Path == "" { upd.Path = "/" } enc := upd.Encoding if enc == "" { enc = c.GlobalFlags.Encoding } buf.Reset() switch { case upd.Path == "cli:/": val, ok := upd.Value.(string) if !ok { return nil, fmt.Errorf("value %v is not a string", upd.Value) } buf.WriteString(val) default: err = json.NewEncoder(buf).Encode(convert(upd.Value)) if err != nil { return nil, err } } gnmiOpts = append(gnmiOpts, api.UnionReplace( api.Path(strings.TrimSpace(upd.Path)), api.Value(strings.TrimSpace(buf.String()), enc), ), ) } for _, s := range reqFile.Deletes { gnmiOpts = append(gnmiOpts, api.Delete(strings.TrimSpace(s))) } if reqFile.CommitID != "" { switch reqFile.CommitAction { case commitActionRequest: gnmiOpts = append(gnmiOpts, api.Extension_CommitRequest( c.LocalFlags.SetCommitId, c.LocalFlags.SetCommitRollbackDuration, )) case commitActionCancel: gnmiOpts = append(gnmiOpts, api.Extension_CommitCancel( c.LocalFlags.SetCommitId, )) case commitActionConfirm: gnmiOpts = append(gnmiOpts, api.Extension_CommitConfirm( c.LocalFlags.SetCommitId, )) case commitActionSetRollbackDuration: gnmiOpts = append(gnmiOpts, api.Extension_CommitSetRollbackDuration( c.LocalFlags.SetCommitId, c.LocalFlags.SetCommitRollbackDuration, )) default: return nil, fmt.Errorf("unknown commit action %s", reqFile.CommitAction) } } setReq, err := api.NewSetRequest(gnmiOpts...) if err != nil { return nil, err } reqs = append(reqs, setReq) } return reqs, nil } type templateInput struct { TargetName string Vars map[string]interface{} } func (c *Config) CreateSetRequestFromProtoFile() ([]*gnmi.SetRequest, error) { reqs := make([]*gnmi.SetRequest, 0, len(c.SetRequestProtoFile)) for _, r := range c.SetRequestProtoFile { b, err := os.ReadFile(r) if err != nil { return nil, err } req := new(gnmi.SetRequest) err = prototext.Unmarshal(b, req) if err != nil { return nil, err } reqs = append(reqs, req) } return reqs, nil } ================================================ FILE: pkg/config/set_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "strings" "testing" "text/template" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/api/testutils" ) var createSetRequestFromFileTestSet = map[string]struct { in *Config targetName string out *gnmi.SetRequest err error }{ "set_update_request_from_file": { in: &Config{ GlobalFlags{ Encoding: "json", }, LocalFlags{}, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, []*template.Template{ template.Must(template.New("set-request").Parse(`{ "updates": [ { "path": "valid/path", "value": "value" } ] }`))}, nil, }, out: &gnmi.SetRequest{ Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value\""), }, }, }, }, }, err: nil, }, "set_replace_request_from_file": { in: &Config{ GlobalFlags{ Encoding: "json", }, LocalFlags{}, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, []*template.Template{ template.Must(template.New("set-request").Parse(`{ "replaces": [ { "path": "valid/path", "value": "value" } ] }`))}, nil, }, out: &gnmi.SetRequest{ Replace: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value\""), }, }, }, }, }, err: nil, }, "set_delete_request_from_file": { in: &Config{ GlobalFlags{ Encoding: "json", }, LocalFlags{}, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, []*template.Template{ template.Must(template.New("set-request").Parse(`{ "deletes": [ "valid/path" ] }`))}, nil, }, out: &gnmi.SetRequest{ Delete: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path"}, }, }, }, }, err: nil, }, "set_multiple_update_request": { in: &Config{ GlobalFlags{ Encoding: "json", }, LocalFlags{}, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, []*template.Template{ template.Must(template.New("set-request").Parse(`{ "updates": [ { "path": "valid/path1", "value": "value1" }, { "path": "valid/path2", "value": "value2", "encoding": "json_ietf" } ] }`))}, nil, }, out: &gnmi.SetRequest{ Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path1"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value1\""), }, }, }, { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path2"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{ JsonIetfVal: []byte("\"value2\""), }, }, }, }, }, err: nil, }, "set_multiple_replace_request_from_file": { in: &Config{ GlobalFlags{ Encoding: "json", }, LocalFlags{}, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, []*template.Template{ template.Must(template.New("set-request").Parse(`{ "replaces": [ { "path": "valid/path1", "value": "value1" }, { "path": "valid/path2", "value": "value2", "encoding": "json_ietf" } ] }`))}, nil, }, out: &gnmi.SetRequest{ Replace: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path1"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value1\""), }, }, }, { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path2"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{ JsonIetfVal: []byte("\"value2\""), }, }, }, }, }, err: nil, }, "set_multiple_delete_request_from_file": { in: &Config{ GlobalFlags{ Encoding: "json", }, LocalFlags{}, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, []*template.Template{ template.Must(template.New("set-request").Parse(`{ "deletes": [ "valid/path1", "valid/path2" ] }`))}, nil, }, out: &gnmi.SetRequest{ Delete: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path1"}, }, }, { Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path2"}, }, }, }, }, err: nil, }, "set_combined_request": { in: &Config{ GlobalFlags{ Encoding: "json", }, LocalFlags{}, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, []*template.Template{template.Must(template.New("set-request").Parse(`{ "updates": [ { "path": "/valid/path1", "value": "value1" } ], "replaces": [ { "path": "/valid/path2", "value": "value2" } ], "deletes": [ "valid/path" ] }`))}, nil, }, out: &gnmi.SetRequest{ Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path1"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value1\""), }, }, }, }, Replace: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path2"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte("\"value2\""), }, }, }, }, Delete: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ {Name: "valid"}, {Name: "path"}, }, }, }, }, err: nil, }, "template_based_set_request": { in: &Config{ GlobalFlags{ Encoding: "json", }, LocalFlags{}, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, []*template.Template{ template.Must(template.New("set-request").Parse(`replaces: {{- range $interface := index .Vars .TargetName "interfaces" }} - path: "/interface[name={{ index $interface "name" }}]" encoding: "json_ietf" value: admin-state: {{ index $interface "admin-state" }} {{- range $index, $subinterface := index $interface "subinterfaces" }} subinterface: - index: {{ $index }} admin-state: {{ index $subinterface "admin-state"}} ipv4: address: - ip-prefix: {{ index $subinterface "ipv4-address"}} {{- end }} {{- end }}`))}, map[string]interface{}{ "target1": map[string]interface{}{ "interfaces": []interface{}{ map[string]interface{}{ "name": "ethernet-1/1", "admin-state": "enable", "subinterfaces": []interface{}{ map[string]interface{}{ "admin-state": "enable", "ipv4-address": "192.168.88.1/30", }, }, }, }, }, }, }, targetName: "target1", out: &gnmi.SetRequest{ Replace: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "interface", Key: map[string]string{ "name": "ethernet-1/1", }, }, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonIetfVal{ JsonIetfVal: []byte(`{"admin-state":"enable","subinterface":[{"admin-state":"enable","index":0,"ipv4":{"address":[{"ip-prefix":"192.168.88.1/30"}]}}]}`), }, }, }, }, }, err: nil, }, "set_replace_origin_cli": { in: &Config{ GlobalFlags{}, LocalFlags{}, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, []*template.Template{ template.Must(template.New("set-request").Parse(`{ "replaces": [ { "path": "cli:/", "value": "set interface ethernet-1/1 admin-state enable\nset interface ethernet-1/2 admin-state enable", "encoding": "ascii", } ] }`))}, nil, }, out: &gnmi.SetRequest{ Replace: []*gnmi.Update{ { Path: &gnmi.Path{ Origin: "cli", }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{ AsciiVal: "set interface ethernet-1/1 admin-state enable\nset interface ethernet-1/2 admin-state enable", }, }, }, }, }, err: nil, }, "set_update_origin_cli": { in: &Config{ GlobalFlags{ Encoding: "ascii", }, LocalFlags{}, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, []*template.Template{ template.Must(template.New("set-request").Parse(`{ "updates": [ { "path": "cli:/", "value": "set interface ethernet-1/1 admin-state enable" } ] }`))}, nil, }, out: &gnmi.SetRequest{ Update: []*gnmi.Update{ { Path: &gnmi.Path{ Origin: "cli", }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{ AsciiVal: "set interface ethernet-1/1 admin-state enable", }, }, }, }, }, err: nil, }, } func TestCreateSetRequestFromFile(t *testing.T) { for name, data := range createSetRequestFromFileTestSet { t.Run(name, func(t *testing.T) { setReq, err := data.in.CreateSetRequestFromFile(data.targetName) t.Logf("exp value: %+v", data.out) t.Logf("exp error: %+v", data.err) t.Logf("got value: %+v", setReq) t.Logf("got error: %+v", err) if err != nil { if !strings.HasPrefix(err.Error(), data.err.Error()) { t.Fail() } } if !testutils.SetRequestsEqual(setReq[0], data.out) { t.Fail() } }) } } ================================================ FILE: pkg/config/subscriptions.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "errors" "fmt" "os" "sort" "strings" "time" "github.com/AlekSi/pointer" "github.com/mitchellh/mapstructure" "github.com/spf13/cobra" "github.com/openconfig/gnmic/pkg/api/types" ) const ( SubscriptionMode_STREAM = "STREAM" SubscriptionMode_ONCE = "ONCE" SubscriptionMode_POLL = "POLL" SubscriptionStreamMode_TARGET_DEFINED = "TARGET_DEFINED" SubscriptionStreamMode_ON_CHANGE = "ON_CHANGE" SubscriptionStreamMode_SAMPLE = "SAMPLE" ) const ( subscriptionDefaultMode = SubscriptionMode_STREAM subscriptionDefaultStreamMode = SubscriptionStreamMode_TARGET_DEFINED subscriptionDefaultEncoding = "JSON" ) var ErrConfig = errors.New("config error") func (c *Config) GetSubscriptions(cmd *cobra.Command) (map[string]*types.SubscriptionConfig, error) { if len(c.LocalFlags.SubscribePath) > 0 && len(c.LocalFlags.SubscribeName) > 0 { return nil, fmt.Errorf("flags --path and --name cannot be mixed") } // subscriptions from cli flags if len(c.LocalFlags.SubscribePath) > 0 { return c.subscriptionConfigFromFlags(cmd) } // subscriptions from file subDef := c.FileConfig.GetStringMap("subscriptions") if c.Debug { c.logger.Printf("subscriptions map: %#v", subDef) } // decode subscription config for sn, s := range subDef { switch s := s.(type) { case map[string]any: sub, err := c.decodeSubscriptionConfig(sn, s, cmd) if err != nil { return nil, err } c.Subscriptions[sn] = sub default: return nil, fmt.Errorf("%w: subscriptions map: unexpected type %T", ErrConfig, s) } } // named subscription if len(c.LocalFlags.SubscribeName) == 0 { if c.Debug { c.logger.Printf("subscriptions: %s", c.Subscriptions) } err := validateSubscriptionsConfig(c.Subscriptions) if err != nil { return nil, err } return c.Subscriptions, nil } filteredSubscriptions := make(map[string]*types.SubscriptionConfig) notFound := make([]string, 0) for _, name := range c.LocalFlags.SubscribeName { if s, ok := c.Subscriptions[name]; ok { filteredSubscriptions[name] = s } else { notFound = append(notFound, name) } } if len(notFound) > 0 { return nil, fmt.Errorf("named subscription(s) not found in config file: %v", notFound) } if c.Debug { c.logger.Printf("subscriptions: %s", filteredSubscriptions) } err := validateSubscriptionsConfig(filteredSubscriptions) if err != nil { return nil, err } return filteredSubscriptions, nil } func (c *Config) subscriptionConfigFromFlags(cmd *cobra.Command) (map[string]*types.SubscriptionConfig, error) { sub := &types.SubscriptionConfig{ Name: fmt.Sprintf("default-%d", time.Now().Unix()), Models: []string{}, Prefix: c.LocalFlags.SubscribePrefix, Target: c.LocalFlags.SubscribeTarget, SetTarget: c.LocalFlags.SubscribeSetTarget, Paths: c.LocalFlags.SubscribePath, Mode: c.LocalFlags.SubscribeMode, Depth: c.LocalFlags.SubscribeDepth, } // if globalFlagIsSet(cmd, "encoding") { // sub.Encoding = &c.Encoding // } if flagIsSet(cmd, "qos") { sub.Qos = &c.LocalFlags.SubscribeQos } sub.StreamMode = c.LocalFlags.SubscribeStreamMode if flagIsSet(cmd, "heartbeat-interval") { sub.HeartbeatInterval = &c.LocalFlags.SubscribeHeartbeatInterval } if flagIsSet(cmd, "sample-interval") { sub.SampleInterval = &c.LocalFlags.SubscribeSampleInterval } sub.SuppressRedundant = c.LocalFlags.SubscribeSuppressRedundant sub.UpdatesOnly = c.LocalFlags.SubscribeUpdatesOnly sub.Models = c.LocalFlags.SubscribeModel if flagIsSet(cmd, "history-snapshot") { snapshot, err := time.Parse(time.RFC3339Nano, c.LocalFlags.SubscribeHistorySnapshot) if err != nil { return nil, fmt.Errorf("history-snapshot: %v", err) } sub.History = &types.HistoryConfig{ Snapshot: snapshot, } } if flagIsSet(cmd, "history-start") && flagIsSet(cmd, "history-end") { start, err := time.Parse(time.RFC3339Nano, c.LocalFlags.SubscribeHistoryStart) if err != nil { return nil, fmt.Errorf("history-start: %v", err) } end, err := time.Parse(time.RFC3339Nano, c.LocalFlags.SubscribeHistoryEnd) if err != nil { return nil, fmt.Errorf("history-end: %v", err) } sub.History = &types.HistoryConfig{ Start: start, End: end, } } c.Subscriptions[sub.Name] = sub if c.Debug { c.logger.Printf("subscriptions: %s", c.Subscriptions) } return c.Subscriptions, nil } func (c *Config) decodeSubscriptionConfig(sn string, s any, cmd *cobra.Command) (*types.SubscriptionConfig, error) { sub := new(types.SubscriptionConfig) decoder, err := mapstructure.NewDecoder( &mapstructure.DecoderConfig{ DecodeHook: mapstructure.StringToTimeDurationHookFunc(), Result: sub, }) if err != nil { return nil, err } err = decoder.Decode(s) if err != nil { return nil, err } sub.Name = sn // inherit global "subscribe-*" option if it's not set if err := c.setSubscriptionFieldsFromFlags(sub, cmd); err != nil { return nil, err } expandSubscriptionEnv(sub) return sub, nil } func (c *Config) setSubscriptionFieldsFromFlags(sub *types.SubscriptionConfig, cmd *cobra.Command) error { if sub.SampleInterval == nil && flagIsSet(cmd, "sample-interval") { sub.SampleInterval = &c.LocalFlags.SubscribeSampleInterval } if sub.HeartbeatInterval == nil && flagIsSet(cmd, "heartbeat-interval") { sub.HeartbeatInterval = &c.LocalFlags.SubscribeHeartbeatInterval } // if sub.Encoding == nil && globalFlagIsSet(cmd, "encoding") { // sub.Encoding = &c.Encoding // } if sub.Mode == "" { sub.Mode = c.LocalFlags.SubscribeMode } if strings.ToUpper(sub.Mode) == SubscriptionMode_STREAM && sub.StreamMode == "" { sub.StreamMode = c.LocalFlags.SubscribeStreamMode } if sub.Qos == nil && flagIsSet(cmd, "qos") { sub.Qos = &c.LocalFlags.SubscribeQos } if flagIsSet(cmd, "depth") { sub.Depth = c.LocalFlags.SubscribeDepth } if sub.History == nil && flagIsSet(cmd, "history-snapshot") { snapshot, err := time.Parse(time.RFC3339Nano, c.LocalFlags.SubscribeHistorySnapshot) if err != nil { return fmt.Errorf("history-snapshot: %v", err) } sub.History = &types.HistoryConfig{ Snapshot: snapshot, } } if sub.History == nil && flagIsSet(cmd, "history-start") && flagIsSet(cmd, "history-end") { start, err := time.Parse(time.RFC3339Nano, c.LocalFlags.SubscribeHistoryStart) if err != nil { return fmt.Errorf("history-start: %v", err) } end, err := time.Parse(time.RFC3339Nano, c.LocalFlags.SubscribeHistoryEnd) if err != nil { return fmt.Errorf("history-end: %v", err) } sub.History = &types.HistoryConfig{ Start: start, End: end, } } return nil } func (c *Config) GetSubscriptionsFromFile() []*types.SubscriptionConfig { subs, err := c.GetSubscriptions(nil) if err != nil { return nil } subscriptions := make([]*types.SubscriptionConfig, 0) for _, sub := range subs { subscriptions = append(subscriptions, sub) } sort.Slice(subscriptions, func(i, j int) bool { return subscriptions[i].Name < subscriptions[j].Name }) return subscriptions } func validateSubscriptionsConfig(subs map[string]*types.SubscriptionConfig) error { var hasPoll bool var hasOnce bool var hasStream bool for _, sc := range subs { switch strings.ToUpper(sc.Mode) { case "POLL": hasPoll = true case "ONCE": hasOnce = true case "STREAM": hasStream = true } } if hasPoll && hasOnce || hasPoll && hasStream { return errors.New("subscriptions with mode Poll cannot be mixed with Stream or Once") } return nil } func expandSubscriptionEnv(sc *types.SubscriptionConfig) { sc.Name = os.ExpandEnv(sc.Name) for i := range sc.Models { sc.Models[i] = os.ExpandEnv(sc.Models[i]) } sc.Prefix = os.ExpandEnv(sc.Prefix) sc.Target = os.ExpandEnv(sc.Target) for i := range sc.Paths { sc.Paths[i] = os.ExpandEnv(sc.Paths[i]) } sc.Mode = os.ExpandEnv(sc.Mode) sc.StreamMode = os.ExpandEnv(sc.StreamMode) if sc.Encoding != nil { sc.Encoding = pointer.ToString(os.ExpandEnv(*sc.Encoding)) } } ================================================ FILE: pkg/config/subscriptions_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "bytes" "fmt" "os" "reflect" "strings" "testing" "time" "github.com/openconfig/gnmic/pkg/api/types" ) func mustParseTime(tm string) time.Time { tmi, err := time.Parse(time.RFC3339Nano, tm) if err != nil { panic(fmt.Sprintf("cannot parse time: %v", err)) } return tmi } var getSubscriptionsTestSet = map[string]struct { envs []string in []byte out map[string]*types.SubscriptionConfig outErr error }{ "no_globals": { in: []byte(` subscriptions: sub1: paths: - /valid/path `), out: map[string]*types.SubscriptionConfig{ "sub1": { Name: "sub1", Paths: []string{"/valid/path"}, }, }, outErr: nil, }, // "with_globals": { // in: []byte(` // subscribe-sample-interval: 10s // subscriptions: // sub1: // paths: // - /valid/path // `), // out: map[string]*types.SubscriptionConfig{ // "sub1": { // Name: "sub1", // Paths: []string{"/valid/path"}, // SampleInterval: pointer.ToDuration(10 * time.Second), // }, // }, // outErr: nil, // }, "2_subs": { in: []byte(` subscriptions: sub1: paths: - /valid/path sub2: paths: - /valid/path2 mode: stream stream-mode: on_change `), out: map[string]*types.SubscriptionConfig{ "sub1": { Name: "sub1", Paths: []string{"/valid/path"}, }, "sub2": { Name: "sub2", Paths: []string{"/valid/path2"}, Mode: "stream", StreamMode: "on_change", }, }, outErr: nil, }, // "2_subs_with_globals": { // in: []byte(` // subscribe-sample-interval: 10s // subscriptions: // sub1: // paths: // - /valid/path // sub2: // paths: // - /valid/path2 // mode: stream // stream-mode: on_change // `), // out: map[string]*types.SubscriptionConfig{ // "sub1": { // Name: "sub1", // Paths: []string{"/valid/path"}, // SampleInterval: pointer.ToDuration(10 * time.Second), // }, // "sub2": { // Name: "sub2", // Paths: []string{"/valid/path2"}, // Mode: "stream", // StreamMode: "on_change", // SampleInterval: pointer.ToDuration(10 * time.Second), // }, // }, // outErr: nil, // }, "3_subs_with_env": { envs: []string{ "SUB1_PATH=/valid/path", "SUB2_PATH=/valid/path2", }, in: []byte(` subscriptions: sub1: paths: - ${SUB1_PATH} sub2: paths: - ${SUB2_PATH} mode: stream stream-mode: on_change `), out: map[string]*types.SubscriptionConfig{ "sub1": { Name: "sub1", Paths: []string{"/valid/path"}, }, "sub2": { Name: "sub2", Paths: []string{"/valid/path2"}, Mode: "stream", StreamMode: "on_change", }, }, outErr: nil, }, "history_snapshot": { in: []byte(` subscriptions: sub1: paths: - /valid/path history: snapshot: 2022-07-14T07:30:00.0Z `), out: map[string]*types.SubscriptionConfig{ "sub1": { Name: "sub1", Paths: []string{"/valid/path"}, History: &types.HistoryConfig{ Snapshot: mustParseTime("2022-07-14T07:30:00.0Z"), }, }, }, outErr: nil, }, "history_range": { in: []byte(` subscriptions: sub1: paths: - /valid/path history: start: 2021-07-14T07:30:00.0Z end: 2022-07-14T07:30:00.0Z `), out: map[string]*types.SubscriptionConfig{ "sub1": { Name: "sub1", Paths: []string{"/valid/path"}, History: &types.HistoryConfig{ Start: mustParseTime("2021-07-14T07:30:00.0Z"), End: mustParseTime("2022-07-14T07:30:00.0Z"), }, }, }, outErr: nil, }, "subscription_list": { in: []byte(` subscriptions: sub1: stream-subscriptions: - paths: - /valid/path1 stream-mode: sample - paths: - /valid/path2 stream-mode: on-change `), out: map[string]*types.SubscriptionConfig{ "sub1": { Name: "sub1", StreamSubscriptions: []*types.SubscriptionConfig{ { Paths: []string{"/valid/path1"}, StreamMode: "sample", }, { Paths: []string{"/valid/path2"}, StreamMode: "on-change", }, }, }, }, outErr: nil, }, } func TestGetSubscriptions(t *testing.T) { for name, data := range getSubscriptionsTestSet { t.Run(name, func(t *testing.T) { for _, e := range data.envs { p := strings.SplitN(e, "=", 2) os.Setenv(p[0], p[1]) } cfg := New() cfg.Debug = true cfg.SetLogger() cfg.FileConfig.SetConfigType("yaml") err := cfg.FileConfig.ReadConfig(bytes.NewBuffer(data.in)) if err != nil { t.Logf("failed reading config: %v", err) t.Fail() } err = cfg.FileConfig.Unmarshal(cfg) if err != nil { t.Logf("failed fileConfig.Unmarshal: %v", err) t.Fail() } v := cfg.FileConfig.Get("subscriptions") t.Logf("raw interface subscriptions: %+v", v) outs, err := cfg.GetSubscriptions(nil) t.Logf("exp value: %+v", data.out) t.Logf("got value: %+v", outs) if err != nil { t.Logf("failed getting subscriptions: %v", err) t.Fail() } if !reflect.DeepEqual(outs, data.out) { t.Log("maps not equal") t.Fail() } }) } } // func TestConfig_CreateSubscribeRequest(t *testing.T) { // type fields struct { // GlobalFlags GlobalFlags // LocalFlags LocalFlags // FileConfig *viper.Viper // Targets map[string]*types.TargetConfig // Subscriptions map[string]*types.SubscriptionConfig // Outputs map[string]map[string]interface{} // Inputs map[string]map[string]interface{} // Processors map[string]map[string]interface{} // Clustering *clustering // GnmiServer *gnmiServer // APIServer *APIServer // Loader map[string]interface{} // Actions map[string]map[string]interface{} // logger *log.Logger // setRequestTemplate []*template.Template // setRequestVars map[string]interface{} // } // type args struct { // sc *types.SubscriptionConfig // target *types.TargetConfig // } // tests := []struct { // name string // fields fields // args args // want *gnmi.SubscribeRequest // wantErr bool // }{ // { // name: "once_subscription", // args: args{ // sc: &types.SubscriptionConfig{ // Paths: []string{ // "interface", // }, // Mode: "once", // Encoding: pointer.ToString("json_ietf"), // }, // }, // want: &gnmi.SubscribeRequest{ // Request: &gnmi.SubscribeRequest_Subscribe{ // Subscribe: &gnmi.SubscriptionList{ // Subscription: []*gnmi.Subscription{ // { // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{{ // Name: "interface", // }}, // }, // }, // }, // Mode: gnmi.SubscriptionList_ONCE, // Encoding: gnmi.Encoding_JSON_IETF, // }, // }, // }, // wantErr: false, // }, // { // name: "once_subscription_multiple_paths", // args: args{ // sc: &types.SubscriptionConfig{ // Paths: []string{ // "interface", // "network-instance", // }, // Mode: "once", // Encoding: pointer.ToString("json_ietf"), // }, // }, // want: &gnmi.SubscribeRequest{ // Request: &gnmi.SubscribeRequest_Subscribe{ // Subscribe: &gnmi.SubscriptionList{ // Mode: gnmi.SubscriptionList_ONCE, // Subscription: []*gnmi.Subscription{ // { // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{{ // Name: "interface", // }}, // }, // }, // { // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{{ // Name: "network-instance", // }}, // }, // }, // }, // Encoding: gnmi.Encoding_JSON_IETF, // }, // }, // }, // wantErr: false, // }, // { // name: "poll_subscription", // args: args{ // sc: &types.SubscriptionConfig{ // Paths: []string{ // "interface", // }, // Mode: "poll", // Encoding: pointer.ToString("json_ietf"), // }, // }, // want: &gnmi.SubscribeRequest{ // Request: &gnmi.SubscribeRequest_Subscribe{ // Subscribe: &gnmi.SubscriptionList{ // Subscription: []*gnmi.Subscription{ // { // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{{ // Name: "interface", // }}, // }, // }, // }, // Mode: gnmi.SubscriptionList_POLL, // Encoding: gnmi.Encoding_JSON_IETF, // }, // }, // }, // wantErr: false, // }, // { // name: "poll_subscription_multiple_paths", // args: args{ // sc: &types.SubscriptionConfig{ // Paths: []string{ // "interface", // "network-instance", // }, // Mode: "poll", // Encoding: pointer.ToString("json_ietf"), // }, // }, // want: &gnmi.SubscribeRequest{ // Request: &gnmi.SubscribeRequest_Subscribe{ // Subscribe: &gnmi.SubscriptionList{ // Subscription: []*gnmi.Subscription{ // { // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{{ // Name: "interface", // }}, // }, // }, // { // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{{ // Name: "network-instance", // }}, // }, // }, // }, // Mode: gnmi.SubscriptionList_POLL, // Encoding: gnmi.Encoding_JSON_IETF, // }, // }, // }, // wantErr: false, // }, // { // name: "stream_subscription", // args: args{ // sc: &types.SubscriptionConfig{ // Paths: []string{ // "interface", // }, // Mode: "stream", // Encoding: pointer.ToString("json_ietf"), // }, // }, // want: &gnmi.SubscribeRequest{ // Request: &gnmi.SubscribeRequest_Subscribe{ // Subscribe: &gnmi.SubscriptionList{ // Subscription: []*gnmi.Subscription{ // { // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{{ // Name: "interface", // }}, // }, // }, // }, // Mode: gnmi.SubscriptionList_STREAM, // Encoding: gnmi.Encoding_JSON_IETF, // }, // }, // }, // wantErr: false, // }, // { // name: "stream_subscription_multiple_paths", // args: args{ // sc: &types.SubscriptionConfig{ // Paths: []string{ // "interface", // "network-instance", // }, // Mode: "stream", // Encoding: pointer.ToString("json_ietf"), // }, // }, // want: &gnmi.SubscribeRequest{ // Request: &gnmi.SubscribeRequest_Subscribe{ // Subscribe: &gnmi.SubscriptionList{ // Subscription: []*gnmi.Subscription{ // { // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{{ // Name: "interface", // }}, // }, // }, // { // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{{ // Name: "network-instance", // }}, // }, // }, // }, // Mode: gnmi.SubscriptionList_STREAM, // Encoding: gnmi.Encoding_JSON_IETF, // }, // }, // }, // wantErr: false, // }, // { // name: "stream_sample_subscription", // args: args{ // sc: &types.SubscriptionConfig{ // Paths: []string{ // "interface", // }, // StreamMode: "sample", // Encoding: pointer.ToString("json_ietf"), // SampleInterval: pointer.ToDuration(5 * time.Second), // }, // }, // want: &gnmi.SubscribeRequest{ // Request: &gnmi.SubscribeRequest_Subscribe{ // Subscribe: &gnmi.SubscriptionList{ // Subscription: []*gnmi.Subscription{ // { // Mode: gnmi.SubscriptionMode_SAMPLE, // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{{ // Name: "interface", // }}, // }, // }, // }, // Encoding: gnmi.Encoding_JSON_IETF, // }, // }, // }, // wantErr: false, // }, // { // name: "stream_on_change_subscription", // args: args{ // sc: &types.SubscriptionConfig{ // Paths: []string{ // "interface", // }, // StreamMode: "on-change", // Encoding: pointer.ToString("json_ietf"), // }, // }, // want: &gnmi.SubscribeRequest{ // Request: &gnmi.SubscribeRequest_Subscribe{ // Subscribe: &gnmi.SubscriptionList{ // Subscription: []*gnmi.Subscription{ // { // Mode: gnmi.SubscriptionMode_ON_CHANGE, // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{{ // Name: "interface", // }}, // }, // }, // }, // Encoding: gnmi.Encoding_JSON_IETF, // }, // }, // }, // wantErr: false, // }, // { // name: "stream_target_defined_subscription", // args: args{ // sc: &types.SubscriptionConfig{ // Paths: []string{ // "interface", // }, // StreamMode: "on_change", // Encoding: pointer.ToString("json_ietf"), // }, // }, // want: &gnmi.SubscribeRequest{ // Request: &gnmi.SubscribeRequest_Subscribe{ // Subscribe: &gnmi.SubscriptionList{ // Subscription: []*gnmi.Subscription{ // { // Mode: gnmi.SubscriptionMode_ON_CHANGE, // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{{ // Name: "interface", // }}, // }, // }, // }, // Encoding: gnmi.Encoding_JSON_IETF, // }, // }, // }, // wantErr: false, // }, // { // name: "subscription_with_history_snapshot", // args: args{ // sc: &types.SubscriptionConfig{ // Paths: []string{ // "interface", // }, // Mode: "once", // Encoding: pointer.ToString("json_ietf"), // History: &types.HistoryConfig{ // Snapshot: mustParseTime("2022-07-14T07:30:00.0Z"), // }, // }, // }, // want: &gnmi.SubscribeRequest{ // Request: &gnmi.SubscribeRequest_Subscribe{ // Subscribe: &gnmi.SubscriptionList{ // Subscription: []*gnmi.Subscription{ // { // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{{ // Name: "interface", // }}, // }, // }, // }, // Encoding: gnmi.Encoding_JSON_IETF, // Mode: gnmi.SubscriptionList_ONCE, // }, // }, // Extension: []*gnmi_ext.Extension{ // { // Ext: &gnmi_ext.Extension_History{ // History: &gnmi_ext.History{ // Request: &gnmi_ext.History_SnapshotTime{ // SnapshotTime: 1657783800000000, // }, // }, // }, // }, // }, // }, // wantErr: false, // }, // { // name: "combined_on-change_and_sample", // args: args{ // sc: &types.SubscriptionConfig{ // Encoding: pointer.ToString("json_ietf"), // StreamSubscriptions: []*types.SubscriptionConfig{ // { // Paths: []string{ // "interface/admin-state", // }, // StreamMode: "ON_CHANGE", // }, // { // Paths: []string{ // "interface/statistics", // }, // StreamMode: "SAMPLE", // }, // }, // }, // }, // want: &gnmi.SubscribeRequest{ // Request: &gnmi.SubscribeRequest_Subscribe{ // Subscribe: &gnmi.SubscriptionList{ // Subscription: []*gnmi.Subscription{ // { // Mode: gnmi.SubscriptionMode_ON_CHANGE, // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{ // { // Name: "interface", // }, // { // Name: "admin-state", // }, // }, // }, // }, // { // Mode: gnmi.SubscriptionMode_SAMPLE, // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{ // { // Name: "interface", // }, // { // Name: "statistics", // }, // }, // }, // }, // }, // Encoding: gnmi.Encoding_JSON_IETF, // }, // }, // }, // wantErr: false, // }, // { // name: "combined_on-change_and_sample_multiple_paths", // args: args{ // sc: &types.SubscriptionConfig{ // Encoding: pointer.ToString("json_ietf"), // StreamSubscriptions: []*types.SubscriptionConfig{ // { // Paths: []string{ // "interface/admin-state", // "interface/oper-state", // }, // StreamMode: "ON_CHANGE", // }, // { // Paths: []string{ // "interface/statistics", // "interface/subinterface/statistics", // }, // StreamMode: "SAMPLE", // }, // }, // }, // }, // want: &gnmi.SubscribeRequest{ // Request: &gnmi.SubscribeRequest_Subscribe{ // Subscribe: &gnmi.SubscriptionList{ // Subscription: []*gnmi.Subscription{ // { // Mode: gnmi.SubscriptionMode_ON_CHANGE, // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{ // { // Name: "interface", // }, // { // Name: "admin-state", // }, // }, // }, // }, // { // Mode: gnmi.SubscriptionMode_ON_CHANGE, // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{ // { // Name: "interface", // }, // { // Name: "oper-state", // }, // }, // }, // }, // { // Mode: gnmi.SubscriptionMode_SAMPLE, // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{ // { // Name: "interface", // }, // { // Name: "statistics", // }, // }, // }, // }, // { // Mode: gnmi.SubscriptionMode_SAMPLE, // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{ // { // Name: "interface", // }, // { // Name: "subinterface", // }, // { // Name: "statistics", // }, // }, // }, // }, // }, // Encoding: gnmi.Encoding_JSON_IETF, // }, // }, // }, // wantErr: false, // }, // { // name: "invalid_combined_paths_and_subscriptions", // args: args{ // sc: &types.SubscriptionConfig{ // Paths: []string{"network-instance"}, // Encoding: pointer.ToString("json_ietf"), // StreamSubscriptions: []*types.SubscriptionConfig{ // { // Paths: []string{ // "interface/admin-state", // }, // StreamMode: "ON_CHANGE", // }, // { // Paths: []string{ // "interface/statistics", // }, // StreamMode: "SAMPLE", // }, // }, // }, // }, // wantErr: true, // }, // { // name: "invalid_combined_subscriptions_mode", // args: args{ // sc: &types.SubscriptionConfig{ // Encoding: pointer.ToString("json_ietf"), // StreamSubscriptions: []*types.SubscriptionConfig{ // { // Paths: []string{ // "interface/admin-state", // }, // Mode: "ONCE", // }, // { // Paths: []string{ // "interface/statistics", // }, // StreamMode: "SAMPLE", // }, // }, // }, // }, // wantErr: true, // }, // { // name: "invalid_subscription mode", // args: args{ // sc: &types.SubscriptionConfig{ // Encoding: pointer.ToString("json_ietf"), // Mode: "ONCE", // StreamSubscriptions: []*types.SubscriptionConfig{ // { // Paths: []string{ // "interface/admin-state", // }, // Mode: "ON_CHANGE", // }, // { // Paths: []string{ // "interface/statistics", // }, // StreamMode: "SAMPLE", // }, // }, // }, // }, // wantErr: true, // }, // { // name: "encoding_from_target", // args: args{ // sc: &types.SubscriptionConfig{ // Paths: []string{ // "interface", // }, // Mode: "once", // }, // target: &types.TargetConfig{ // Encoding: pointer.ToString("json_ietf"), // }, // }, // want: &gnmi.SubscribeRequest{ // Request: &gnmi.SubscribeRequest_Subscribe{ // Subscribe: &gnmi.SubscriptionList{ // Subscription: []*gnmi.Subscription{ // { // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{{ // Name: "interface", // }}, // }, // }, // }, // Mode: gnmi.SubscriptionList_ONCE, // Encoding: gnmi.Encoding_JSON_IETF, // }, // }, // }, // wantErr: false, // }, // { // name: "encoding_from_global", // fields: fields{ // GlobalFlags: GlobalFlags{Encoding: "json_ietf"}, // }, // args: args{ // sc: &types.SubscriptionConfig{ // Paths: []string{ // "interface", // }, // Mode: "once", // }, // }, // want: &gnmi.SubscribeRequest{ // Request: &gnmi.SubscribeRequest_Subscribe{ // Subscribe: &gnmi.SubscriptionList{ // Subscription: []*gnmi.Subscription{ // { // Path: &gnmi.Path{ // Elem: []*gnmi.PathElem{{ // Name: "interface", // }}, // }, // }, // }, // Mode: gnmi.SubscriptionList_ONCE, // Encoding: gnmi.Encoding_JSON_IETF, // }, // }, // }, // wantErr: false, // }, // } // for _, tt := range tests { // t.Run(tt.name, func(t *testing.T) { // c := &Config{ // GlobalFlags: tt.fields.GlobalFlags, // LocalFlags: tt.fields.LocalFlags, // FileConfig: tt.fields.FileConfig, // Targets: tt.fields.Targets, // Subscriptions: tt.fields.Subscriptions, // Outputs: tt.fields.Outputs, // Inputs: tt.fields.Inputs, // Processors: tt.fields.Processors, // Clustering: tt.fields.Clustering, // GnmiServer: tt.fields.GnmiServer, // APIServer: tt.fields.APIServer, // Loader: tt.fields.Loader, // Actions: tt.fields.Actions, // logger: tt.fields.logger, // setRequestTemplate: tt.fields.setRequestTemplate, // setRequestVars: tt.fields.setRequestVars, // } // got, err := c.CreateSubscribeRequest(tt.args.sc, tt.args.target) // if err != nil && tt.wantErr { // t.Logf("expected error: %v", err) // return // } // if (err != nil) != tt.wantErr { // t.Logf("Config.CreateSubscribeRequest() error = %v", err) // t.Logf("Config.CreateSubscribeRequest() wantErr = %v", tt.wantErr) // t.Fail() // return // } // t.Logf("got:\n%s", prototext.Format(got)) // if !testutils.SubscribeRequestsEqual(got, tt.want) { // t.Logf("Config.CreateSubscribeRequest() got = %v", got) // t.Logf("Config.CreateSubscribeRequest() want = %v", tt.want) // t.Fail() // } // }) // } // } ================================================ FILE: pkg/config/targets.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "errors" "fmt" "maps" "net" "os" "sort" "strings" "github.com/mitchellh/mapstructure" "github.com/openconfig/gnmic/pkg/api/types" "github.com/zestor-dev/zestor/store" ) const ( defaultTargetBufferSize = 100 ) var ErrNoTargetsFound = errors.New("no targets found") func (c *Config) GetTargets() (map[string]*types.TargetConfig, error) { var err error // case address is defined in .Address if len(c.Address) > 0 { for _, addr := range c.Address { tc := &types.TargetConfig{ Name: addr, Address: addr, } err = c.SetTargetConfigDefaults(tc) if err != nil { return nil, err } c.Targets[tc.Name] = tc } return c.Targets, nil } // case targets is defined in config file targetsInt := c.FileConfig.Get("targets") targetsMap := make(map[string]interface{}) switch targetsInt := targetsInt.(type) { case string: for _, addr := range strings.Split(targetsInt, " ") { targetsMap[addr] = nil } case map[string]interface{}: targetsMap = targetsInt case nil: return nil, ErrNoTargetsFound default: return nil, fmt.Errorf("unexpected targets format, got: %T", targetsInt) } if len(targetsMap) == 0 { return nil, ErrNoTargetsFound } newTargetsConfig := make(map[string]*types.TargetConfig) for name, t := range targetsMap { tc := new(types.TargetConfig) switch t := t.(type) { case map[string]interface{}: decoder, err := mapstructure.NewDecoder( &mapstructure.DecoderConfig{ DecodeHook: mapstructure.StringToTimeDurationHookFunc(), Result: tc, }, ) if err != nil { return nil, err } err = decoder.Decode(t) if err != nil { return nil, err } case nil: default: return nil, fmt.Errorf("unexpected targets format, got a %T", t) } if tc.Address == "" { tc.Address = name } if tc.Name == "" { tc.Name = name } err = c.SetTargetConfigDefaults(tc) if err != nil { return nil, err } err = expandCertPaths(tc) if err != nil { return nil, err } // due to a viper bug that changes env values to lowercase if read // as part of a StringMap or interface{}: // read the target password as a string to maintain its case. // if it's not an empty string set it explicitly pass := c.FileConfig.GetString(fmt.Sprintf("targets/%s/password", name)) if pass != "" { *tc.Password = pass } expandTargetEnv(tc) newTargetsConfig[name] = tc } c.Targets = newTargetsConfig subNames := c.FileConfig.GetStringSlice("subscribe-name") if len(subNames) == 0 { return c.Targets, nil } for n := range c.Targets { c.Targets[n].Subscriptions = subNames } return c.Targets, nil } func (c *Config) SetTargetConfigDefaults(tc *types.TargetConfig) error { return setTargetConfigDefaultsFromGlobalFlags(tc, &c.GlobalFlags, c.FileConfig.GetString("port")) } func setTargetConfigDefaultsFromGlobalFlags(tc *types.TargetConfig, gflags *GlobalFlags, defaultGRPCPort string) error { if gflags.Port == "" { gflags.Port = defaultGRPCPort } if !strings.HasPrefix(tc.Address, "unix://") { addrList := strings.Split(tc.Address, ",") addrs := make([]string, 0, len(addrList)) for _, addr := range addrList { addr = strings.TrimSpace(addr) if !gflags.UseTunnelServer { _, _, err := net.SplitHostPort(addr) if err != nil { if strings.Contains(err.Error(), "missing port in address") || strings.Contains(err.Error(), "too many colons in address") { addr = net.JoinHostPort(addr, gflags.Port) } else { return fmt.Errorf("error parsing address '%s': %v", addr, err) } } } addrs = append(addrs, addr) } tc.Address = strings.Join(addrs, ",") } if tc.Username == nil { tc.Username = &gflags.Username } if tc.Password == nil { tc.Password = &gflags.Password } if tc.Token == nil { tc.Token = &gflags.Token } if tc.AuthScheme == "" { tc.AuthScheme = gflags.AuthScheme } if tc.Timeout == 0 { tc.Timeout = gflags.Timeout } if tc.Insecure == nil { tc.Insecure = &gflags.Insecure } if tc.SkipVerify == nil { tc.SkipVerify = &gflags.SkipVerify } if tc.Insecure != nil && !*tc.Insecure { if tc.TLSCA == nil { if gflags.TLSCa != "" { tc.TLSCA = &gflags.TLSCa } } if tc.TLSCert == nil { tc.TLSCert = &gflags.TLSCert } if tc.TLSKey == nil { tc.TLSKey = &gflags.TLSKey } } if tc.RetryTimer == 0 { tc.RetryTimer = gflags.Retry } if tc.TLSVersion == "" { tc.TLSVersion = gflags.TLSVersion } if tc.TLSMinVersion == "" { tc.TLSMinVersion = gflags.TLSMinVersion } if tc.TLSMaxVersion == "" { tc.TLSMaxVersion = gflags.TLSMaxVersion } if tc.TLSServerName == "" { tc.TLSServerName = gflags.TLSServerName } if tc.LogTLSSecret == nil { tc.LogTLSSecret = &gflags.LogTLSSecret } if tc.Gzip == nil { tc.Gzip = &gflags.Gzip } if tc.BufferSize == 0 { tc.BufferSize = defaultTargetBufferSize } if tc.Metadata == nil && gflags.Metadata != nil { tc.Metadata = make(map[string]string) maps.Copy(tc.Metadata, gflags.Metadata) } return nil } func (c *Config) SetTargetConfigDefaultsExpandEnv(tc *types.TargetConfig) error { err := c.SetTargetConfigDefaults(tc) if err != nil { return err } expandTargetEnv(tc) return nil } func (c *Config) TargetsList() []*types.TargetConfig { targets := make([]*types.TargetConfig, 0, len(c.Targets)) for _, tc := range c.Targets { targets = append(targets, tc) } sort.Slice(targets, func(i, j int) bool { return targets[i].Name < targets[j].Name }) return targets } func expandCertPaths(tc *types.TargetConfig) error { if tc.Insecure != nil && !*tc.Insecure { var err error if tc.TLSCA != nil && *tc.TLSCA != "" { *tc.TLSCA, err = expandOSPath(*tc.TLSCA) if err != nil { return err } } if tc.TLSCert != nil && *tc.TLSCert != "" { *tc.TLSCert, err = expandOSPath(*tc.TLSCert) if err != nil { return err } } if tc.TLSKey != nil && *tc.TLSKey != "" { *tc.TLSKey, err = expandOSPath(*tc.TLSKey) if err != nil { return err } } } return nil } func expandTargetEnv(tc *types.TargetConfig) { tc.Name = os.ExpandEnv(tc.Name) tc.Address = os.ExpandEnv(tc.Address) if tc.Username != nil { *tc.Username = os.ExpandEnv(*tc.Username) } // expandEnv for the pasword field only if it starts with $ // https://github.com/karimra/gnmic/issues/496 if tc.Password != nil && strings.HasPrefix(*tc.Password, "$") { *tc.Password = os.ExpandEnv(*tc.Password) } if tc.Token != nil { *tc.Token = os.ExpandEnv(*tc.Token) } if tc.TLSCA != nil { *tc.TLSCA = os.ExpandEnv(*tc.TLSCA) } if tc.TLSCert != nil { *tc.TLSCert = os.ExpandEnv(*tc.TLSCert) } if tc.TLSKey != nil { *tc.TLSKey = os.ExpandEnv(*tc.TLSKey) } for i := range tc.Subscriptions { tc.Subscriptions[i] = os.ExpandEnv(tc.Subscriptions[i]) } for i := range tc.Outputs { tc.Outputs[i] = os.ExpandEnv(tc.Outputs[i]) } tc.TLSMinVersion = os.ExpandEnv(tc.TLSMinVersion) tc.TLSMaxVersion = os.ExpandEnv(tc.TLSMaxVersion) tc.TLSVersion = os.ExpandEnv(tc.TLSVersion) for i := range tc.ProtoFiles { tc.ProtoFiles[i] = os.ExpandEnv(tc.ProtoFiles[i]) } for i := range tc.ProtoDirs { tc.ProtoDirs[i] = os.ExpandEnv(tc.ProtoDirs[i]) } for i := range tc.Tags { tc.Tags[i] = os.ExpandEnv(tc.Tags[i]) } } func (c *Config) GetDiffTargets() (*types.TargetConfig, map[string]*types.TargetConfig, error) { targetsConfig, err := c.GetTargets() if err != nil { if !errors.Is(err, ErrNoTargetsFound) { return nil, nil, err } } var refConfig *types.TargetConfig if rc, ok := targetsConfig[c.DiffRef]; ok { refConfig = rc } else { refConfig = &types.TargetConfig{ Name: c.DiffRef, Address: c.DiffRef, } err = c.SetTargetConfigDefaults(refConfig) if err != nil { return nil, nil, err } } compareConfigs := make(map[string]*types.TargetConfig) for _, cmp := range c.DiffCompare { if cc, ok := targetsConfig[cmp]; ok { compareConfigs[cmp] = cc } else { compConfig := &types.TargetConfig{ Name: cmp, Address: cmp, } err = c.SetTargetConfigDefaults(compConfig) if err != nil { return nil, nil, err } compareConfigs[compConfig.Name] = compConfig } } return refConfig, compareConfigs, nil } func SetTargetConfigDefaults(s store.Store[any], tc *types.TargetConfig) error { gf, found, err := s.Get("global-flags", "global-flags") if err != nil { return err } if !found { return fmt.Errorf("global-flags not found") } gflags, ok := gf.(GlobalFlags) if !ok { return fmt.Errorf("global-flags is not a *GlobalFlags") } return setTargetConfigDefaultsFromGlobalFlags(tc, &gflags, "") } func SetTargetConfigDefaultsExpandEnv(s store.Store[any], tc *types.TargetConfig) error { err := SetTargetConfigDefaults(s, tc) if err != nil { return err } expandTargetEnv(tc) return nil } ================================================ FILE: pkg/config/targets_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "bytes" "os" "reflect" "strings" "testing" "github.com/AlekSi/pointer" "gopkg.in/yaml.v2" "github.com/openconfig/gnmic/pkg/api/types" ) var getTargetsTestSet = map[string]struct { envs []string in []byte out map[string]*types.TargetConfig outErr error }{ "from_address": { in: []byte(` port: 57400 username: admin password: admin address: 10.1.1.1 `), out: map[string]*types.TargetConfig{ "10.1.1.1": { Address: "10.1.1.1:57400", Name: "10.1.1.1", Password: pointer.ToString("admin"), Username: pointer.ToString("admin"), Token: pointer.ToString(""), TLSCert: pointer.ToString(""), TLSKey: pointer.ToString(""), LogTLSSecret: pointer.ToBool(false), Insecure: pointer.ToBool(false), SkipVerify: pointer.ToBool(false), Gzip: pointer.ToBool(false), BufferSize: uint(100), }, }, outErr: nil, }, "from_targets_only": { in: []byte(` targets: 10.1.1.1:57400: username: admin password: admin `), out: map[string]*types.TargetConfig{ "10.1.1.1:57400": { Address: "10.1.1.1:57400", Name: "10.1.1.1:57400", Password: pointer.ToString("admin"), Username: pointer.ToString("admin"), Token: pointer.ToString(""), TLSCert: pointer.ToString(""), TLSKey: pointer.ToString(""), LogTLSSecret: pointer.ToBool(false), Insecure: pointer.ToBool(false), SkipVerify: pointer.ToBool(false), Gzip: pointer.ToBool(false), BufferSize: uint(100), }, }, outErr: nil, }, "from_both_targets_and_main_section": { in: []byte(` metadata: key1: val1 key2: val2 username: admin password: admin skip-verify: true targets: 10.1.1.1:57400: metadata: override1: val2 `), out: map[string]*types.TargetConfig{ "10.1.1.1:57400": { Address: "10.1.1.1:57400", Name: "10.1.1.1:57400", Password: pointer.ToString("admin"), Username: pointer.ToString("admin"), Token: pointer.ToString(""), TLSCert: pointer.ToString(""), TLSKey: pointer.ToString(""), LogTLSSecret: pointer.ToBool(false), Insecure: pointer.ToBool(false), SkipVerify: pointer.ToBool(true), Gzip: pointer.ToBool(false), BufferSize: uint(100), Metadata: map[string]string{ "override1": "val2", }, }, }, outErr: nil, }, "multiple_targets": { in: []byte(` metadata: key1: val1 key2: val2 targets: 10.1.1.1:57400: username: admin password: admin 10.1.1.2:57400: username: admin password: admin `), out: map[string]*types.TargetConfig{ "10.1.1.1:57400": { Address: "10.1.1.1:57400", Name: "10.1.1.1:57400", Password: pointer.ToString("admin"), Username: pointer.ToString("admin"), Token: pointer.ToString(""), TLSCert: pointer.ToString(""), TLSKey: pointer.ToString(""), LogTLSSecret: pointer.ToBool(false), Insecure: pointer.ToBool(false), SkipVerify: pointer.ToBool(false), Gzip: pointer.ToBool(false), BufferSize: uint(100), Metadata: map[string]string{ "key1": "val1", "key2": "val2", }, }, "10.1.1.2:57400": { Address: "10.1.1.2:57400", Name: "10.1.1.2:57400", Password: pointer.ToString("admin"), Username: pointer.ToString("admin"), Token: pointer.ToString(""), TLSCert: pointer.ToString(""), TLSKey: pointer.ToString(""), LogTLSSecret: pointer.ToBool(false), Insecure: pointer.ToBool(false), SkipVerify: pointer.ToBool(false), Gzip: pointer.ToBool(false), BufferSize: uint(100), Metadata: map[string]string{ "key1": "val1", "key2": "val2", }, }, }, outErr: nil, }, "multiple_targets_from_main_section": { in: []byte(` skip-verify: true targets: 10.1.1.1:57400: username: admin password: admin 10.1.1.2:57400: username: admin password: admin `), out: map[string]*types.TargetConfig{ "10.1.1.1:57400": { Address: "10.1.1.1:57400", Name: "10.1.1.1:57400", Password: pointer.ToString("admin"), Username: pointer.ToString("admin"), Token: pointer.ToString(""), TLSCert: pointer.ToString(""), TLSKey: pointer.ToString(""), LogTLSSecret: pointer.ToBool(false), Insecure: pointer.ToBool(false), SkipVerify: pointer.ToBool(true), Gzip: pointer.ToBool(false), BufferSize: uint(100), }, "10.1.1.2:57400": { Address: "10.1.1.2:57400", Name: "10.1.1.2:57400", Password: pointer.ToString("admin"), Username: pointer.ToString("admin"), Token: pointer.ToString(""), TLSCert: pointer.ToString(""), TLSKey: pointer.ToString(""), LogTLSSecret: pointer.ToBool(false), Insecure: pointer.ToBool(false), SkipVerify: pointer.ToBool(true), Gzip: pointer.ToBool(false), BufferSize: uint(100), }, }, outErr: nil, }, "multiple_targets_with_gzip": { in: []byte(` skip-verify: true targets: 10.1.1.1:57400: username: admin password: admin gzip: true 10.1.1.2:57400: username: admin password: admin `), out: map[string]*types.TargetConfig{ "10.1.1.1:57400": { Address: "10.1.1.1:57400", Name: "10.1.1.1:57400", Password: pointer.ToString("admin"), Username: pointer.ToString("admin"), Token: pointer.ToString(""), TLSCert: pointer.ToString(""), TLSKey: pointer.ToString(""), LogTLSSecret: pointer.ToBool(false), Insecure: pointer.ToBool(false), SkipVerify: pointer.ToBool(true), Gzip: pointer.ToBool(true), BufferSize: uint(100), }, "10.1.1.2:57400": { Address: "10.1.1.2:57400", Name: "10.1.1.2:57400", Password: pointer.ToString("admin"), Username: pointer.ToString("admin"), Token: pointer.ToString(""), TLSCert: pointer.ToString(""), TLSKey: pointer.ToString(""), LogTLSSecret: pointer.ToBool(false), Insecure: pointer.ToBool(false), SkipVerify: pointer.ToBool(true), Gzip: pointer.ToBool(false), BufferSize: uint(100), }, }, outErr: nil, }, "with_envs": { envs: []string{ "SUB_NAME=sub1", "OUT_NAME=o1", }, in: []byte(` skip-verify: true targets: 10.1.1.1:57400: username: admin password: admin outputs: - ${OUT_NAME} subscriptions: - ${SUB_NAME} `), out: map[string]*types.TargetConfig{ "10.1.1.1:57400": { Address: "10.1.1.1:57400", Name: "10.1.1.1:57400", Password: pointer.ToString("admin"), Username: pointer.ToString("admin"), Token: pointer.ToString(""), TLSCert: pointer.ToString(""), TLSKey: pointer.ToString(""), LogTLSSecret: pointer.ToBool(false), Insecure: pointer.ToBool(false), SkipVerify: pointer.ToBool(true), Gzip: pointer.ToBool(false), BufferSize: uint(100), Subscriptions: []string{ "sub1", }, Outputs: []string{ "o1", }, }, }, outErr: nil, }, "target_with_multiple_addresses": { in: []byte(` port: 57400 targets: target1: username: admin password: admin address: 10.1.1.1,10.1.1.2 `), out: map[string]*types.TargetConfig{ "target1": { Address: "10.1.1.1:57400,10.1.1.2:57400", Name: "target1", Password: pointer.ToString("admin"), Username: pointer.ToString("admin"), Token: pointer.ToString(""), TLSCert: pointer.ToString(""), TLSKey: pointer.ToString(""), LogTLSSecret: pointer.ToBool(false), Insecure: pointer.ToBool(false), SkipVerify: pointer.ToBool(false), Gzip: pointer.ToBool(false), BufferSize: uint(100), }, }, outErr: nil, }, } func TestGetTargets(t *testing.T) { for name, data := range getTargetsTestSet { t.Run(name, func(t *testing.T) { for _, e := range data.envs { p := strings.SplitN(e, "=", 2) os.Setenv(p[0], p[1]) } cfg := New() cfg.Debug = true cfg.SetLogger() cfg.FileConfig.SetConfigType("yaml") err := cfg.FileConfig.ReadConfig(bytes.NewBuffer(data.in)) if err != nil { t.Logf("failed reading config: %v", err) t.Fail() } err = cfg.FileConfig.Unmarshal(cfg) if err != nil { t.Logf("failed fileConfig.Unmarshal: %v", err) t.Fail() } v := cfg.FileConfig.Get("targets") t.Logf("raw interface targets: %+v", v) outs, err := cfg.GetTargets() t.Logf("exp value: %+v", data.out) t.Logf("got value: %+v", outs) if err != nil { t.Logf("failed getting targets: %v", err) t.Fail() } if !reflect.DeepEqual(outs, data.out) { t.Log("maps not equal") t.Fail() } }) } } var setTargetLoaderConfigDefaultsTest = map[string]struct { envs []string in []byte out *types.TargetConfig outErr error }{ "from_address": { envs: []string{ "username=user1", "pass=pass1", }, in: []byte(` test1: name: test1.123 address: test1.123:9339 username: ${username} password: ${pass} subscriptions: - drivenets-sample `), out: &types.TargetConfig{ Address: "test1.123:9339", Name: "test1.123", Password: pointer.ToString("pass1"), Username: pointer.ToString("user1"), Token: pointer.ToString(""), TLSCert: pointer.ToString(""), TLSKey: pointer.ToString(""), LogTLSSecret: pointer.ToBool(false), Insecure: pointer.ToBool(false), SkipVerify: pointer.ToBool(false), Gzip: pointer.ToBool(false), BufferSize: uint(100), Subscriptions: []string{"drivenets-sample"}, }, outErr: nil, }, } func TestSetTargetLoaderConfigDefaults(t *testing.T) { for name, data := range setTargetLoaderConfigDefaultsTest { t.Run(name, func(t *testing.T) { for _, e := range data.envs { p := strings.SplitN(e, "=", 2) os.Setenv(p[0], p[1]) } var inputMap map[string]*types.TargetConfig err := yaml.Unmarshal(data.in, &inputMap) if err != nil { t.Logf("failed to unmarshal input: %v", err) t.Fail() } var input *types.TargetConfig for _, v := range inputMap { input = v break } cfg := New() err = cfg.SetTargetConfigDefaultsExpandEnv(input) if err != nil { t.Logf("SetTargetLoaderConfigDefaults error: %v", err) t.Fail() } if !reflect.DeepEqual(input, data.out) { t.Logf("expected: %+v", data.out) t.Logf("got: %+v", input) t.Fail() } }) } } ================================================ FILE: pkg/config/tunnel_server.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package config import ( "fmt" "os" "time" "github.com/mitchellh/mapstructure" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" ) const ( defaultTargetWaitTime = 2 * time.Second ) type TunnelServer struct { Address string `mapstructure:"address,omitempty" json:"address,omitempty"` // TLS TLS *types.TLSConfig `mapstructure:"tls,omitempty"` // TargetWaitTime time.Duration `mapstructure:"target-wait-time,omitempty" json:"target-wait-time,omitempty"` // EnableMetrics bool `mapstructure:"enable-metrics,omitempty" json:"enable-metrics,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` // targets Targets []*TunnelTargetMatch `mapstructure:"targets,omitempty" json:"targets,omitempty"` } type TunnelTargetMatch struct { // target Type as reported by the tunnel.Target to the Tunnel Server Type string `mapstructure:"type,omitempty" json:"type,omitempty"` // a Regex pattern to check the target ID as reported by // the tunnel.Target to the Tunnel Server ID string `mapstructure:"id,omitempty" json:"id,omitempty"` // Optional gnmic.Target Configuration that will be assigned to the target with // an ID matching the above regex Config types.TargetConfig `mapstructure:"config,omitempty" json:"config,omitempty"` } func (c *Config) GetTunnelServer() error { if !c.FileConfig.IsSet("tunnel-server") { return nil } c.TunnelServer = new(TunnelServer) c.TunnelServer.Address = os.ExpandEnv(c.FileConfig.GetString("tunnel-server/address")) if c.FileConfig.IsSet("tunnel-server/tls") { c.TunnelServer.TLS = new(types.TLSConfig) c.TunnelServer.TLS.CaFile = os.ExpandEnv(c.FileConfig.GetString("tunnel-server/tls/ca-file")) c.TunnelServer.TLS.CertFile = os.ExpandEnv(c.FileConfig.GetString("tunnel-server/tls/cert-file")) c.TunnelServer.TLS.KeyFile = os.ExpandEnv(c.FileConfig.GetString("tunnel-server/tls/key-file")) c.TunnelServer.TLS.ClientAuth = os.ExpandEnv(c.FileConfig.GetString("tunnel-server/tls/client-auth")) if err := c.TunnelServer.TLS.Validate(); err != nil { return fmt.Errorf("tunnel-server TLS config error: %w", err) } } c.TunnelServer.TargetWaitTime = c.FileConfig.GetDuration("tunnel-server/target-wait-time") c.TunnelServer.EnableMetrics = os.ExpandEnv(c.FileConfig.GetString("tunnel-server/enable-metrics")) == trueString c.TunnelServer.Debug = os.ExpandEnv(c.FileConfig.GetString("tunnel-server/debug")) == trueString var err error c.TunnelServer.Targets = make([]*TunnelTargetMatch, 0) targetMatches := c.FileConfig.Get("tunnel-server/targets") switch targetMatches := targetMatches.(type) { case []interface{}: for _, tmi := range targetMatches { tm := new(TunnelTargetMatch) err = mapstructure.Decode(utils.Convert(tmi), tm) if err != nil { return err } c.TunnelServer.Targets = append(c.TunnelServer.Targets, tm) } case nil: default: return fmt.Errorf("tunnel-server has an unexpected target configuration type %T", targetMatches) } c.setTunnelServerDefaults() return nil } func (c *Config) setTunnelServerDefaults() { if c.TunnelServer.Address == "" { c.TunnelServer.Address = defaultAddress } if c.TunnelServer.TargetWaitTime <= 0 { c.TunnelServer.TargetWaitTime = defaultTargetWaitTime } } ================================================ FILE: pkg/file/file.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package file import ( "bytes" "context" "crypto/tls" "fmt" "io" "net" "net/http" "net/url" "os" "path/filepath" "strings" "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" "golang.org/x/crypto/ssh/knownhosts" "github.com/jlaffaye/ftp" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/pkg/sftp" ) const ( defaultFTPPort = 21 defaultSFTPPort = 22 ) // ReadFile reads a local or remote file and returns the read bytes, // the location of the file is determined based on its prefix, // http(s), (s)ftp prefixes are supported. // no prefix means the file is local. `-` means stdin. func ReadFile(ctx context.Context, path string) ([]byte, error) { // read file bytes based on the path prefix switch { case strings.HasPrefix(path, "https://"): return readHTTPFile(ctx, path) case strings.HasPrefix(path, "http://"): return readHTTPFile(ctx, path) case strings.HasPrefix(path, "ftp://"): return readFTPFile(ctx, path) case strings.HasPrefix(path, "sftp://"): return readSFTPFile(ctx, path, false) default: return utils.ReadLocalFile(ctx, path) } } // readHTTPFile fetches a remote from from an HTTP server, // the response body can be yaml or json bytes. // it then unmarshal the received bytes into a map[string]*types.TargetConfig // and returns func readHTTPFile(ctx context.Context, path string) ([]byte, error) { _, err := url.Parse(path) if err != nil { return nil, err } client := new(http.Client) if strings.HasPrefix(path, "https://") { client.Transport = &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, } } req, err := http.NewRequestWithContext(ctx, http.MethodGet, path, new(bytes.Buffer)) if err != nil { return nil, err } r, err := client.Do(req) if err != nil { return nil, err } if r.StatusCode != 200 { return nil, fmt.Errorf("unexpected HTTP status code %d, GET from %s", r.StatusCode, path) } defer r.Body.Close() return io.ReadAll(r.Body) } // readFTPFile reads a file from a remote FTP server // unmarshals the content into a map[string]*types.TargetConfig // and returns func readFTPFile(ctx context.Context, path string) ([]byte, error) { parsedUrl, err := url.Parse(path) if err != nil { return nil, fmt.Errorf("failed to parse URL: %v", err) } // Get user name and pass user := parsedUrl.User.Username() pass, _ := parsedUrl.User.Password() // Parse Host and Port host := parsedUrl.Host _, _, err = net.SplitHostPort(host) if err != nil { host = fmt.Sprintf("%s:%d", host, defaultFTPPort) } // connect to server conn, err := ftp.Dial(host, ftp.DialWithContext(ctx)) if err != nil { return nil, fmt.Errorf("failed to connect to [%s]: %v", host, err) } err = conn.Login(user, pass) if err != nil { return nil, fmt.Errorf("failed to login to [%s]: %v", host, err) } r, err := conn.Retr(parsedUrl.RequestURI()) if err != nil { return nil, fmt.Errorf("failed to read remote file %q: %v", parsedUrl.RequestURI(), err) } defer r.Close() return io.ReadAll(r) } // readSFTPFile reads a file from a remote SFTP server // unmarshals the content into a map[string]*types.TargetConfig // and returns func readSFTPFile(_ context.Context, path string, checkHostKey bool) ([]byte, error) { parsedUrl, err := url.Parse(path) if err != nil { return nil, fmt.Errorf("failed to parse URL: %v", err) } // Get user name and pass user := parsedUrl.User.Username() pass, _ := parsedUrl.User.Password() // Parse Host and Port host := parsedUrl.Host _, _, err = net.SplitHostPort(host) if err != nil { host = fmt.Sprintf("%s:%d", host, defaultSFTPPort) } var auths []ssh.AuthMethod // Try to use $SSH_AUTH_SOCK which contains the path of the unix file socket that the sshd agent uses // for communication with other processes. if aconn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK")); err == nil { auths = append(auths, ssh.PublicKeysCallback(agent.NewClient(aconn).Signers)) } // Use password authentication if provided if pass != "" { auths = append(auths, ssh.Password(pass)) } // Initialize client configuration config := ssh.ClientConfig{ User: user, Auth: auths, } // if checkHostKey is set, try loading the know_hosts file if checkHostKey { knownHostsFile := filepath.Join(os.Getenv("HOME"), ".ssh", "known_hosts") // check ~/.ssh/known_hosts existence if !FileExists(knownHostsFile) { return nil, fmt.Errorf("known_hosts file %s does not exist", knownHostsFile) } // load the known_hosts file retrieving an ssh.HostKeyCallback config.HostKeyCallback, err = knownhosts.New(knownHostsFile) if err != nil { return nil, err } } else { // use the use the InsecureIgnoreHostKey implementation config.HostKeyCallback = ssh.InsecureIgnoreHostKey() } // Connect to server conn, err := ssh.Dial("tcp", host, &config) if err != nil { return nil, fmt.Errorf("failed to connect to [%s]: %v", host, err) } defer conn.Close() // Create new SFTP client sc, err := sftp.NewClient(conn) if err != nil { return nil, fmt.Errorf("unable to start SFTP subsystem: %v", err) } defer sc.Close() // open File file, err := sc.Open(parsedUrl.RequestURI()) if err != nil { return nil, fmt.Errorf("failed to open the remote file %q: %v", parsedUrl.RequestURI(), err) } defer file.Close() // stat file to get its size st, err := file.Stat() if err != nil { return nil, err } if st.IsDir() { return nil, fmt.Errorf("remote file %q is a directory", parsedUrl.RequestURI()) } // create a []byte with length equal to the file size b := make([]byte, st.Size()) // read the file _, err = file.Read(b) return b, err } // FileExists returns true if a file referenced by filename exists & accessible. func FileExists(filename string) bool { f, err := os.Stat(filename) if err != nil { return false } return !f.IsDir() } ================================================ FILE: pkg/formatters/all/all.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package all import ( _ "github.com/openconfig/gnmic/pkg/formatters/event_add_tag" _ "github.com/openconfig/gnmic/pkg/formatters/event_allow" _ "github.com/openconfig/gnmic/pkg/formatters/event_combine" _ "github.com/openconfig/gnmic/pkg/formatters/event_convert" _ "github.com/openconfig/gnmic/pkg/formatters/event_data_convert" _ "github.com/openconfig/gnmic/pkg/formatters/event_date_string" _ "github.com/openconfig/gnmic/pkg/formatters/event_delete" _ "github.com/openconfig/gnmic/pkg/formatters/event_drop" _ "github.com/openconfig/gnmic/pkg/formatters/event_duration_convert" _ "github.com/openconfig/gnmic/pkg/formatters/event_extract_tags" _ "github.com/openconfig/gnmic/pkg/formatters/event_group_by" _ "github.com/openconfig/gnmic/pkg/formatters/event_ieeefloat32" _ "github.com/openconfig/gnmic/pkg/formatters/event_jq" _ "github.com/openconfig/gnmic/pkg/formatters/event_merge" _ "github.com/openconfig/gnmic/pkg/formatters/event_override_ts" _ "github.com/openconfig/gnmic/pkg/formatters/event_rate_limit" _ "github.com/openconfig/gnmic/pkg/formatters/event_starlark" _ "github.com/openconfig/gnmic/pkg/formatters/event_strings" _ "github.com/openconfig/gnmic/pkg/formatters/event_time_epoch" _ "github.com/openconfig/gnmic/pkg/formatters/event_to_tag" _ "github.com/openconfig/gnmic/pkg/formatters/event_trigger" _ "github.com/openconfig/gnmic/pkg/formatters/event_value_tag" _ "github.com/openconfig/gnmic/pkg/formatters/event_value_tag_v2" _ "github.com/openconfig/gnmic/pkg/formatters/event_write" ) ================================================ FILE: pkg/formatters/event.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package formatters import ( "encoding/json" "fmt" "math" "strings" "sync" flattener "github.com/karimra/go-map-flattener" "github.com/openconfig/gnmi/proto/gnmi" ) var stringBuilderPool = sync.Pool{ New: func() any { return new(strings.Builder) }, } // EventMsg represents a gNMI update message, // The name is derived from the subscription in case the update was received in a subscribeResponse // the tags are derived from the keys in gNMI path as well as some metadata from the subscription. type EventMsg struct { Name string `json:"name,omitempty"` Timestamp int64 `json:"timestamp,omitempty"` Tags map[string]string `json:"tags,omitempty"` Values map[string]interface{} `json:"values,omitempty"` Deletes []string `json:"deletes,omitempty"` } func (e *EventMsg) String() string { b, _ := json.Marshal(e) return string(b) } // ResponseToEventMsgs // func ResponseToEventMsgs(name string, rsp *gnmi.SubscribeResponse, meta map[string]string, eps ...EventProcessor) ([]*EventMsg, error) { if rsp == nil { return nil, nil } evs := make([]*EventMsg, 0, len(rsp.GetUpdate().GetUpdate())+len(rsp.GetUpdate().GetDelete())) switch rsp := rsp.Response.(type) { case *gnmi.SubscribeResponse_Update: namePrefix, prefixTags := tagsFromGNMIPath(rsp.Update.GetPrefix()) // notification updates uevs, err := updatesToEvent(name, namePrefix, rsp.Update.GetTimestamp(), rsp.Update.GetUpdate(), prefixTags, meta) if err != nil { return nil, err } evs = append(evs, uevs...) // notification deletes for _, del := range rsp.Update.GetDelete() { e := deleteToEvent(name, namePrefix, rsp.Update.GetTimestamp(), del, prefixTags) addMetaTags(e, meta) if (e != nil && e != &EventMsg{}) { evs = append(evs, e) } } for _, ep := range eps { evs = ep.Apply(evs...) } } return evs, nil } func GetResponseToEventMsgs(rsp *gnmi.GetResponse, meta map[string]string, eps ...EventProcessor) ([]*EventMsg, error) { if rsp == nil { return nil, nil } evs := make([]*EventMsg, 0, len(rsp.GetNotification())) for _, notif := range rsp.GetNotification() { namePrefix, prefixTags := tagsFromGNMIPath(notif.GetPrefix()) uevs, err := updatesToEvent("get-request", namePrefix, notif.GetTimestamp(), notif.GetUpdate(), prefixTags, meta) if err != nil { return nil, err } evs = append(evs, uevs...) } for _, ep := range eps { evs = ep.Apply(evs...) } return evs, nil } func updatesToEvent(name, prefix string, ts int64, upds []*gnmi.Update, tags, meta map[string]string) ([]*EventMsg, error) { evs := make([]*EventMsg, 0, len(upds)) for _, upd := range upds { e, err := updateToEvent(name, prefix, ts, upd, tags) if err != nil { return nil, err } addMetaTags(e, meta) if (e != nil && e != &EventMsg{}) { evs = append(evs, e) } } return evs, nil } func updateToEvent(name, prefix string, ts int64, upd *gnmi.Update, tags map[string]string) (*EventMsg, error) { e := &EventMsg{ Name: name, Timestamp: ts, Tags: make(map[string]string), Values: make(map[string]interface{}), } for k, v := range tags { e.Tags[k] = v } pathName, pTags := tagsFromGNMIPath(upd.GetPath()) psb := stringBuilderPool.Get().(*strings.Builder) defer func() { psb.Reset() stringBuilderPool.Put(psb) }() psb.WriteString(strings.TrimRight(prefix, "/")) psb.WriteString("/") psb.WriteString(strings.TrimLeft(pathName, "/")) pathName = psb.String() for k, v := range pTags { if vv, ok := e.Tags[k]; ok { if v != vv { e.Tags[fmt.Sprintf("%s_%s", pathName, k)] = v } continue } e.Tags[k] = v } var err error e.Values, err = getValueFlat(pathName, upd.GetVal()) if err != nil { return nil, err } return e, nil } func deleteToEvent(name, prefix string, ts int64, del *gnmi.Path, tags map[string]string) *EventMsg { e := &EventMsg{ Name: name, Timestamp: ts, Tags: make(map[string]string), Deletes: make([]string, 0, 1), } for k, v := range tags { e.Tags[k] = v } pathName, pTags := tagsFromGNMIPath(del) psb := stringBuilderPool.Get().(*strings.Builder) defer func() { psb.Reset() stringBuilderPool.Put(psb) }() psb.WriteString(strings.TrimRight(prefix, "/")) psb.WriteString("/") psb.WriteString(strings.TrimLeft(pathName, "/")) pathName = psb.String() for k, v := range pTags { if vv, ok := e.Tags[k]; ok { if v != vv { e.Tags[fmt.Sprintf("%s_%s", pathName, k)] = v } continue } e.Tags[k] = v } e.Deletes = append(e.Deletes, pathName) return e } // tagsFromGNMIPath returns a string representation of the gNMI path without keys, // as well as a map of the keys in the path. // the key map will also contain a target value if present in the gNMI path. func tagsFromGNMIPath(p *gnmi.Path) (string, map[string]string) { if p == nil { return "", nil } tags := make(map[string]string) sb := stringBuilderPool.Get().(*strings.Builder) defer func() { sb.Reset() stringBuilderPool.Put(sb) }() if p.Origin != "" { sb.WriteString(p.Origin) sb.WriteString(":") } for _, e := range p.GetElem() { if e.Name != "" { sb.WriteString("/") sb.WriteString(e.Name) } if e.Key != nil { ksb := stringBuilderPool.Get().(*strings.Builder) for k, v := range e.Key { if e.Name == "" { tags[k] = v continue } elems := strings.Split(e.Name, ":") ksb.WriteString(elems[len(elems)-1]) ksb.WriteString("_") ksb.WriteString(k) tags[ksb.String()] = v ksb.Reset() } stringBuilderPool.Put(ksb) } } if p.GetTarget() != "" { tags["target"] = p.GetTarget() } return sb.String(), tags } func normalizeEmptyRFC7951(v any) any { switch t := v.(type) { case nil: // presence for 'empty' return true case []any: // handle single null element if len(t) == 1 && t[0] == nil { return true } // recurse to catch nested cases for i := range t { t[i] = normalizeEmptyRFC7951(t[i]) } return t case map[string]any: // recurse to catch nested cases for k, vv := range t { t[k] = normalizeEmptyRFC7951(vv) } return t default: return v } } func getValueFlat(prefix string, updValue *gnmi.TypedValue) (map[string]interface{}, error) { if updValue == nil { return nil, nil } var jsondata []byte values := make(map[string]interface{}) switch updValue.Value.(type) { case *gnmi.TypedValue_AsciiVal: values[prefix] = updValue.GetAsciiVal() case *gnmi.TypedValue_BoolVal: values[prefix] = updValue.GetBoolVal() case *gnmi.TypedValue_BytesVal: values[prefix] = updValue.GetBytesVal() case *gnmi.TypedValue_DecimalVal: //lint:ignore SA1019 still need DecimalVal for backward compatibility v := updValue.GetDecimalVal() values[prefix] = float64(v.Digits) / math.Pow10(int(v.Precision)) case *gnmi.TypedValue_FloatVal: //lint:ignore SA1019 still need GetFloatVal for backward compatibility values[prefix] = updValue.GetFloatVal() case *gnmi.TypedValue_DoubleVal: values[prefix] = updValue.GetDoubleVal() case *gnmi.TypedValue_IntVal: values[prefix] = updValue.GetIntVal() case *gnmi.TypedValue_StringVal: values[prefix] = updValue.GetStringVal() case *gnmi.TypedValue_UintVal: values[prefix] = updValue.GetUintVal() case *gnmi.TypedValue_LeaflistVal: leafListVals := make([]interface{}, 0) for _, tv := range updValue.GetLeaflistVal().GetElement() { v, err := getValue(tv) if err != nil { return nil, err } leafListVals = append(leafListVals, v) } values[prefix] = leafListVals case *gnmi.TypedValue_ProtoBytes: values[prefix] = updValue.GetProtoBytes() case *gnmi.TypedValue_AnyVal: values[prefix] = updValue.GetAnyVal() case *gnmi.TypedValue_JsonIetfVal: jsondata = updValue.GetJsonIetfVal() case *gnmi.TypedValue_JsonVal: jsondata = updValue.GetJsonVal() } if len(jsondata) != 0 { var value interface{} err := json.Unmarshal(jsondata, &value) if err != nil { return nil, err } value = normalizeEmptyRFC7951(value) switch value := value.(type) { case map[string]interface{}: f := flattener.NewFlattener() f.SetPrefix(prefix) values, err = f.Flatten(value) default: values[prefix] = value } if err != nil { return nil, err } } return values, nil } func (e *EventMsg) ToMap() map[string]interface{} { if e == nil { return nil } m := make(map[string]interface{}) if e.Name != "" { m["name"] = e.Name } if e.Timestamp != 0 { m["timestamp"] = e.Timestamp } if len(e.Tags) > 0 { in := make(map[string]interface{}) for k, v := range e.Tags { in[k] = v } m["tags"] = in } if len(e.Values) > 0 { m["values"] = e.Values } if len(e.Deletes) > 0 { m["deletes"] = e.Deletes } return m } func EventFromMap(m map[string]interface{}) (*EventMsg, error) { if m == nil { return nil, nil } e := new(EventMsg) if v, ok := m["name"]; ok { switch v := v.(type) { case string: e.Name = v default: return nil, fmt.Errorf("could not convert map to event message, name it not a string") } } if v, ok := m["timestamp"]; ok { i := num64(v) if i == nil { return nil, fmt.Errorf("could not convert map to event message, timestamp is not an int64: %T", v) } switch i := i.(type) { case int64: e.Timestamp = i case uint64: e.Timestamp = int64(i) } } if v, ok := m["tags"]; ok { switch v := v.(type) { case map[string]string: e.Tags = v case map[string]interface{}: e.Tags = make(map[string]string) for k, v := range v { e.Tags[k], _ = v.(string) } default: return nil, fmt.Errorf("could not convert map to event message, tags are not a map[string]string") } } if v, ok := m["values"]; ok { switch v := v.(type) { case map[string]interface{}: e.Values = v case map[string]string: e.Values = make(map[string]interface{}) for k, v := range v { e.Values[k] = v } default: return nil, fmt.Errorf("could not convert map to event message, values are not a map[string]interface{}") } } if v, ok := m["deletes"]; ok { switch v := v.(type) { case []string: e.Deletes = v case []interface{}: for _, d := range v { if ds, ok := d.(string); ok { e.Deletes = append(e.Deletes, ds) } } default: return nil, fmt.Errorf("could not convert map to event message, name it not a string") } } return e, nil } func num64(n interface{}) interface{} { switch n := n.(type) { case int: return int64(n) case int8: return int64(n) case int16: return int64(n) case int32: return int64(n) case int64: return int64(n) case uint: return uint64(n) case uintptr: return uint64(n) case uint8: return uint64(n) case uint16: return uint64(n) case uint32: return uint64(n) case uint64: return uint64(n) case float64: return uint64(n) } return nil } func addMetaTags(e *EventMsg, meta map[string]string) { for k, v := range meta { if k == "format" { continue } if _, ok := e.Tags[k]; ok { e.Tags[fmt.Sprintf("meta_%s", k)] = v continue } e.Tags[k] = v } } ================================================ FILE: pkg/formatters/event_add_tag/event_add_tag.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_add_tag import ( "encoding/json" "io" "log" "os" "regexp" "strings" "github.com/itchyny/gojq" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-add-tag" loggingPrefix = "[" + processorType + "] " ) // addTag adds a set of tags to the event message if certain criteria's are met. type addTag struct { formatters.BaseProcessor Condition string `mapstructure:"condition,omitempty"` Tags []string `mapstructure:"tags,omitempty" json:"tags,omitempty"` Values []string `mapstructure:"values,omitempty" json:"values,omitempty"` TagNames []string `mapstructure:"tag-names,omitempty" json:"tag-names,omitempty"` ValueNames []string `mapstructure:"value-names,omitempty" json:"value-names,omitempty"` Deletes []string `mapstructure:"deletes,omitempty" json:"deletes,omitempty"` Overwrite bool `mapstructure:"overwrite,omitempty" json:"overwrite,omitempty"` Add map[string]string `mapstructure:"add,omitempty" json:"add,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` tags []*regexp.Regexp values []*regexp.Regexp tagNames []*regexp.Regexp valueNames []*regexp.Regexp deletes []*regexp.Regexp code *gojq.Code logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &addTag{ logger: log.New(io.Discard, "", 0), } }) } func (p *addTag) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, p) if err != nil { return err } for _, opt := range opts { opt(p) } if p.Condition != "" { p.Condition = strings.TrimSpace(p.Condition) q, err := gojq.Parse(p.Condition) if err != nil { return err } p.code, err = gojq.Compile(q) if err != nil { return err } } // init tags regex p.tags, err = compileRegex(p.Tags) if err != nil { return err } // init tag names regex p.tagNames, err = compileRegex(p.TagNames) if err != nil { return err } // init values regex p.values, err = compileRegex(p.Values) if err != nil { return err } // init value names regex p.valueNames, err = compileRegex(p.ValueNames) if err != nil { return err } // init deletes regex p.deletes, err = compileRegex(p.Deletes) if err != nil { return err } if p.logger.Writer() != io.Discard { b, err := json.Marshal(p) if err != nil { p.logger.Printf("initialized processor '%s': %+v", processorType, p) return nil } p.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } return nil } func (p *addTag) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { for _, e := range es { if e == nil { continue } // condition is set if p.code != nil && p.Condition != "" { ok, err := formatters.CheckCondition(p.code, e) if err != nil { p.logger.Printf("condition check failed: %v", err) } if ok { p.addTags(e) } continue } // no condition, check regexes for k, v := range e.Values { for _, re := range p.valueNames { if re.MatchString(k) { p.addTags(e) break } } for _, re := range p.values { if vs, ok := v.(string); ok { if re.MatchString(vs) { p.addTags(e) } break } } } for k, v := range e.Tags { for _, re := range p.tagNames { if re.MatchString(k) { p.addTags(e) break } } for _, re := range p.tags { if re.MatchString(v) { p.addTags(e) break } } } for _, k := range e.Deletes { for _, re := range p.deletes { if re.MatchString(k) { p.addTags(e) break } } } } return es } func (p *addTag) WithLogger(l *log.Logger) { if p.Debug && l != nil { p.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if p.Debug { p.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } func (p *addTag) addTags(e *formatters.EventMsg) { if e.Tags == nil { e.Tags = make(map[string]string) } for nk, nv := range p.Add { if p.Overwrite { e.Tags[nk] = nv continue } if _, ok := e.Tags[nk]; !ok { e.Tags[nk] = nv } } } func compileRegex(expr []string) ([]*regexp.Regexp, error) { res := make([]*regexp.Regexp, 0, len(expr)) for _, reg := range expr { re, err := regexp.Compile(reg) if err != nil { return nil, err } res = append(res, re) } return res, nil } ================================================ FILE: pkg/formatters/event_add_tag/event_add_tag_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_add_tag import ( "reflect" "testing" "github.com/openconfig/gnmic/pkg/formatters" ) type item struct { input []*formatters.EventMsg output []*formatters.EventMsg } var testset = map[string]struct { processorType string processor map[string]interface{} tests []item }{ "match_condition": { processorType: processorType, processor: map[string]interface{}{ "condition": `.values.value == 1`, "add": map[string]string{"tag1": "new_tag"}, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, Tags: map[string]string{ "tag1": "1", }, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, Tags: map[string]string{ "tag1": "new_tag", }, }, }, }, }, }, "match_condition_overwrite": { processorType: processorType, processor: map[string]interface{}{ "condition": `.values.value == 1`, "add": map[string]string{"tag1": "new_tag"}, "overwrite": true, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, Tags: map[string]string{ "tag1": "new_tag", }, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, Tags: map[string]string{ "tag1": "new_tag", }, }, }, }, }, }, // match value name "match_value_name_add": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"value"}, "add": map[string]string{"tag1": "new_tag"}, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, }, }, }, "match_value_name_overwrite": { processorType: processorType, processor: map[string]interface{}{ "debug": true, "value-names": []string{"value"}, "overwrite": true, "add": map[string]string{ "tag1": "new_tag", }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Tags: map[string]string{"tag1": "1"}, }, { Values: map[string]interface{}{"value": 2}, Tags: map[string]string{"tag1": "2"}, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{"tag1": "1"}, }, { Values: map[string]interface{}{"value": 2}, Tags: map[string]string{"tag1": "new_tag"}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, { Values: map[string]interface{}{"value": 2}, Tags: map[string]string{"tag1": "2"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "new_tag"}, }, { Values: map[string]interface{}{"value": 2}, Tags: map[string]string{"tag1": "new_tag"}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, }, }, }, "match_value_name_add_many": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"value"}, "add": map[string]string{ "tag1": "new_tag", "tag2": "new_tag2", }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, Tags: map[string]string{ "tag1": "1", }, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, Tags: map[string]string{ "tag1": "1", "tag2": "new_tag2", }, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, }, }, }, "match_value_name_add_many_overwrite": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"value"}, "overwrite": true, "add": map[string]string{ "tag1": "new_tag", "tag2": "new_tag2", }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, Tags: map[string]string{ "tag1": "1", }, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, Tags: map[string]string{ "tag1": "new_tag", "tag2": "new_tag2", }, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, }, }, }, // match value "match_value_add": { processorType: processorType, processor: map[string]interface{}{ "values": []string{"value"}, "add": map[string]string{"tag1": "new_tag"}, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"v": "value"}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"v": "value"}, Tags: map[string]string{"tag1": "1"}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, }, }, }, "match_value_overwrite": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"value"}, "overwrite": true, "add": map[string]string{ "tag1": "new_tag", }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": "value"}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": "value"}, Tags: map[string]string{"tag1": "new_tag"}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, }, }, }, "match_value_add_many": { processorType: processorType, processor: map[string]interface{}{ "values": []string{"value"}, "add": map[string]string{ "tag1": "new_tag", "tag2": "new_tag2", }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": "value"}, Tags: map[string]string{ "tag1": "1", }, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": "value"}, Tags: map[string]string{ "tag1": "1", "tag2": "new_tag2", }, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, }, }, }, "match_value_add_many_overwrite": { processorType: processorType, processor: map[string]interface{}{ "values": []string{"value"}, "overwrite": true, "add": map[string]string{ "tag1": "new_tag", "tag2": "new_tag2", }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": "value"}, Tags: map[string]string{ "tag1": "1", }, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": "value"}, Tags: map[string]string{ "tag1": "new_tag", "tag2": "new_tag2", }, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, }, }, }, // match tag name "match_tag_name_add": { processorType: processorType, processor: map[string]interface{}{ "tag-names": []string{"."}, "add": map[string]string{"tag1": "new_tag"}, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{"tag1": "1"}, }, }, }, }, }, "match_tag_name_overwrite": { processorType: processorType, processor: map[string]interface{}{ "tag-names": []string{"."}, "overwrite": true, "add": map[string]string{ "tag1": "new_tag", }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{"tag1": "new_tag"}, }, }, }, }, }, "match_tag_name_add_many": { processorType: processorType, processor: map[string]interface{}{ "tag-names": []string{"."}, "add": map[string]string{ "tag1": "new_tag", "tag2": "new_tag2", }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Tags: map[string]string{ "tag1": "1", }, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "tag1": "1", "tag2": "new_tag2", }, }, }, }, }, }, "match_tag_name_add_many_overwrite": { processorType: processorType, processor: map[string]interface{}{ "tag-names": []string{"."}, "overwrite": true, "add": map[string]string{ "tag1": "new_tag", "tag2": "new_tag2", }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Tags: map[string]string{ "tag1": "1", }, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "tag1": "new_tag", "tag2": "new_tag2", }, }, }, }, }, }, // match tag "match_tag_add": { processorType: processorType, processor: map[string]interface{}{ "tags": []string{"tag_value"}, "add": map[string]string{"tag1": "new_tag"}, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Tags: map[string]string{"old_tag": "tag_value"}, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "old_tag": "tag_value", "tag1": "new_tag", }, }, }, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{ "old_tag": "tag_value", "tag1": "old_value", }, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "old_tag": "tag_value", "tag1": "old_value", }, }, }, }, }, }, "match_tag_overwrite": { processorType: processorType, processor: map[string]interface{}{ "tags": []string{"tag_value"}, "overwrite": true, "add": map[string]string{ "tag1": "new_tag", }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Tags: map[string]string{"tag1": "tag_value"}, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{"tag1": "new_tag"}, }, }, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{ "old_tag": "tag_value", "tag1": "old_value", }, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "old_tag": "tag_value", "tag1": "new_tag", }, }, }, }, }, }, "match_tag_add_many": { processorType: processorType, processor: map[string]interface{}{ "tags": []string{"1"}, "add": map[string]string{ "tag1": "new_tag", "tag2": "new_tag2", }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Tags: map[string]string{ "tag1": "1", }, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "tag1": "1", "tag2": "new_tag2", }, }, }, }, }, }, "match_tag_add_many_overwrite": { processorType: processorType, processor: map[string]interface{}{ "tag-names": []string{"1"}, "overwrite": true, "add": map[string]string{ "tag1": "new_tag", "tag2": "new_tag2", }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Tags: map[string]string{ "tag1": "1", }, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "tag1": "new_tag", "tag2": "new_tag2", }, }, }, }, }, }, // match delete "match_delete_add": { processorType: processorType, processor: map[string]interface{}{ "deletes": []string{"^deleted_path.*"}, "add": map[string]string{"tag1": "new_tag"}, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Tags: map[string]string{"old_tag": "tag_value"}, Deletes: []string{"deleted_path"}, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "old_tag": "tag_value", "tag1": "new_tag", }, Deletes: []string{"deleted_path"}, }, }, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{ "old_tag": "tag_value", "tag1": "old_value", }, Deletes: []string{"non_matching_deleted_path"}, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "old_tag": "tag_value", "tag1": "old_value", }, Deletes: []string{"non_matching_deleted_path"}, }, }, }, }, }, } func TestEventAddTag(t *testing.T) { for name, ts := range testset { if pi, ok := formatters.EventProcessors[ts.processorType]; ok { t.Log("found processor") p := pi() err := p.Init(ts.processor) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } t.Logf("processor: %+v", p) for i, item := range ts.tests { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) outs := p.Apply(item.input...) for j := range outs { if !reflect.DeepEqual(outs[j], item.output[j]) { t.Logf("failed at %s item %d, index %d, expected: %+v", name, i, j, item.output[j]) t.Logf("failed at %s item %d, index %d, got: %+v", name, i, j, outs[j]) t.Fail() } } }) } } else { t.Errorf("event processor %s not found", ts.processorType) } } } ================================================ FILE: pkg/formatters/event_allow/event_allow.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_allow import ( "encoding/json" "io" "log" "os" "regexp" "strings" "github.com/itchyny/gojq" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-allow" loggingPrefix = "[" + processorType + "] " ) // allow Allows the msg if ANY of the Tags or Values regexes are matched type allow struct { formatters.BaseProcessor Condition string `mapstructure:"condition,omitempty"` TagNames []string `mapstructure:"tag-names,omitempty" json:"tag-names,omitempty"` ValueNames []string `mapstructure:"value-names,omitempty" json:"value-names,omitempty"` Tags []string `mapstructure:"tags,omitempty" json:"tags,omitempty"` Values []string `mapstructure:"values,omitempty" json:"values,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` tagNames []*regexp.Regexp valueNames []*regexp.Regexp tags []*regexp.Regexp values []*regexp.Regexp code *gojq.Code logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &allow{ logger: log.New(io.Discard, "", 0), } }) } func (d *allow) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, d) if err != nil { return err } for _, opt := range opts { opt(d) } d.Condition = strings.TrimSpace(d.Condition) q, err := gojq.Parse(d.Condition) if err != nil { return err } d.code, err = gojq.Compile(q) if err != nil { return err } // init tag keys regex d.tagNames = make([]*regexp.Regexp, 0, len(d.TagNames)) for _, reg := range d.TagNames { re, err := regexp.Compile(reg) if err != nil { return err } d.tagNames = append(d.tagNames, re) } d.tags = make([]*regexp.Regexp, 0, len(d.Tags)) for _, reg := range d.Tags { re, err := regexp.Compile(reg) if err != nil { return err } d.tags = append(d.tags, re) } // d.valueNames = make([]*regexp.Regexp, 0, len(d.ValueNames)) for _, reg := range d.ValueNames { re, err := regexp.Compile(reg) if err != nil { return err } d.valueNames = append(d.valueNames, re) } d.values = make([]*regexp.Regexp, 0, len(d.values)) for _, reg := range d.Values { re, err := regexp.Compile(reg) if err != nil { return err } d.values = append(d.values, re) } if d.logger.Writer() != io.Discard { b, err := json.Marshal(d) if err != nil { d.logger.Printf("initialized processor '%s': %+v", processorType, d) return nil } d.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } return nil } func (d *allow) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { i := 0 for _, e := range es { if d.allow(e) { es[i] = e i++ } } for j := i; j < len(es); j++ { es[j] = nil } es = es[:i] return es } func (d *allow) WithLogger(l *log.Logger) { if d.Debug && l != nil { d.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if d.Debug { d.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } func (d *allow) allow(e *formatters.EventMsg) bool { if d.Condition != "" { ok, err := formatters.CheckCondition(d.code, e) if err != nil { d.logger.Printf("condition check failed: %v", err) return false } return ok } for k, v := range e.Values { for _, re := range d.valueNames { if re.MatchString(k) { d.logger.Printf("value name '%s' matched regex '%s'", k, re.String()) return true } } for _, re := range d.values { if vs, ok := v.(string); ok { if re.MatchString(vs) { d.logger.Printf("value '%s' matched regex '%s'", v, re.String()) return true } } } } for k, v := range e.Tags { for _, re := range d.tagNames { if re.MatchString(k) { d.logger.Printf("tag name '%s' matched regex '%s'", k, re.String()) return true } } for _, re := range d.tags { if re.MatchString(v) { d.logger.Printf("tag '%s' matched regex '%s'", v, re.String()) return true } } } return false } ================================================ FILE: pkg/formatters/event_allow/event_allow_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_allow import ( "reflect" "testing" "github.com/openconfig/gnmic/pkg/formatters" ) type item struct { input []*formatters.EventMsg output []*formatters.EventMsg } var testset = map[string]struct { processorType string processor map[string]interface{} tests []item }{ "allow_condition": { processorType: processorType, processor: map[string]interface{}{ "condition": ".values.value == 1", }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ {}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, }, }, }, }, }, "allow_value_names": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"^number$"}, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": 1}, }, { Values: map[string]interface{}{"not-number": 1}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": 1}, }, //{}, }, }, }, }, "allow_tag_names": { processorType: processorType, processor: map[string]interface{}{ "tag-names": []string{"^name*"}, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{}}, }, output: []*formatters.EventMsg{ { Tags: map[string]string{}}, }, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{"name": "dummy"}, }, { Tags: map[string]string{"not-name": "dummy"}, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{"name": "dummy"}, }, //{}, }, }, }, }, "allow_tag_values": { processorType: processorType, processor: map[string]interface{}{ "tags": []string{"router1"}, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{}, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{}, }, }, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{"name": "router1"}, }, { Tags: map[string]string{"not-name": "dummy"}, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{"name": "router1"}, }, //{}, }, }, }, }, "allow_multiple_value_names": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{ "^number$", "^name$", }, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": 1}, }, { Values: map[string]interface{}{"not-number": 1}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": 1}, }, //{}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": "123"}, }, { Values: map[string]interface{}{"not-name": 1}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": "123"}, }, //{}, }, }, }, }, "allow_multiple_tag_names": { processorType: processorType, processor: map[string]interface{}{ "tag-names": []string{ "^id$", "^name$", }, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{"name": "dummy"}, }, { Tags: map[string]string{"not-name": "dummy"}, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{"name": "dummy"}, }, //{}, }, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{"name": "dummy"}, }, { Tags: map[string]string{"id": "dummy"}, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{"name": "dummy"}, }, { Tags: map[string]string{"id": "dummy"}, }, //{}, }, }, }, }, } func TestEventAllow(t *testing.T) { for name, ts := range testset { if pi, ok := formatters.EventProcessors[ts.processorType]; ok { p := pi() err := p.Init(ts.processor) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } t.Logf("processor: %+v", p) for i, item := range ts.tests { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) outs := p.Apply(item.input...) for j := range outs { if !reflect.DeepEqual(outs[j], item.output[j]) { t.Logf("failed at event allow, item %d, index %d", i, j) t.Logf("expected: %#v", item.output[j]) t.Logf(" got: %#v", outs[j]) t.Fail() } } }) } } } } ================================================ FILE: pkg/formatters/event_combine/event_combine.go ================================================ // © 2023 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_combine_test import ( "encoding/json" "fmt" "io" "log" "os" "sort" "strings" "github.com/itchyny/gojq" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-combine" loggingPrefix = "[" + processorType + "] " ) // combine allows running multiple processors together based on conditions type combine struct { formatters.BaseProcessor Processors []*procseq `mapstructure:"processors,omitempty"` Debug bool `mapstructure:"debug,omitempty"` processorsDefinitions map[string]map[string]any targetsConfigs map[string]*types.TargetConfig actionsDefinitions map[string]map[string]any logger *log.Logger } type procseq struct { Condition string `mapstructure:"condition,omitempty"` Name string `mapstructure:"name,omitempty"` condition *gojq.Code proc formatters.EventProcessor } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &combine{ logger: log.New(io.Discard, "", 0), } }) } func (p *combine) Init(cfg any, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, p) if err != nil { return err } for _, opt := range opts { opt(p) } if len(p.Processors) == 0 { return fmt.Errorf("missing processors definition") } for i, proc := range p.Processors { if proc == nil { return fmt.Errorf("missing processor(#%d) definition", i) } if proc.Name == "" { return fmt.Errorf("invalid processor(#%d) definition: missing name", i) } // init condition if it's set if proc.Condition != "" { proc.Condition = strings.TrimSpace(proc.Condition) q, err := gojq.Parse(proc.Condition) if err != nil { return err } proc.condition, err = gojq.Compile(q) if err != nil { return err } } // init subprocessors if epCfg, ok := p.processorsDefinitions[proc.Name]; ok { epType := "" for k := range epCfg { epType = k break } if in, ok := formatters.EventProcessors[epType]; ok { proc.proc = in() err := proc.proc.Init(epCfg[epType], formatters.WithLogger(p.logger), formatters.WithTargets(p.targetsConfigs), formatters.WithActions(p.actionsDefinitions), formatters.WithProcessors(p.processorsDefinitions), ) if err != nil { return fmt.Errorf("failed initializing event processor '%s' of type='%s': %v", proc.Name, epType, err) } p.logger.Printf("added event processor '%s' of type=%s to combine processor", proc.Name, epType) continue } return fmt.Errorf("%q event processor has an unknown type=%q", proc.Name, epType) } return fmt.Errorf("%q event processor not found", proc.Name) } if p.logger.Writer() != io.Discard { b, err := json.Marshal(p) if err != nil { p.logger.Printf("initialized processor '%s': %+v", processorType, p) return nil } p.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } return nil } func (p *combine) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { les := len(es) in := make([]*formatters.EventMsg, 0, les) out := make([]*formatters.EventMsg, 0, les) for _, proc := range p.Processors { in = in[:0] out = out[:0] for i, e := range es { ok, err := formatters.CheckCondition(proc.condition, e) if err != nil { p.logger.Printf("condition check failed: %v", err) } if ok { if p.Debug { p.logger.Printf("processor #%d include: %s", i, e) } in = append(in, e) continue } if p.Debug { p.logger.Printf("processor #%d exclude: %s", i, e) } out = append(out, e) } in = proc.proc.Apply(in...) es = es[:0] es = append(es, in...) es = append(es, out...) if len(es) > 1 { sort.Slice(es, func(i, j int) bool { return es[i].Timestamp < es[j].Timestamp }) } } return es } func (s *combine) WithLogger(l *log.Logger) { if s.Debug && l != nil { s.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if s.Debug { s.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } func (s *combine) WithTargets(tcs map[string]*types.TargetConfig) { s.targetsConfigs = tcs } func (s *combine) WithActions(act map[string]map[string]any) { s.actionsDefinitions = act } func (s *combine) WithProcessors(procs map[string]map[string]any) { s.processorsDefinitions = procs } ================================================ FILE: pkg/formatters/event_combine/event_combine_test/event_combine_test.go ================================================ // © 2023 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_sequence import ( "reflect" "testing" "github.com/openconfig/gnmic/pkg/formatters" _ "github.com/openconfig/gnmic/pkg/formatters/all" ) func Test_combine_Apply(t *testing.T) { type fields struct { processorConfig map[string]any processorsSet map[string]map[string]any } type args struct { es []*formatters.EventMsg } tests := []struct { name string fields fields args args want []*formatters.EventMsg }{ { name: "simple1", fields: fields{ processorConfig: map[string]any{ "debug": true, "processors": []any{ map[string]any{ "condition": ".tags.tag == \"t1\"", "name": "proc1", }, map[string]any{ "name": "proc2", }, }, }, processorsSet: map[string]map[string]any{ "proc1": { "event-strings": map[string]any{ "value-names": []string{"^number$"}, "transforms": []map[string]any{ { "replace": map[string]any{ "apply-on": "name", "old": "number", "new": "new_number", }, }, }, "debug": true, }, }, "proc2": { "event-strings": map[string]any{ "tag-names": []string{"^tag$"}, "transforms": []map[string]any{ { "replace": map[string]any{ "apply-on": "name", "old": "tag", "new": "new_tag", }, }, }, "debug": true, }, }, }, }, args: args{ es: []*formatters.EventMsg{ { Tags: map[string]string{"tag": "t1"}, Values: map[string]interface{}{"number": "42"}, }, { Tags: map[string]string{"t": "t1"}, Values: map[string]interface{}{"n": "42"}, }, }, }, want: []*formatters.EventMsg{ { Tags: map[string]string{"new_tag": "t1"}, Values: map[string]interface{}{"new_number": "42"}, }, { Tags: map[string]string{"t": "t1"}, Values: map[string]interface{}{"n": "42"}, }, }, }, { name: "simple2", fields: fields{ processorConfig: map[string]any{ "debug": true, "processors": []any{ map[string]any{ "condition": ".tags.tag == \"t2\"", "name": "proc1", }, map[string]any{ "name": "proc2", }, }, }, processorsSet: map[string]map[string]any{ "proc1": { "event-strings": map[string]any{ "value-names": []string{"^number$"}, "transforms": []map[string]any{ { "replace": map[string]any{ "apply-on": "name", "old": "number", "new": "new_number", }, }, }, "debug": true, }, }, "proc2": { "event-strings": map[string]any{ "tag-names": []string{"^tag$"}, "transforms": []map[string]any{ { "replace": map[string]any{ "apply-on": "name", "old": "tag", "new": "new_tag", }, }, }, "debug": true, }, }, }, }, args: args{ es: []*formatters.EventMsg{ { Tags: map[string]string{"tag": "t1"}, Values: map[string]interface{}{"number": "42"}, }, { Tags: map[string]string{"t": "t1"}, Values: map[string]interface{}{"n": "42"}, }, }, }, want: []*formatters.EventMsg{ { Tags: map[string]string{"new_tag": "t1"}, Values: map[string]interface{}{"number": "42"}, }, { Tags: map[string]string{"t": "t1"}, Values: map[string]interface{}{"n": "42"}, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { in := formatters.EventProcessors["event-combine"] p := in() err := p.Init(tt.fields.processorConfig, formatters.WithProcessors(tt.fields.processorsSet)) if err != nil { t.Logf("%s failed to init the processor: %v", tt.name, err) t.Fail() } if got := p.Apply(tt.args.es...); !reflect.DeepEqual(got, tt.want) { t.Errorf("combine.Apply() = %v, want %v", got, tt.want) } }) } } ================================================ FILE: pkg/formatters/event_convert/event_convert.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_convert import ( "encoding/binary" "encoding/json" "fmt" "io" "log" "math" "os" "regexp" "strconv" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-convert" loggingPrefix = "[" + processorType + "] " ) // convert converts the value with key matching one of regexes, to the specified Type type convert struct { formatters.BaseProcessor Values []string `mapstructure:"value-names,omitempty" json:"value-names,omitempty"` Type string `mapstructure:"type,omitempty" json:"type,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` values []*regexp.Regexp logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &convert{ logger: log.New(io.Discard, "", 0), } }) } func (c *convert) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, c) if err != nil { return err } for _, opt := range opts { opt(c) } c.values = make([]*regexp.Regexp, 0, len(c.Values)) for _, reg := range c.Values { re, err := regexp.Compile(reg) if err != nil { return err } c.values = append(c.values, re) } if c.logger.Writer() != io.Discard { b, err := json.Marshal(c) if err != nil { c.logger.Printf("initialized processor '%s': %+v", processorType, c) return nil } c.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } return nil } func (c *convert) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { for _, e := range es { if e == nil { continue } for k, v := range e.Values { for _, re := range c.values { if re.MatchString(k) { c.logger.Printf("key '%s' matched regex '%s'", k, re.String()) switch c.Type { case "int": iv, err := convertToInt(v) if err != nil { c.logger.Printf("convert error: %v", err) break } c.logger.Printf("key '%s', value %v converted to %s: %d", k, v, c.Type, iv) e.Values[k] = iv case "uint": iv, err := convertToUint(v) if err != nil { c.logger.Printf("convert error: %v", err) break } c.logger.Printf("key '%s', value %v converted to %s: %d", k, v, c.Type, iv) e.Values[k] = iv case "string": iv, err := convertToString(v) if err != nil { c.logger.Printf("convert error: %v", err) break } c.logger.Printf("key '%s', value %v converted to %s: %s", k, v, c.Type, iv) e.Values[k] = iv case "float": iv, err := convertToFloat(v) if err != nil { c.logger.Printf("convert error: %v", err) break } c.logger.Printf("key '%s', value %v converted to %s: %f", k, v, c.Type, iv) e.Values[k] = iv } break } } } } return es } func (c *convert) WithLogger(l *log.Logger) { if c.Debug && l != nil { c.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if c.Debug { c.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } func convertToInt(i interface{}) (int, error) { switch i := i.(type) { case string: iv, err := strconv.Atoi(i) if err != nil { return 0, err } return iv, nil case int: return i, nil case int8: return int(i), nil case int16: return int(i), nil case int32: return int(i), nil case int64: return int(i), nil case uint: return int(i), nil case uint8: return int(i), nil case uint16: return int(i), nil case uint32: return int(i), nil case uint64: return int(i), nil case float64: return int(i), nil case float32: return int(i), nil default: return 0, fmt.Errorf("cannot convert %v to int, type %T", i, i) } } func convertToUint(i interface{}) (uint, error) { switch i := i.(type) { case string: iv, err := strconv.Atoi(i) if err != nil { return 0, err } return uint(iv), nil case int: if i < 0 { return 0, nil } return uint(i), nil case int8: if i < 0 { return 0, nil } return uint(i), nil case int16: if i < 0 { return 0, nil } return uint(i), nil case int32: if i < 0 { return 0, nil } return uint(i), nil case int64: if i < 0 { return 0, nil } return uint(i), nil case uint: return i, nil case uint8: return uint(i), nil case uint16: return uint(i), nil case uint32: return uint(i), nil case uint64: return uint(i), nil case float32: if i < 0 { return 0, nil } return uint(i), nil case float64: if i < 0 { return 0, nil } return uint(i), nil default: return 0, fmt.Errorf("cannot convert %v to uint, type %T", i, i) } } func convertToFloat(i interface{}) (float64, error) { switch i := i.(type) { case []uint8: if len(i) == 4 { return float64(math.Float32frombits(binary.BigEndian.Uint32([]byte(i)))), nil } else if len(i) == 8 { return float64(math.Float64frombits(binary.BigEndian.Uint64([]byte(i)))), nil } else { return 0, nil } case string: iv, err := strconv.ParseFloat(i, 64) if err != nil { return 0, err } return iv, nil case int: return float64(i), nil case int8: return float64(i), nil case int16: return float64(i), nil case int32: return float64(i), nil case int64: return float64(i), nil case uint: return float64(i), nil case uint8: return float64(i), nil case uint16: return float64(i), nil case uint32: return float64(i), nil case uint64: return float64(i), nil case float64: return i, nil default: return 0, fmt.Errorf("cannot convert %v to float64, type %T", i, i) } } func convertToString(i interface{}) (string, error) { switch i := i.(type) { case string: return i, nil case int: return strconv.Itoa(i), nil case int8: return strconv.Itoa(int(i)), nil case int16: return strconv.Itoa(int(i)), nil case int32: return strconv.Itoa(int(i)), nil case int64: return strconv.Itoa(int(i)), nil case uint: return strconv.FormatUint(uint64(i), 10), nil case uint8: return strconv.FormatUint(uint64(i), 10), nil case uint16: return strconv.FormatUint(uint64(i), 10), nil case uint32: return strconv.FormatUint(uint64(i), 10), nil case uint64: return strconv.FormatUint(uint64(i), 10), nil case float64: return strconv.FormatFloat(i, 'f', -1, 64), nil case bool: return strconv.FormatBool(i), nil default: return "", fmt.Errorf("cannot convert %v to string, type %T", i, i) } } ================================================ FILE: pkg/formatters/event_convert/event_convert_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_convert import ( "reflect" "testing" "github.com/google/go-cmp/cmp" "github.com/openconfig/gnmic/pkg/formatters" ) type item struct { input []*formatters.EventMsg output []*formatters.EventMsg } var testset = map[string]struct { processorType string processor map[string]interface{} tests []item }{ "string_convert": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{ "^convert-me$", "^number*", }, "debug": true, "type": "string", }, tests: []item{ // nil msg { input: nil, output: nil, }, // empty msg { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, // non matching values { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, }, // matching values and tags { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"convert-me": 100}, Tags: map[string]string{"convert-me": "name_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"convert-me": "100"}, Tags: map[string]string{"convert-me": "name_tag"}, }, }, }, // 2 msgs, with matching values { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"convert-me": 100}, Tags: map[string]string{"convert-me": "name_tag"}, }, { Values: map[string]interface{}{"convert-me": 200}, Tags: map[string]string{"convert-me": "name_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"convert-me": "100"}, Tags: map[string]string{"convert-me": "name_tag"}, }, { Values: map[string]interface{}{"convert-me": "200"}, Tags: map[string]string{"convert-me": "name_tag"}, }, }, }, // 2 msgs, second with matching values { input: []*formatters.EventMsg{ { Tags: map[string]string{"convert-me": "name_tag"}, }, { Values: map[string]interface{}{"convert-me": 200}, Tags: map[string]string{"number": "name_tag"}, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{"convert-me": "name_tag"}, }, { Values: map[string]interface{}{"convert-me": "200"}, Tags: map[string]string{"number": "name_tag"}, }, }, }, // matching value, already a string { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"convert-me": "1"}, Tags: map[string]string{"number": "name_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"convert-me": "1"}, Tags: map[string]string{"number": "name_tag"}, }, }, }, // matching value, uint { input: []*formatters.EventMsg{ { Values: map[string]interface{}{ "number1": uint8(100), "number2": uint16(100), "number3": uint32(100), "number4": uint64(100), }, Tags: map[string]string{"number": "name_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{ "number1": "100", "number2": "100", "number3": "100", "number4": "100", }, Tags: map[string]string{"number": "name_tag"}, }, }, }, // matching value, float64 { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": float64(100.1)}, Tags: map[string]string{"number": "name_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": "100.1"}, Tags: map[string]string{"number": "name_tag"}, }, }, }, // matching value, bool { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": true}, Tags: map[string]string{"number": "name_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": "true"}, Tags: map[string]string{"number": "name_tag"}, }, }, }, }, }, "int_convert": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"^number*"}, "type": "int", }, tests: []item{ // nil msg { input: nil, output: nil, }, // empty msg { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, // non matching values { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, }, // matching values and tags { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": "100"}, Tags: map[string]string{"number": "name_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": int(100)}, Tags: map[string]string{"number": "name_tag"}, }, }, }, // 2 msgs, with matching values { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": "100"}, Tags: map[string]string{"number": "name_tag"}, }, { Values: map[string]interface{}{"number": "200"}, Tags: map[string]string{"number": "name_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": int(100)}, Tags: map[string]string{"number": "name_tag"}, }, { Values: map[string]interface{}{"number": int(200)}, Tags: map[string]string{"number": "name_tag"}, }, }, }, // 2 msgs, second with matching values { input: []*formatters.EventMsg{ { Tags: map[string]string{"number": "name_tag"}, }, { Values: map[string]interface{}{"number": "200"}, Tags: map[string]string{"number": "name_tag"}, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{"number": "name_tag"}, }, { Values: map[string]interface{}{"number": int(200)}, Tags: map[string]string{"number": "name_tag"}, }, }, }, // matching value, already an int { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": int(100)}, Tags: map[string]string{"number": "name_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": int(100)}, Tags: map[string]string{"number": "name_tag"}, }, }, }, // matching value, uint { input: []*formatters.EventMsg{ { Values: map[string]interface{}{ "number1": uint8(100), "number2": uint16(100), "number3": uint32(100), "number4": uint64(100), }, Tags: map[string]string{"number": "name_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{ "number1": int(100), "number2": int(100), "number3": int(100), "number4": int(100), }, Tags: map[string]string{"number": "name_tag"}, }, }, }, // matching value, float64 { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": float64(100)}, Tags: map[string]string{"number": "name_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": int(100)}, Tags: map[string]string{"number": "name_tag"}, }, }, }, // matching value, bool { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": true}, Tags: map[string]string{"number": "name_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": true}, Tags: map[string]string{"number": "name_tag"}, }, }, }, }, }, "uint_convert": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"^name.*"}, "type": "uint", }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{{Values: map[string]interface{}{}}}, output: []*formatters.EventMsg{{Values: map[string]interface{}{}}}, }, { input: []*formatters.EventMsg{{ Values: map[string]interface{}{"name_value_bytes": "42"}}}, output: []*formatters.EventMsg{{ Values: map[string]interface{}{"name_value_bytes": uint(42)}}}, }, { input: []*formatters.EventMsg{{ Values: map[string]interface{}{"name_value_bytes": uint(42)}}}, output: []*formatters.EventMsg{{ Values: map[string]interface{}{"name_value_bytes": uint(42)}}}, }, { input: []*formatters.EventMsg{{ Values: map[string]interface{}{"name_value_bytes": -42}}}, output: []*formatters.EventMsg{{ Values: map[string]interface{}{"name_value_bytes": uint(0)}}}, }, { input: []*formatters.EventMsg{{ Values: map[string]interface{}{"name_value_bytes": true}}}, output: []*formatters.EventMsg{{ Values: map[string]interface{}{"name_value_bytes": true}}}, }, { input: []*formatters.EventMsg{{ Values: map[string]interface{}{ "name_value_bytes1": int8(74), "name_value_bytes2": int16(75), "name_value_bytes3": int32(76), "name_value_bytes4": int64(77), }}}, output: []*formatters.EventMsg{{ Values: map[string]interface{}{ "name_value_bytes1": uint(74), "name_value_bytes2": uint(75), "name_value_bytes3": uint(76), "name_value_bytes4": uint(77), }}}, }, }, }, "float_convert": { processorType: processorType, processor: map[string]interface{}{"value-names": []string{"^number*"}, "type": "float"}, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": []uint8{62, 192, 0, 0}}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": float64(0.375)}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": []uint8{64, 9, 33, 251, 84, 68, 45, 24}}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": float64(3.141592653589793)}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": []uint8{64, 9, 33, 251, 84, 68, 45, 24, 32}}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": float64(0)}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": []uint8{62, 192, 0, 0}}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": float64(0.375)}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": "1.1"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": float64(1.1)}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": uint(42)}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": float64(42)}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": int(42)}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": float64(42)}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": true}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": true}, }, }, }, }, }, } func TestEventConvertToUint(t *testing.T) { ts := testset["uint_convert"] if pi, ok := formatters.EventProcessors[ts.processorType]; ok { t.Log("found processor") p := pi() err := p.Init(ts.processor, formatters.WithLogger(nil)) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } t.Logf("processor: %+v", p) for i, item := range ts.tests { t.Run("uint_convert", func(t *testing.T) { t.Logf("running test item %d", i) outs := p.Apply(item.input...) for j := range outs { if !reflect.DeepEqual(outs[j], item.output[j]) { t.Logf("failed at uint_convert item %d, index %d", i, j) t.Logf("expected: %#v", item.output[j]) t.Logf(" got: %#v", outs[j]) t.Fail() } } }) } } } func TestEventConvertToInt(t *testing.T) { ts := testset["int_convert"] if pi, ok := formatters.EventProcessors[ts.processorType]; ok { t.Log("found processor") p := pi() err := p.Init(ts.processor, formatters.WithLogger(nil)) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } for i, item := range ts.tests { t.Run("int_convert", func(t *testing.T) { t.Logf("running test item %d", i) outs := p.Apply(item.input...) for j := range outs { if !reflect.DeepEqual(outs[j], item.output[j]) { t.Logf("failed at int_convert item %d, index %d", i, j) t.Logf("expected: %#v", item.output[j]) t.Logf(" got: %#v", outs[j]) t.Fail() } } }) } } } func TestEventConvertToString(t *testing.T) { ts := testset["string_convert"] if pi, ok := formatters.EventProcessors[ts.processorType]; ok { t.Log("found processor") p := pi() err := p.Init(ts.processor, formatters.WithLogger(nil)) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } for i, item := range ts.tests { t.Run("string_convert", func(t *testing.T) { t.Logf("running test item %d", i) outs := p.Apply(item.input...) for j := range outs { if !cmp.Equal(outs[j], item.output[j]) { t.Logf("failed at string_convert item %d, index %d", i, j) t.Logf("expected: %#v", item.output[j]) t.Logf(" got: %#v", outs[j]) t.Fail() } } }) } } } func TestEventConvertToFloat(t *testing.T) { ts := testset["float_convert"] if pi, ok := formatters.EventProcessors[ts.processorType]; ok { t.Log("found processor") p := pi() err := p.Init(ts.processor, formatters.WithLogger(nil)) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } for i, item := range ts.tests { t.Run("float_convert", func(t *testing.T) { t.Logf("running test item %d", i) outs := p.Apply(item.input...) for j := range outs { if !reflect.DeepEqual(outs[j], item.output[j]) { t.Logf("failed at float_convert item %d, index %d", i, j) t.Logf("expected: %#v", item.output[j]) t.Logf(" got: %#v", outs[j]) t.Fail() } } }) } } } ================================================ FILE: pkg/formatters/event_data_convert/event_data_convert.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_data_convert import ( "encoding/json" "errors" "fmt" "io" "log" "os" "regexp" "strconv" "strings" units "github.com/bcicen/go-units" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-data-convert" loggingPrefix = "[" + processorType + "] " ) var stringUnitRegex = regexp.MustCompile(`([+-]?([0-9]*[.])?[0-9]+)\s?(\S+)`) // dataConvert converts the value with key matching one of regexes, to the specified data unit type dataConvert struct { formatters.BaseProcessor Values []string `mapstructure:"value-names,omitempty" json:"value-names,omitempty"` From string `mapstructure:"from,omitempty" json:"from,omitempty"` To string `mapstructure:"to,omitempty" json:"to,omitempty"` Keep bool `mapstructure:"keep,omitempty" json:"keep,omitempty"` Old string `mapstructure:"old,omitempty" json:"old,omitempty"` New string `mapstructure:"new,omitempty" json:"new,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` values []*regexp.Regexp renameRegex *regexp.Regexp logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &dataConvert{ logger: log.New(io.Discard, "", 0), } }) } func (c *dataConvert) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, c) if err != nil { return err } for _, opt := range opts { opt(c) } c.values = make([]*regexp.Regexp, 0, len(c.Values)) for _, reg := range c.Values { re, err := regexp.Compile(reg) if err != nil { return err } c.values = append(c.values, re) } if c.Old != "" { c.renameRegex, err = regexp.Compile(c.Old) if err != nil { return err } } if c.logger.Writer() != io.Discard { b, err := json.Marshal(c) if err != nil { c.logger.Printf("initialized processor '%s': %+v", processorType, c) return nil } c.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } return nil } func (c *dataConvert) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { for _, e := range es { if e == nil { continue } // add new Values to a new map to avoid multiple chained regex matches newValues := make(map[string]interface{}) for k, v := range e.Values { for _, re := range c.values { if re.MatchString(k) { c.logger.Printf("key '%s' matched regex '%s'", k, re.String()) iv, err := c.convertData(k, v, nil) if err != nil { c.logger.Printf("data convert error: %v", err) break } c.logger.Printf("key '%s', value %v converted to %s: %f", k, v, c.To, iv) if c.renameRegex != nil { newValues[c.getNewName(k)] = iv if !c.Keep { delete(e.Values, k) } break } if c.Keep { newValues[fmt.Sprintf("%s_%s", k, c.To)] = iv break } newValues[k] = iv break } } } // add new values to the original message for k, v := range newValues { e.Values[k] = v } } return es } func (c *dataConvert) WithLogger(l *log.Logger) { if c.Debug && l != nil { c.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if c.Debug { c.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } func (c *dataConvert) convertData(k string, i interface{}, from *units.Unit) (float64, error) { if from == nil && c.From == "" { from = unitFromName(k) } if from == nil { fr := sToU(c.From) from = &fr } switch i := i.(type) { case string: iv, err := strconv.Atoi(i) if err != nil { v, unit, err := parseStringUnit(i) if err != nil { return 0, err } return c.convertData(k, v, &unit) } return c.convertData(k, iv, nil) case int: cv, err := units.ConvertFloat(float64(i), *from, sToU(c.To)) if err != nil { return 0, err } return cv.Float(), nil case int8: cv, err := units.ConvertFloat(float64(i), *from, sToU(c.To)) if err != nil { return 0, err } return cv.Float(), nil case int16: cv, err := units.ConvertFloat(float64(i), *from, sToU(c.To)) if err != nil { return 0, err } return cv.Float(), nil case int32: cv, err := units.ConvertFloat(float64(i), *from, sToU(c.To)) if err != nil { return 0, err } return cv.Float(), nil case int64: if from == nil { *from = sToU(c.From) } cv, err := units.ConvertFloat(float64(i), *from, sToU(c.To)) if err != nil { return 0, err } return cv.Float(), nil case uint: cv, err := units.ConvertFloat(float64(i), *from, sToU(c.To)) if err != nil { return 0, err } return cv.Float(), nil case uint8: cv, err := units.ConvertFloat(float64(i), *from, sToU(c.To)) if err != nil { return 0, err } return cv.Float(), nil case uint16: cv, err := units.ConvertFloat(float64(i), *from, sToU(c.To)) if err != nil { return 0, err } return cv.Float(), nil case uint32: cv, err := units.ConvertFloat(float64(i), *from, sToU(c.To)) if err != nil { return 0, err } return cv.Float(), nil case uint64: cv, err := units.ConvertFloat(float64(i), *from, sToU(c.To)) if err != nil { return 0, err } return cv.Float(), nil case float64: cv, err := units.ConvertFloat(i, *from, sToU(c.To)) if err != nil { return 0, err } return cv.Float(), nil case float32: cv, err := units.ConvertFloat(float64(i), *from, sToU(c.To)) if err != nil { return 0, err } return cv.Float(), nil default: return 0, fmt.Errorf("cannot convert %v, type %T", i, i) } } func sToU(s string) units.Unit { switch s { case "b": return units.Bit case "kb": return units.KiloBit case "mb": return units.MegaBit case "gb": return units.GigaBit case "tb": return units.TeraBit case "eb": return units.ExaBit // case "B": return units.Byte case "KB": return units.KiloByte case "MB": return units.MegaByte case "GB": return units.GigaByte case "TB": return units.TeraByte case "EB": return units.ExaByte case "ZB": return units.ZettaByte case "YB": return units.YottaByte // case "KiB": return units.Kibibyte case "MiB": return units.Mebibyte case "GiB": return units.Gibibyte case "TiB": return units.Tebibyte case "EiB": return units.Exbibyte case "ZiB": return units.Zebibyte case "YiB": return units.Yobibyte // default: return units.Byte } } func parseStringUnit(s string) (float64, units.Unit, error) { // derive unit from string groups := stringUnitRegex.FindAllSubmatch([]byte(s), -1) if len(groups) == 0 { return 0, units.Byte, errors.New("failed to parse string submatches") } if len(groups[0]) != 4 { return 0, units.Byte, errors.New("failed to parse string, unexpected number of groups") } // check if the first match is equal to the original value if string(groups[0][0]) != s { return 0, units.Byte, errors.New("failed to parse string, partial match") } f, err := strconv.ParseFloat(string(groups[0][1]), 64) if err != nil { return 0, units.Unit{}, err } return f, sToU(string(groups[0][3])), nil } func unitFromName(k string) *units.Unit { switch { case strings.HasSuffix(k, "_octets"), strings.HasSuffix(k, "_bytes"), strings.HasSuffix(k, "-octets"), strings.HasSuffix(k, "-bytes"): return &units.Byte } return nil } func (c *dataConvert) getNewName(k string) string { if c.renameRegex != nil { return c.renameRegex.ReplaceAllString(k, c.New) } return k } ================================================ FILE: pkg/formatters/event_data_convert/event_data_convert_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_data_convert import ( "log" "os" "testing" "github.com/google/go-cmp/cmp" "github.com/openconfig/gnmic/pkg/formatters" ) func Test_dataConvert_Apply(t *testing.T) { type fields map[string]interface{} type args struct { es []*formatters.EventMsg } tests := []struct { name string fields fields args args want []*formatters.EventMsg }{ { name: "nil_input", fields: map[string]interface{}{ "value-names": []string{ ".*", }, "debug": true, }, args: args{}, want: nil, }, { name: "one_msg_bytes", fields: map[string]interface{}{ "value-names": []string{ "_total$", }, "to": "KB", "debug": true, }, args: args{ es: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "data_total": 1024, }, }, }, }, want: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "data_total": float64(1), }, }, }, }, { name: "one_msg_bytes_keep", fields: map[string]interface{}{ "value-names": []string{ "_total$", }, "to": "KB", "keep": true, "debug": true, }, args: args{ es: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "data_total": 1024, }, }, }, }, want: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "data_total": 1024, "data_total_KB": float64(1), }, }, }, }, { name: "one_msg_bytes_from", fields: map[string]interface{}{ "value-names": []string{ "_total$", }, "from": "KB", "to": "B", "debug": true, }, args: args{ es: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "data_total": 1, }, }, }, }, want: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "data_total": float64(1024), }, }, }, }, { name: "one_msg_multiple_values", fields: map[string]interface{}{ "value-names": []string{ "_total$", }, "from": "KB", "to": "B", "debug": true, }, args: args{ es: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "data_total": 1, "bytes_total": 2, }, }, }, }, want: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "data_total": float64(1024), "bytes_total": float64(2048), }, }, }, }, { name: "two_messages", fields: map[string]interface{}{ "value-names": []string{ "_total$", }, "from": "KB", "to": "B", "debug": true, }, args: args{ es: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "data_total": 1, "bytes_total": 2, }, }, { Name: "sub2", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "data_total": 1, "bytes_total": 2, }, }, }, }, want: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "data_total": float64(1024), "bytes_total": float64(2048), }, }, { Name: "sub2", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "data_total": float64(1024), "bytes_total": float64(2048), }, }, }, }, { name: "string_value_with_unit", fields: map[string]interface{}{ "value-names": []string{ "_total$", }, "to": "B", "debug": true, }, args: args{ es: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "data_total": "1 KB", "bytes_total": "2KB", }, }, }, }, want: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "data_total": float64(1024), "bytes_total": float64(2048), }, }, }, }, { name: "one_msg_rename", fields: map[string]interface{}{ "value-names": []string{ "_total$", }, "to": "KB", "old": `^(bytes)(\S+)`, "new": "kilobytes${2}", "debug": true, }, args: args{ es: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "bytes_total": 1024, }, }, }, }, want: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "kilobytes_total": float64(1), }, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := &dataConvert{} err := c.Init(tt.fields, formatters.WithLogger(log.New(os.Stderr, "[event-data-convert-test]", log.Flags()))) if err != nil { t.Errorf("failed to init processor in test %q: %v", tt.name, err) t.Fail() } if got := c.Apply(tt.args.es...); !cmp.Equal(got, tt.want) { t.Errorf("got : %+v", got) t.Errorf("want: %+v", tt.want) t.Errorf("dataConvert.Apply() = %+v, want %+v", got, tt.want) } }) } } ================================================ FILE: pkg/formatters/event_date_string/event_date_string.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_date_string import ( "encoding/json" "errors" "io" "log" "os" "regexp" "strconv" "time" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-date-string" loggingPrefix = "[" + processorType + "] " ) // dateString converts Tags and/or Values of unix timestamp to a human readable format. // Precision specifies the unit of the received timestamp, s, ms, us or ns. // DateTimeFormat is the desired datetime format, it defaults to RFC3339 type dateString struct { formatters.BaseProcessor Tags []string `mapstructure:"tag-names,omitempty" json:"tag-names,omitempty"` Values []string `mapstructure:"value-names,omitempty" json:"value-names,omitempty"` Precision string `mapstructure:"precision,omitempty" json:"precision,omitempty"` Format string `mapstructure:"format,omitempty" json:"format,omitempty"` Location string `mapstructure:"location,omitempty" json:"location,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` tags []*regexp.Regexp values []*regexp.Regexp location *time.Location logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &dateString{ logger: log.New(io.Discard, "", 0), } }) } func (d *dateString) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, d) if err != nil { return err } for _, opt := range opts { opt(d) } // init values regex d.values = make([]*regexp.Regexp, 0, len(d.Values)) for _, reg := range d.Values { re, err := regexp.Compile(reg) if err != nil { return err } d.values = append(d.values, re) } // init tags regex d.tags = make([]*regexp.Regexp, 0, len(d.Tags)) for _, reg := range d.Tags { re, err := regexp.Compile(reg) if err != nil { return err } d.tags = append(d.tags, re) } // set tz d.location = time.Local if d.Location != "" { loc, err := time.LoadLocation(d.Location) if err != nil { return err } d.location = loc } if d.logger.Writer() != io.Discard { b, err := json.Marshal(d) if err != nil { d.logger.Printf("initialized processor '%s': %+v", processorType, d) return nil } d.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } return nil } func (d *dateString) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { for _, e := range es { if e == nil { continue } for k, v := range e.Values { for _, re := range d.values { if re.MatchString(k) { d.logger.Printf("key '%s' matched regex '%s'", k, re.String()) iv, err := convertToInt(v) if err != nil { d.logger.Printf("failed to convert '%v' to date string: %v", v, err) continue } var td time.Time switch d.Precision { case "s", "sec", "second": td = time.Unix(int64(iv), 0) case "ms", "millisecond": td = time.Unix(0, int64(iv)*1000000) case "us", "microsecond": td = time.Unix(0, int64(iv)*1000) case "ns", "nanosecond": td = time.Unix(0, int64(iv)) } if d.Format == "" { d.Format = time.RFC3339 } e.Values[k] = td.In(d.location).Format(d.Format) break } } } for k, v := range e.Tags { for _, re := range d.tags { if re.MatchString(k) { d.logger.Printf("key '%s' matched regex '%s'", k, re.String()) iv, err := strconv.Atoi(v) if err != nil { log.Printf("failed to convert %s to int: %v", v, err) } var td time.Time switch d.Precision { case "s", "sec", "second": td = time.Unix(int64(iv), 0) case "ms", "millisecond": td = time.Unix(0, int64(iv)*1000000) case "us", "microsecond": td = time.Unix(0, int64(iv)*1000) case "ns", "nanosecond": td = time.Unix(0, int64(iv)) } if d.Format == "" { d.Format = time.RFC3339 } e.Values[k] = td.Format(d.Format) break } } } } return es } func (d *dateString) WithLogger(l *log.Logger) { if d.Debug && l != nil { d.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if d.Debug { d.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } func convertToInt(i interface{}) (int, error) { switch i := i.(type) { case string: iv, err := strconv.Atoi(i) if err != nil { return 0, err } return iv, nil case int: return i, nil case uint: return int(i), nil case float64: return int(i), nil default: return 0, errors.New("cannot convert to int") } } ================================================ FILE: pkg/formatters/event_date_string/event_date_string_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_date_string import ( "reflect" "testing" "github.com/openconfig/gnmic/pkg/formatters" ) type item struct { input []*formatters.EventMsg output []*formatters.EventMsg } var testset = map[string]struct { processorType string processor map[string]interface{} tests []item }{ "seconds_date_string": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"timestamp"}, "precision": "s", "location": "Asia/Taipei", }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"timestamp": 1606824673}, Tags: map[string]string{"timestamp": "0"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"timestamp": "2020-12-01T20:11:13+08:00"}, Tags: map[string]string{"timestamp": "0"}, }, }, }, }, }, } func TestEventDateString(t *testing.T) { for name, ts := range testset { if pi, ok := formatters.EventProcessors[ts.processorType]; ok { t.Log("found processor") p := pi() err := p.Init(ts.processor) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } t.Logf("initialized for test %s: %+v", name, p) for i, item := range ts.tests { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) outs := p.Apply(item.input...) for j := range outs { if !reflect.DeepEqual(outs[j], item.output[j]) { t.Logf("failed at event date string, item %d, index %d", i, j) t.Logf("expected: %#v", item.output[j]) t.Logf(" got: %#v", outs[j]) t.Fail() } } }) } } } } ================================================ FILE: pkg/formatters/event_delete/event_delete.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_delete import ( "encoding/json" "io" "log" "os" "regexp" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-delete" loggingPrefix = "[" + processorType + "] " ) // deletep, deletes ALL the tags or values matching one of the regexes type deletep struct { formatters.BaseProcessor Tags []string `mapstructure:"tags,omitempty" json:"tags,omitempty"` Values []string `mapstructure:"values,omitempty" json:"values,omitempty"` TagNames []string `mapstructure:"tag-names,omitempty" json:"tag-names,omitempty"` ValueNames []string `mapstructure:"value-names,omitempty" json:"value-names,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` tags []*regexp.Regexp values []*regexp.Regexp tagNames []*regexp.Regexp valueNames []*regexp.Regexp logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &deletep{ logger: log.New(io.Discard, "", 0), } }) } func (d *deletep) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, d) if err != nil { return err } for _, opt := range opts { opt(d) } // init tags regex d.tags = make([]*regexp.Regexp, 0, len(d.Tags)) for _, reg := range d.Tags { re, err := regexp.Compile(reg) if err != nil { return err } d.tags = append(d.tags, re) } // init tag names regex d.tagNames = make([]*regexp.Regexp, 0, len(d.TagNames)) for _, reg := range d.TagNames { re, err := regexp.Compile(reg) if err != nil { return err } d.tagNames = append(d.tagNames, re) } // init values regex d.values = make([]*regexp.Regexp, 0, len(d.Values)) for _, reg := range d.Values { re, err := regexp.Compile(reg) if err != nil { return err } d.values = append(d.values, re) } // init values names regex d.valueNames = make([]*regexp.Regexp, 0, len(d.ValueNames)) for _, reg := range d.ValueNames { re, err := regexp.Compile(reg) if err != nil { return err } d.valueNames = append(d.valueNames, re) } if d.logger.Writer() != io.Discard { b, err := json.Marshal(d) if err != nil { d.logger.Printf("initialized processor '%s': %+v", processorType, d) return nil } d.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } return nil } func (d *deletep) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { for _, e := range es { if e == nil { continue } for k, v := range e.Values { for _, re := range d.valueNames { if re.MatchString(k) { d.logger.Printf("key '%s' matched regex '%s'", k, re.String()) delete(e.Values, k) } } for _, re := range d.values { if vs, ok := v.(string); ok { if re.MatchString(vs) { d.logger.Printf("key '%s' matched regex '%s'", k, re.String()) delete(e.Values, k) } } } } for k, v := range e.Tags { for _, re := range d.tagNames { if re.MatchString(k) { d.logger.Printf("key '%s' matched regex '%s'", k, re.String()) delete(e.Tags, k) } } for _, re := range d.tags { if re.MatchString(v) { d.logger.Printf("key '%s' matched regex '%s'", k, re.String()) delete(e.Tags, k) } } } } return es } func (d *deletep) WithLogger(l *log.Logger) { if d.Debug && l != nil { d.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if d.Debug { d.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } ================================================ FILE: pkg/formatters/event_delete/event_delete_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_delete import ( "reflect" "testing" "github.com/openconfig/gnmic/pkg/formatters" ) type item struct { input []*formatters.EventMsg output []*formatters.EventMsg } var testset = map[string]struct { processorType string processor map[string]interface{} tests []item }{ "tag-names_delete": { processorType: processorType, processor: map[string]interface{}{ "tag-names": []string{"^name*"}, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{"name": "name_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{"name": "name_tag", "name-2": "name-2_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{"name": "name_tag", "-name": "name-2_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{"-name": "name-2_tag"}, }, }, }, }, }, "2_tag-names_delete": { processorType: processorType, processor: map[string]interface{}{ "tag-names": []string{"^name*", "to_delete"}, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1, "todelete": "value"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1, "todelete": "value"}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{"name": "name_tag", "to_delete": "value"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{"name": "name_tag", "name-2": "name-2_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{"name": "name_tag", "-name": "name-2_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{"-name": "name-2_tag"}, }, }, }, }, }, "value-names_delete": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"deleteme*"}, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"deleteme": 1}, Tags: map[string]string{"-name": "name-2_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}, Tags: map[string]string{"-name": "name-2_tag"}}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"deleteme": 1, "dont-deleteme": 1}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, }, }, "2_value-names_delete": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"deleteme", "deleteme-too"}, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"deleteme": 1}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"deleteme": 1, "deleteme-too": 1}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, }, }, "tag-names_and_value-names_delete": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"deleteme-value*"}, "tag-names": []string{"deleteme-tag*"}, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"deleteme-value": 1}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{"deleteme-tag": "tag"}}, }, output: []*formatters.EventMsg{ { Tags: map[string]string{}}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"deleteme-value": 1, "dont-deleteme": 1}, Tags: map[string]string{"deleteme-tag": "tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"dont-deleteme": 1}, Tags: map[string]string{}, }, }, }, }, }, "tags_delete": { processorType: processorType, processor: map[string]interface{}{ "tags": []string{"^name*"}, }, tests: []item{ // 0 { input: nil, output: nil, }, // 1 { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, // 2 { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}}, }, }, // 3 { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{"name": "name_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{}, }, }, }, // 4 { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{"name": "name_tag", "name-2": "name-2_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{}, }, }, }, // 5 { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{"name": "name_tag", "-name": "name-2_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{}, }, }, }, }, }, "2_tags_delete": { processorType: processorType, processor: map[string]interface{}{ "tags": []string{"^name*", "to_delete"}, }, tests: []item{ // 0 { input: nil, output: nil, }, // 1 { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, // 2 { input: []*formatters.EventMsg{ {Values: map[string]interface{}{"name": 1, "todelete": "to_delete"}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1, "todelete": "to_delete"}}, }, }, // 3 { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{"name": "name_tag", "tag_name": "to_delete"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{}, }, }, }, // 4 { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{"name": "name_tag", "name-2": "name-2_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{}, }, }, }, // 5 { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{"name": "name_tag", "name-2": "-name-2_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": 1}, Tags: map[string]string{"name-2": "-name-2_tag"}, }, }, }, }, }, "values_delete": { processorType: processorType, processor: map[string]interface{}{ "values": []string{"deleteme*"}, }, tests: []item{ // 0 { input: nil, output: nil, }, // 1 { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, // 2 { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"deleteme": "deleteme"}, Tags: map[string]string{"-name": "name-2_tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}, Tags: map[string]string{"-name": "name-2_tag"}}, }, }, // 3 { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"foo": "deleteme", "dont-deleteme": 1}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"dont-deleteme": 1}}, }, }, }, }, "2_values_delete": { processorType: processorType, processor: map[string]interface{}{ "values": []string{"deleteme", "deleteme-too"}, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"foo": "deleteme"}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"foo": "deleteme", "bar": "deleteme-too"}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, }, }, "tags_and_values_delete": { processorType: processorType, processor: map[string]interface{}{ "values": []string{"deleteme-value*"}, "tags": []string{"deleteme-tag*"}, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"foo-value": "deleteme-value"}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{"foo-tag": "deleteme-tag"}}, }, output: []*formatters.EventMsg{ { Tags: map[string]string{}}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"foo-value": "deleteme-value", "dont-deleteme": 1}, Tags: map[string]string{"foo-tag": "deleteme-tag"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"dont-deleteme": 1}, Tags: map[string]string{}, }, }, }, }, }, } func TestEventDelete(t *testing.T) { for name, ts := range testset { if pi, ok := formatters.EventProcessors[ts.processorType]; ok { t.Log("found processor") p := pi() err := p.Init(ts.processor) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } t.Logf("initialized for test %s: %+v", name, p) for i, item := range ts.tests { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) outs := p.Apply(item.input...) for j := range outs { if !reflect.DeepEqual(outs[j], item.output[j]) { t.Logf("failed at event delete, item %d, index %d", i, j) t.Logf("expected: %#v", item.output[j]) t.Logf(" got: %#v", outs[j]) t.Fail() } } }) } } else { t.Errorf("processors type %s not found", ts.processorType) t.Fail() } } } ================================================ FILE: pkg/formatters/event_drop/event_drop.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_drop import ( "encoding/json" "io" "log" "os" "regexp" "strings" "github.com/itchyny/gojq" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-drop" loggingPrefix = "[" + processorType + "] " ) // drop Drops the msg if ANY of the Tags or Values regexes are matched type drop struct { formatters.BaseProcessor Condition string `mapstructure:"condition,omitempty"` TagNames []string `mapstructure:"tag-names,omitempty" json:"tag-names,omitempty"` ValueNames []string `mapstructure:"value-names,omitempty" json:"value-names,omitempty"` Tags []string `mapstructure:"tags,omitempty" json:"tags,omitempty"` Values []string `mapstructure:"values,omitempty" json:"values,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` tagNames []*regexp.Regexp valueNames []*regexp.Regexp tags []*regexp.Regexp values []*regexp.Regexp code *gojq.Code logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &drop{ logger: log.New(io.Discard, "", 0), } }) } func (d *drop) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, d) if err != nil { return err } for _, opt := range opts { opt(d) } d.Condition = strings.TrimSpace(d.Condition) q, err := gojq.Parse(d.Condition) if err != nil { return err } d.code, err = gojq.Compile(q) if err != nil { return err } // init tag keys regex d.tagNames = make([]*regexp.Regexp, 0, len(d.TagNames)) for _, reg := range d.TagNames { re, err := regexp.Compile(reg) if err != nil { return err } d.tagNames = append(d.tagNames, re) } d.tags = make([]*regexp.Regexp, 0, len(d.Tags)) for _, reg := range d.Tags { re, err := regexp.Compile(reg) if err != nil { return err } d.tags = append(d.tags, re) } // d.valueNames = make([]*regexp.Regexp, 0, len(d.ValueNames)) for _, reg := range d.ValueNames { re, err := regexp.Compile(reg) if err != nil { return err } d.valueNames = append(d.valueNames, re) } d.values = make([]*regexp.Regexp, 0, len(d.values)) for _, reg := range d.Values { re, err := regexp.Compile(reg) if err != nil { return err } d.values = append(d.values, re) } if d.logger.Writer() != io.Discard { b, err := json.Marshal(d) if err != nil { d.logger.Printf("initialized processor '%s': %+v", processorType, d) return nil } d.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } return nil } func (d *drop) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { i := 0 for _, e := range es { if !d.drop(e) { es[i] = e i++ } } for j := i; j < len(es); j++ { es[j] = nil } es = es[:i] return es } func (d *drop) WithLogger(l *log.Logger) { if d.Debug && l != nil { d.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if d.Debug { d.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } func (d *drop) drop(e *formatters.EventMsg) bool { if d.Condition != "" { ok, err := formatters.CheckCondition(d.code, e) if err != nil { d.logger.Printf("condition check failed: %v", err) return true } return ok } for k, v := range e.Values { for _, re := range d.valueNames { if re.MatchString(k) { d.logger.Printf("value name '%s' matched regex '%s'", k, re.String()) return true } } for _, re := range d.values { if vs, ok := v.(string); ok { if re.MatchString(vs) { d.logger.Printf("value '%s' matched regex '%s'", v, re.String()) return true } } } } for k, v := range e.Tags { for _, re := range d.tagNames { if re.MatchString(k) { d.logger.Printf("tag name '%s' matched regex '%s'", k, re.String()) return true } } for _, re := range d.tags { if re.MatchString(v) { d.logger.Printf("tag '%s' matched regex '%s'", v, re.String()) return true } } } return false } ================================================ FILE: pkg/formatters/event_drop/event_drop_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_drop import ( "reflect" "testing" "github.com/openconfig/gnmic/pkg/formatters" ) type item struct { input []*formatters.EventMsg output []*formatters.EventMsg } var testset = map[string]struct { processorType string processor map[string]interface{} tests []item }{ "drop_condition": { processorType: processorType, processor: map[string]interface{}{ "condition": ".values.value == 1", "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, }, }, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 0}, }, { Values: map[string]interface{}{"value": 1}, }, { Values: map[string]interface{}{"value": 2}, }, { Values: map[string]interface{}{"value": 3}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 0}, }, { Values: map[string]interface{}{"value": 2}, }, { Values: map[string]interface{}{"value": 3}, }, }, }, }, }, "drop_values": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"^number$"}, "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"number": 1}, }, }, output: nil, }, }, }, "drop_tags": { processorType: processorType, processor: map[string]interface{}{ "tag-names": []string{"^name*"}, "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{}, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{}, }, }, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{"name": "dummy"}, }, }, output: nil, }, }, }, } func TestEventDrop(t *testing.T) { for name, ts := range testset { if pi, ok := formatters.EventProcessors[ts.processorType]; ok { p := pi() err := p.Init(ts.processor) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } t.Logf("processor: %+v", p) for i, item := range ts.tests { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) outs := p.Apply(item.input...) if len(outs) != len(item.output) { t.Logf("output length mismatch") t.Fail() } for j := range outs { if !reflect.DeepEqual(outs[j], item.output[j]) { t.Logf("failed at event drop, item %d, index %d", i, j) t.Logf("expected: %#v", item.output[j]) t.Logf(" got: %#v", outs[j]) t.Fail() } } }) } } } } var input = []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 0}, }, { Values: map[string]interface{}{"value": 1}, }, { Values: map[string]interface{}{"value": 2}, }, { Values: map[string]interface{}{"value": 3}, }, } func BenchmarkApply(b *testing.B) { pi := formatters.EventProcessors["event-drop"] p := pi() err := p.Init(map[string]interface{}{ "condition": ".values.value >= 1", }) if err != nil { panic(err) } for i := 0; i < b.N; i++ { p.Apply(input...) } } ================================================ FILE: pkg/formatters/event_duration_convert/event_duration_convert.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_data_convert import ( "encoding/json" "fmt" "io" "log" "os" "regexp" "strconv" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-duration-convert" loggingPrefix = "[" + processorType + "] " ) var durationRegex = regexp.MustCompile(`((?P\d+)w)?((?P\d+)d)?((?P\d+)h)?((?P\d+)m)?((?P\d+)s)?`) // durationConvert converts the value with key matching one of regexes, to the specified duration precision type durationConvert struct { formatters.BaseProcessor Values []string `mapstructure:"value-names,omitempty" json:"value-names,omitempty"` Keep bool `mapstructure:"keep,omitempty" json:"keep,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` values []*regexp.Regexp logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &durationConvert{ logger: log.New(io.Discard, "", 0), } }) } func (c *durationConvert) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, c) if err != nil { return err } for _, opt := range opts { opt(c) } c.values = make([]*regexp.Regexp, 0, len(c.Values)) for _, reg := range c.Values { re, err := regexp.Compile(reg) if err != nil { return err } c.values = append(c.values, re) } if c.logger.Writer() != io.Discard { b, err := json.Marshal(c) if err != nil { c.logger.Printf("initialized processor '%s': %+v", processorType, c) return nil } c.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } return nil } func (c *durationConvert) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { for _, e := range es { if e == nil { continue } // add new Values to a new map to avoid multiple chained regex matches newValues := make(map[string]interface{}) for k, v := range e.Values { for _, re := range c.values { if re.MatchString(k) { c.logger.Printf("key '%s' matched regex '%s'", k, re.String()) dur, err := c.convertDuration(k, v) if err != nil { c.logger.Printf("duration convert error: %v", err) break } c.logger.Printf("key '%s', value %v converted to seconds: %d", k, v, dur) if c.Keep { newValues[fmt.Sprintf("%s_seconds", k)] = dur break } newValues[k] = dur break } } } // add new values to the original message for k, v := range newValues { e.Values[k] = v } } return es } func (c *durationConvert) WithLogger(l *log.Logger) { if c.Debug && l != nil { c.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if c.Debug { c.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } func (c *durationConvert) convertDuration(k string, i interface{}) (int64, error) { switch i := i.(type) { case string: iv, err := strconv.Atoi(i) if err != nil { return parseStringDuration(i) } return c.convertDuration(k, iv) case int: return int64(i), nil case int8: return int64(i), nil case int16: return int64(i), nil case int32: return int64(i), nil case int64: return int64(i), nil case uint: return int64(i), nil case uint8: return int64(i), nil case uint16: return int64(i), nil case uint32: return int64(i), nil case uint64: return int64(i), nil case float64: return int64(i), nil case float32: return int64(i), nil default: return 0, fmt.Errorf("cannot convert %v, type %T", i, i) } } func parseStringDuration(s string) (int64, error) { match := durationRegex.FindStringSubmatch(s) namedGroups := make(map[string]string) for i, name := range durationRegex.SubexpNames() { if i != 0 && name != "" { namedGroups[name] = match[i] } } r := int64(0) for k, v := range namedGroups { if v == "" { continue } switch k { case "weeks": i, err := strconv.Atoi(v) if err != nil { return 0, err } r += int64(i) * 7 * 24 * 60 * 60 case "days": i, err := strconv.Atoi(v) if err != nil { return 0, err } r += int64(i) * 24 * 60 * 60 case "hours": i, err := strconv.Atoi(v) if err != nil { return 0, err } r += int64(i) * 60 * 60 case "minutes": i, err := strconv.Atoi(v) if err != nil { return 0, err } r += int64(i) * 60 case "seconds": i, err := strconv.Atoi(v) if err != nil { return 0, err } r += int64(i) } } return r, nil } ================================================ FILE: pkg/formatters/event_duration_convert/event_duration_convert_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_data_convert import ( "log" "os" "testing" "github.com/google/go-cmp/cmp" "github.com/openconfig/gnmic/pkg/formatters" ) const ( oneMins = int64(60) oneHs = int64(60 * 60) oneDs = int64(24 * 60 * 60) oneWs = int64(7 * 24 * 60 * 60) ) func Test_durationConvert_Apply(t *testing.T) { type fields map[string]interface{} type args struct { es []*formatters.EventMsg } tests := []struct { name string fields fields args args want []*formatters.EventMsg }{ { name: "nil_input", fields: map[string]interface{}{ "value-names": []string{ ".*", }, "debug": true, }, args: args{}, want: nil, }, { name: "week", fields: map[string]interface{}{ "value-names": []string{ ".*uptime", }, "debug": true, }, args: args{ es: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "connection_uptime": "1w", }, }, }, }, want: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "connection_uptime": oneWs, }, }, }, }, { name: "week_day", fields: map[string]interface{}{ "value-names": []string{ ".*uptime", }, "debug": true, }, args: args{ es: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "connection_uptime": "1w2d", }, }, }, }, want: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "connection_uptime": oneWs + 2*oneDs, }, }, }, }, { name: "week_day_hour", fields: map[string]interface{}{ "value-names": []string{ ".*uptime", }, "debug": true, }, args: args{ es: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "connection_uptime": "1w2d3h", }, }, }, }, want: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "connection_uptime": oneWs + 2*oneDs + 3*oneHs, }, }, }, }, { name: "week_day_hour_minute", fields: map[string]interface{}{ "value-names": []string{ ".*uptime", }, "debug": true, }, args: args{ es: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "connection_uptime": "1w2d3h4m", }, }, }, }, want: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "connection_uptime": oneWs + 2*oneDs + 3*oneHs + 4*oneMins, }, }, }, }, { name: "week_day_hour_minute_second", fields: map[string]interface{}{ "value-names": []string{ ".*uptime", }, "debug": true, }, args: args{ es: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "connection_uptime": "1w2d3h4m5s", }, }, }, }, want: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "connection_uptime": oneWs + 2*oneDs + 3*oneHs + 4*oneMins + 5, }, }, }, }, { name: "week_second", fields: map[string]interface{}{ "value-names": []string{ ".*uptime", }, "debug": true, }, args: args{ es: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "connection_uptime": "1w5s", }, }, }, }, want: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]interface{}{ "connection_uptime": oneWs + 5, }, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := &durationConvert{} err := c.Init(tt.fields, formatters.WithLogger(log.New(os.Stderr, "[event-duration-convert-test]", log.Flags()))) if err != nil { t.Errorf("failed to init processor in test %q: %v", tt.name, err) t.Fail() } if got := c.Apply(tt.args.es...); !cmp.Equal(got, tt.want) { t.Errorf("durationConvert.Apply() = %v, want %v", got, tt.want) } }) } } ================================================ FILE: pkg/formatters/event_extract_tags/event_extract_tags.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_extract_tags import ( "encoding/json" "io" "log" "os" "regexp" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-extract-tags" loggingPrefix = "[" + processorType + "] " ) // extractTags extracts tags from a value, a value name, a tag name or a tag value using regex named groups type extractTags struct { formatters.BaseProcessor Tags []string `mapstructure:"tags,omitempty" json:"tags,omitempty"` Values []string `mapstructure:"values,omitempty" json:"values,omitempty"` TagNames []string `mapstructure:"tag-names,omitempty" json:"tag-names,omitempty"` ValueNames []string `mapstructure:"value-names,omitempty" json:"value-names,omitempty"` Overwrite bool `mapstructure:"overwrite,omitempty" json:"overwrite,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` tags []*regexp.Regexp values []*regexp.Regexp tagNames []*regexp.Regexp valueNames []*regexp.Regexp logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &extractTags{ logger: log.New(io.Discard, "", 0), } }) } func (p *extractTags) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, p) if err != nil { return err } for _, opt := range opts { opt(p) } // init tags regex p.tags = make([]*regexp.Regexp, 0, len(p.Tags)) for _, reg := range p.Tags { re, err := regexp.Compile(reg) if err != nil { return err } p.tags = append(p.tags, re) } // init tag names regex p.tagNames = make([]*regexp.Regexp, 0, len(p.TagNames)) for _, reg := range p.TagNames { re, err := regexp.Compile(reg) if err != nil { return err } p.tagNames = append(p.tagNames, re) } // init values regex p.values = make([]*regexp.Regexp, 0, len(p.Values)) for _, reg := range p.Values { re, err := regexp.Compile(reg) if err != nil { return err } p.values = append(p.values, re) } // init value names regex p.valueNames = make([]*regexp.Regexp, 0, len(p.ValueNames)) for _, reg := range p.ValueNames { re, err := regexp.Compile(reg) if err != nil { return err } p.valueNames = append(p.valueNames, re) } if p.logger.Writer() != io.Discard { b, err := json.Marshal(p) if err != nil { p.logger.Printf("initialized processor '%s': %+v", processorType, p) return nil } p.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } return nil } func (p *extractTags) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { for _, e := range es { if e == nil { continue } for k, v := range e.Values { for _, re := range p.valueNames { p.addTags(e, re, k) } for _, re := range p.values { if vs, ok := v.(string); ok { p.addTags(e, re, vs) } } } for k, v := range e.Tags { for _, re := range p.tagNames { p.addTags(e, re, k) } for _, re := range p.tags { p.addTags(e, re, v) } } } return es } func (p *extractTags) WithLogger(l *log.Logger) { if p.Debug && l != nil { p.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if p.Debug { p.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } func (p *extractTags) addTags(e *formatters.EventMsg, re *regexp.Regexp, s string) { if e.Tags == nil { e.Tags = make(map[string]string) } matches := re.FindStringSubmatch(s) if p.Debug { p.logger.Printf("matches: %+v", matches) } if len(matches) != len(re.SubexpNames()) { return } for i, name := range re.SubexpNames() { if i != 0 && name != "" { if p.Debug { p.logger.Printf("adding: name=%s, value=%s", name, matches[i]) } if p.Overwrite { e.Tags[name] = matches[i] continue } if _, ok := e.Tags[matches[i]]; !ok { e.Tags[name] = matches[i] } } } } ================================================ FILE: pkg/formatters/event_extract_tags/event_extract_tags_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_extract_tags import ( "reflect" "testing" "github.com/openconfig/gnmic/pkg/formatters" ) type item struct { input []*formatters.EventMsg output []*formatters.EventMsg } var testset = map[string]struct { processorType string processor map[string]interface{} tests []item }{ "match_value_names": { processorType: processorType, processor: map[string]interface{}{ "debug": true, "value-names": []string{ `/(?P\w+)/(?P\w+)/(?P\w+)`, }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"/elem1/elem2/elem3": 1}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"/elem1/elem2/elem3": 1}, Tags: map[string]string{ "tag1": "1", "e1": "elem1", "e2": "elem2", "e3": "elem3", }, }, }, }, }, }, "match_value_names_partial": { processorType: processorType, processor: map[string]interface{}{ "debug": true, "value-names": []string{ `/(?P\w+)/(\w+)/(?P\w+)`, }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"/elem1/elem2/elem3": 1}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"/elem1/elem2/elem3": 1}, Tags: map[string]string{ "tag1": "1", "e1": "elem1", "e3": "elem3", }, }, }, }, }, }, "match_tag_names": { processorType: processorType, processor: map[string]interface{}{ "debug": true, "tag-names": []string{ `/(?P\w+)/(?P\w+)/(?P\w+)`, }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Tags: map[string]string{ "tag1": "1", "/elem1/elem2/elem3": "1", }, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "/elem1/elem2/elem3": "1", "tag1": "1", "e1": "elem1", "e2": "elem2", "e3": "elem3", }, }, }, }, }, }, "match_tag_names_partial": { processorType: processorType, processor: map[string]interface{}{ "debug": true, "tag-names": []string{ `/(?P\w+)/(\w+)/(?P\w+)`, }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Tags: map[string]string{ "tag1": "1", "/elem1/elem2/elem3": "1", }, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "/elem1/elem2/elem3": "1", "tag1": "1", "e1": "elem1", "e3": "elem3", }, }, }, }, }, }, } func TestEventAddTag(t *testing.T) { for name, ts := range testset { if pi, ok := formatters.EventProcessors[ts.processorType]; ok { t.Log("found processor") p := pi() err := p.Init(ts.processor) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } t.Logf("processor: %+v", p) for i, item := range ts.tests { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) outs := p.Apply(item.input...) for j := range outs { if !reflect.DeepEqual(outs[j], item.output[j]) { t.Logf("failed at %s item %d, index %d, expected: %+v", name, i, j, item.output[j]) t.Logf("failed at %s item %d, index %d, got: %+v", name, i, j, outs[j]) t.Fail() } } }) } } else { t.Errorf("event processor %s not found", ts.processorType) } } } ================================================ FILE: pkg/formatters/event_group_by/event_group_by.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_group_by import ( "encoding/json" "hash/fnv" "io" "log" "os" "slices" "strings" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-group-by" loggingPrefix = "[" + processorType + "] " ) // groupBy groups values from different event messages in the same event message // based on tags values type groupBy struct { formatters.BaseProcessor Tags []string `mapstructure:"tags,omitempty" json:"tags,omitempty"` ByName bool `mapstructure:"by-name,omitempty" json:"by-name,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &groupBy{ logger: log.New(io.Discard, "", 0), } }) } func (p *groupBy) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, p) if err != nil { return err } for _, opt := range opts { opt(p) } if p.logger.Writer() != io.Discard { b, err := json.Marshal(p) if err != nil { p.logger.Printf("initialized processor '%s': %+v", processorType, p) return nil } p.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } return nil } func (p *groupBy) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { result := make([]*formatters.EventMsg, 0, len(es)) if p.Debug { p.logger.Printf("before: %+v", es) } if !p.ByName { result = p.byTags(es) if p.Debug { p.logger.Printf("after: %+v", result) } return result } groups := make(map[string][]*formatters.EventMsg) names := make([]string, 0) for _, e := range es { _, ok := groups[e.Name] if !ok { groups[e.Name] = make([]*formatters.EventMsg, 0) names = append(names, e.Name) } groups[e.Name] = append(groups[e.Name], e) } slices.Sort(names) for _, n := range names { result = append(result, p.byTags(groups[n])...) } if p.Debug { p.logger.Printf("after: %+v", result) } return result } func (p *groupBy) WithLogger(l *log.Logger) { if p.Debug && l != nil { p.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if p.Debug { p.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } func (p *groupBy) byTagsOld(es []*formatters.EventMsg) []*formatters.EventMsg { if len(p.Tags) == 0 { return es } result := make([]*formatters.EventMsg, 0, len(es)) groups := make(map[string]*formatters.EventMsg) keys := make([]string, 0) for _, e := range es { if e == nil || e.Tags == nil || (e.Values == nil && e.Deletes == nil) { continue } exist := true var key strings.Builder for _, t := range p.Tags { if v, ok := e.Tags[t]; ok { key.WriteString(t) key.Write(eqByte) key.WriteString(v) key.Write(pipeByte) continue } exist = false break } if !exist { result = append(result, e) continue } skey := key.String() group, ok := groups[skey] if !ok { keys = append(keys, skey) group = &formatters.EventMsg{ Name: e.Name, Timestamp: e.Timestamp, Tags: make(map[string]string), Values: make(map[string]interface{}), } groups[skey] = group } for k, v := range e.Tags { group.Tags[k] = v } for k, v := range e.Values { group.Values[k] = v } if e.Deletes != nil { group.Deletes = append(group.Deletes, e.Deletes...) } } slices.Sort(keys) for _, k := range keys { result = append(result, groups[k]) } return result } func (p *groupBy) byTags(es []*formatters.EventMsg) []*formatters.EventMsg { if len(p.Tags) == 0 { return es } result := make([]*formatters.EventMsg, 0, len(es)) groups := make(map[uint64]*formatters.EventMsg) for _, e := range es { if e == nil || e.Tags == nil || (e.Values == nil && e.Deletes == nil) { continue } //grouping key based on tags value skey, match := generateKeyAndCheck(e.Tags, p.Tags) if !match { result = append(result, e) continue } group, exists := groups[skey] if !exists { group = &formatters.EventMsg{ Name: e.Name, Timestamp: e.Timestamp, Tags: make(map[string]string, len(e.Tags)), Values: make(map[string]interface{}, len(e.Values)), Deletes: make([]string, 0, len(e.Deletes)), } groups[skey] = group } // merge tags, values and deletes into the group for k, v := range e.Tags { group.Tags[k] = v } for k, v := range e.Values { group.Values[k] = v } if e.Deletes != nil { group.Deletes = append(group.Deletes, e.Deletes...) } } for _, ev := range groups { result = append(result, ev) } return result } func generateKeyAndCheck(tags map[string]string, keys []string) (uint64, bool) { h := fnv.New64a() for _, k := range keys { v, ok := tags[k] if !ok { return 0, false } h.Write([]byte(k)) h.Write([]byte(eqByte)) h.Write([]byte(v)) h.Write([]byte(pipeByte)) } return h.Sum64(), true } var ( eqByte = []byte("=") pipeByte = []byte("|") ) ================================================ FILE: pkg/formatters/event_group_by/event_group_by_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_group_by import ( "fmt" "reflect" "testing" "github.com/openconfig/gnmic/pkg/formatters" ) type item struct { input []*formatters.EventMsg output []*formatters.EventMsg } var testset = map[string]struct { processorType string processor map[string]interface{} tests []item }{ "group_by_1_tag": { processorType: processorType, processor: map[string]interface{}{ "tags": []string{"tag1"}, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"value1": 1}, Tags: map[string]string{"tag1": "1"}, }, { Values: map[string]interface{}{"value2": 2}, Tags: map[string]string{"tag1": "1"}, }, { Values: map[string]interface{}{"value3": 3}, Tags: map[string]string{"tag2": "2"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{ "value3": 3, }, Tags: map[string]string{ "tag2": "2", }, }, { Values: map[string]interface{}{ "value1": 1, "value2": 2, }, Tags: map[string]string{ "tag1": "1", }, }, }, }, { input: []*formatters.EventMsg{ { Deletes: []string{"value1"}, Tags: map[string]string{"tag1": "1"}, }, { Deletes: []string{"value2"}, Tags: map[string]string{"tag1": "1"}, }, { Deletes: []string{"value3"}, Tags: map[string]string{"tag2": "2"}, }, }, output: []*formatters.EventMsg{ { Deletes: []string{ "value3", }, Tags: map[string]string{ "tag2": "2", }, }, { Values: make(map[string]interface{}), Deletes: []string{ "value1", "value2", }, Tags: map[string]string{ "tag1": "1", }, }, }, }, }, }, "group_by_2_tags": { processorType: processorType, processor: map[string]interface{}{ "tags": []string{"tag1", "tag2"}, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"value1": 1}, Tags: map[string]string{ "tag1": "1", "tag2": "2", }, }, { Values: map[string]interface{}{"value2": 2}, Tags: map[string]string{ "tag1": "1", "tag2": "2", }, }, { Values: map[string]interface{}{"value3": 3}, Tags: map[string]string{ "tag1": "1", "tag2": "3", }, }, { Values: map[string]interface{}{"value4": 4}, Tags: map[string]string{ "tag1": "1", "tag2": "3", }, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{ "value1": 1, "value2": 2, }, Tags: map[string]string{ "tag1": "1", "tag2": "2", }, }, { Values: map[string]interface{}{ "value3": 3, "value4": 4, }, Tags: map[string]string{ "tag1": "1", "tag2": "3", }, }, }, }, { input: []*formatters.EventMsg{ { Deletes: []string{"value1"}, Tags: map[string]string{ "tag1": "1", "tag2": "2", }, }, { Deletes: []string{"value2"}, Tags: map[string]string{ "tag1": "1", "tag2": "2", }, }, { Deletes: []string{"value3"}, Tags: map[string]string{ "tag1": "1", "tag2": "3", }, }, { Deletes: []string{"value4"}, Tags: map[string]string{ "tag1": "1", "tag2": "3", }, }, }, output: []*formatters.EventMsg{ { Values: make(map[string]interface{}), Deletes: []string{ "value1", "value2", }, Tags: map[string]string{ "tag1": "1", "tag2": "2", }, }, { Values: make(map[string]interface{}), Deletes: []string{ "value3", "value4", }, Tags: map[string]string{ "tag1": "1", "tag2": "3", }, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"value1": 1}, Tags: map[string]string{ "tag1": "1", }, }, { Values: map[string]interface{}{"value1": 1}, Tags: map[string]string{ "tag1": "1", "tag2": "2", }, }, { Values: map[string]interface{}{"value2": 2}, Tags: map[string]string{ "tag1": "1", "tag2": "2", }, }, { Values: map[string]interface{}{"value3": 3}, Tags: map[string]string{ "tag1": "1", "tag2": "3", }, }, { Values: map[string]interface{}{"value4": 4}, Tags: map[string]string{ "tag1": "1", "tag2": "3", }, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"value1": 1}, Tags: map[string]string{ "tag1": "1", }, }, { Values: map[string]interface{}{ "value1": 1, "value2": 2, }, Tags: map[string]string{ "tag1": "1", "tag2": "2", }, }, { Values: map[string]interface{}{ "value3": 3, "value4": 4, }, Tags: map[string]string{ "tag1": "1", "tag2": "3", }, }, }, }, }, }, "group_by_name": { processorType: processorType, processor: map[string]interface{}{ "by-name": true, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value1": 1}, Tags: map[string]string{ "tag1": "1", }, }, { Name: "sub1", Values: map[string]interface{}{"value2": 2}, Tags: map[string]string{ "tag2": "2", }, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value1": 1}, Tags: map[string]string{ "tag1": "1", }, }, { Name: "sub1", Values: map[string]interface{}{"value2": 2}, Tags: map[string]string{ "tag2": "2", }, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: make(map[string]interface{}), Deletes: []string{ "value1", }, Tags: map[string]string{ "tag1": "1", }, }, { Name: "sub1", Values: make(map[string]interface{}), Deletes: []string{ "value2", }, Tags: map[string]string{ "tag2": "2", }, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: make(map[string]interface{}), Deletes: []string{ "value1", }, Tags: map[string]string{ "tag1": "1", }, }, { Name: "sub1", Values: make(map[string]interface{}), Deletes: []string{ "value2", }, Tags: map[string]string{ "tag2": "2", }, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub2", Values: map[string]interface{}{"value3": 3}, Tags: map[string]string{ "tag2": "2", }, }, { Name: "sub1", Values: map[string]interface{}{"value1": 1}, Tags: map[string]string{ "tag1": "1", }, }, { Name: "sub1", Values: map[string]interface{}{"value2": 2}, Tags: map[string]string{ "tag2": "2", }, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value1": 1}, Tags: map[string]string{ "tag1": "1", }, }, { Name: "sub1", Values: map[string]interface{}{"value2": 2}, Tags: map[string]string{ "tag2": "2", }, }, { Name: "sub2", Values: map[string]interface{}{"value3": 3}, Tags: map[string]string{ "tag2": "2", }, }, }, }, }, }, "group_by_name_by_tags": { processorType: processorType, processor: map[string]interface{}{ "by-name": true, "tags": []string{"tag1"}, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value1": 1}, Tags: map[string]string{ "tag1": "1", }, }, { Name: "sub1", Values: map[string]interface{}{"value2": 2}, Tags: map[string]string{ "tag1": "1", }, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{ "value1": 1, "value2": 2, }, Tags: map[string]string{ "tag1": "1", }, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub1", Deletes: []string{"value1"}, Tags: map[string]string{ "tag1": "1", }, }, { Name: "sub1", Deletes: []string{"value2"}, Tags: map[string]string{ "tag1": "1", }, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: make(map[string]interface{}), Deletes: []string{ "value1", "value2", }, Tags: map[string]string{ "tag1": "1", }, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value1": 1}, Tags: map[string]string{ "tag1": "1", }, }, { Name: "sub1", Values: map[string]interface{}{"value2": 2}, Tags: map[string]string{ "tag1": "2", }, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{ "value1": 1, }, Tags: map[string]string{ "tag1": "1", }, }, { Name: "sub1", Values: map[string]interface{}{ "value2": 2, }, Tags: map[string]string{ "tag1": "2", }, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub1", Deletes: []string{"value1"}, Tags: map[string]string{ "tag1": "1", }, }, { Name: "sub1", Deletes: []string{"value2"}, Tags: map[string]string{ "tag1": "2", }, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: make(map[string]interface{}), Deletes: []string{"value1"}, Tags: map[string]string{ "tag1": "1", }, }, { Name: "sub1", Values: make(map[string]interface{}), Deletes: []string{"value2"}, Tags: map[string]string{ "tag1": "2", }, }, }, }, }, }, } func TestEventGroupBy(t *testing.T) { for name, ts := range testset { if pi, ok := formatters.EventProcessors[ts.processorType]; ok { t.Log("found processor") p := pi() err := p.Init(ts.processor) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } t.Logf("processor: %+v", p) for i, item := range ts.tests { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) outs := p.Apply(item.input...) if len(outs) != len(item.output) { t.Errorf("failed at %s, outputs not of same length", name) t.Errorf("expected: %v", item.output) t.Errorf(" got: %v", outs) return } if !slicesEqual(outs, item.output) { t.Errorf("failed at %s, expected: %+v", name, item.output) t.Errorf("failed at %s, got: %+v", name, outs) } }) } } else { t.Errorf("event processor %s not found", ts.processorType) } } } func generateMockEvents(numEvents, numTags int) []*formatters.EventMsg { es := make([]*formatters.EventMsg, numEvents) for i := 0; i < numEvents; i++ { tags := make(map[string]string, numTags) values := make(map[string]interface{}, numTags) for j := 0; j < numTags; j++ { tags[fmt.Sprintf("tag%d", j)] = fmt.Sprintf("value%d", j) values[fmt.Sprintf("valueKey%d", j)] = fmt.Sprintf("value%d", j) } es[i] = &formatters.EventMsg{ Name: fmt.Sprintf("event%d", i%5), // Group some events by name Timestamp: int64(i), Tags: tags, Values: values, } } return es } func BenchmarkByTags(b *testing.B) { p := &groupBy{Tags: []string{"tag1", "tag2"}} // Generate mock event messages es := generateMockEvents(100_000, 5) b.Run("OldByTags", func(b *testing.B) { for i := 0; i < b.N; i++ { _ = p.byTagsOld(es) } }) b.Run("NewByTags", func(b *testing.B) { for i := 0; i < b.N; i++ { _ = p.byTags(es) } }) } func slicesEqual(slice1, slice2 []*formatters.EventMsg) bool { if len(slice1) != len(slice2) { return false } // Create a map to track matches in slice2 used := make([]bool, len(slice2)) // Check that every item in slice1 has a match in slice2 for _, e1 := range slice1 { found := false for i, e2 := range slice2 { if !used[i] && eventMsgEqual(e1, e2) { used[i] = true found = true break } } if !found { return false // No match found for this item } } return true } func eventMsgEqual(a, b *formatters.EventMsg) bool { if a == nil || b == nil { return a == b } if a.Name != b.Name || a.Timestamp != b.Timestamp { return false } if !reflect.DeepEqual(a.Tags, b.Tags) { return false } if !reflect.DeepEqual(a.Values, b.Values) { return false } if a.Deletes == nil && b.Deletes == nil { return true } if len(a.Deletes) == 0 && len(b.Deletes) == 0 { return true } if !reflect.DeepEqual(a.Deletes, b.Deletes) { return false } return true } ================================================ FILE: pkg/formatters/event_ieeefloat32/event_ieeefloat32.go ================================================ // © 2024 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_ieeefloat32 import ( "encoding/base64" "encoding/binary" "encoding/json" "fmt" "io" "log" "math" "os" "regexp" "strings" "github.com/itchyny/gojq" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-ieeefloat32" loggingPrefix = "[" + processorType + "] " ) // ieeefloat32 converts values from a base64 encoded string into a float32 type ieeefloat32 struct { formatters.BaseProcessor Condition string `mapstructure:"condition,omitempty"` ValueNames []string `mapstructure:"value-names,omitempty" json:"value-names,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` valueNames []*regexp.Regexp code *gojq.Code logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &ieeefloat32{ logger: log.New(io.Discard, "", 0), } }) } func (p *ieeefloat32) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, p) if err != nil { return err } for _, opt := range opts { opt(p) } if p.Condition != "" { p.Condition = strings.TrimSpace(p.Condition) q, err := gojq.Parse(p.Condition) if err != nil { return err } p.code, err = gojq.Compile(q) if err != nil { return err } } // init value names regex p.valueNames, err = compileRegex(p.ValueNames) if err != nil { return err } if p.logger.Writer() != io.Discard { b, err := json.Marshal(p) if err != nil { p.logger.Printf("initialized processor '%s': %+v", processorType, p) return nil } p.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } return nil } func (p *ieeefloat32) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { for _, e := range es { if e == nil { continue } // condition is set if p.code != nil && p.Condition != "" { ok, err := formatters.CheckCondition(p.code, e) if err != nil { p.logger.Printf("condition check failed: %v", err) } if !ok { continue } } // condition passed => check regexes for k, v := range e.Values { for _, re := range p.valueNames { if re.MatchString(k) { f, err := p.decodeBase64String(v) if err != nil { p.logger.Printf("failed to decode base64 string: %v", err) continue } e.Values[k] = f break } } } } return es } func (p *ieeefloat32) WithLogger(l *log.Logger) { if p.Debug && l != nil { p.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if p.Debug { p.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } func (p *ieeefloat32) decodeBase64String(e any) (float32, error) { var err error var data []byte switch b64 := e.(type) { default: return 0, fmt.Errorf("invalid type: %T", e) case string: data, err = base64.StdEncoding.DecodeString(b64) if err != nil { return 0, fmt.Errorf("failed to decode base64: %v", err) } case []byte: data = b64 } if len(data) < 4 { return 0, fmt.Errorf("decoded data is less than 4 bytes") } bits := binary.BigEndian.Uint32(data[:4]) floatVal := math.Float32frombits(bits) return floatVal, nil } func compileRegex(expr []string) ([]*regexp.Regexp, error) { res := make([]*regexp.Regexp, 0, len(expr)) for _, reg := range expr { re, err := regexp.Compile(reg) if err != nil { return nil, err } res = append(res, re) } return res, nil } ================================================ FILE: pkg/formatters/event_ieeefloat32/event_ieeefloat32_test.go ================================================ // © 2024 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_ieeefloat32 import ( "reflect" "testing" "github.com/openconfig/gnmic/pkg/formatters" ) type item struct { input []*formatters.EventMsg output []*formatters.EventMsg } var testset = map[string]struct { processorType string processor map[string]interface{} tests []item }{ "simple": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{ "^components/component/power-supply/state/output-current$", "^components/component/power-supply/state/input-current$", }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{"value": 1}, Tags: map[string]string{ "tag1": "1", }, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{ "components/component/power-supply/state/output-current": "QEYAAA=="}, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{ "components/component/power-supply/state/output-current": float32(3.09375)}, }, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{ "components/component/power-supply/state/output-current": "QEYAAA==", "components/component/power-supply/state/input-current": "QEYAAA==", }, }, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{ "components/component/power-supply/state/output-current": float32(3.09375), "components/component/power-supply/state/input-current": float32(3.09375), }, }, }, }, }, }, } func TestEventIEEEFloat32(t *testing.T) { for name, ts := range testset { if pi, ok := formatters.EventProcessors[ts.processorType]; ok { t.Log("found processor") p := pi() err := p.Init(ts.processor) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } t.Logf("processor: %+v", p) for i, item := range ts.tests { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) outs := p.Apply(item.input...) for j := range outs { if !reflect.DeepEqual(outs[j], item.output[j]) { t.Logf("failed at %s item %d, index %d, expected: %+v", name, i, j, item.output[j]) t.Logf("failed at %s item %d, index %d, got: %+v", name, i, j, outs[j]) t.Fail() } } }) } } else { t.Errorf("event processor %s not found", ts.processorType) } } } ================================================ FILE: pkg/formatters/event_jq/event_jq.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_jq import ( "errors" "io" "log" "os" "strings" "github.com/itchyny/gojq" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-jq" loggingPrefix = "[" + processorType + "] " defaultCondition = "all([true])" defaultExpression = "." ) // jq runs a jq expression on the received event messages type jq struct { formatters.BaseProcessor Condition string `mapstructure:"condition,omitempty"` Expression string `mapstructure:"expression,omitempty"` Debug bool `mapstructure:"debug,omitempty"` cond *gojq.Code expr *gojq.Code logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &jq{ logger: log.New(io.Discard, "", 0), } }) } func (p *jq) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, p) if err != nil { return err } for _, opt := range opts { opt(p) } p.setDefaults() p.Condition = strings.TrimSpace(p.Condition) q, err := gojq.Parse(p.Condition) if err != nil { return err } p.cond, err = gojq.Compile(q) if err != nil { return err } p.Expression = strings.TrimSpace(p.Expression) q, err = gojq.Parse(p.Expression) if err != nil { return err } p.expr, err = gojq.Compile(q) if err != nil { return err } return nil } func (p *jq) setDefaults() { if p.Condition == "" { p.Condition = defaultCondition } if p.Expression == "" { p.Expression = defaultExpression } } func (p *jq) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { nuMsgs := len(es) inputs := make([]interface{}, 0, nuMsgs) res := make([]*formatters.EventMsg, 0, nuMsgs) for _, e := range es { if e == nil { continue } input := e.ToMap() ok, err := p.evaluateCondition(input) if err != nil { p.logger.Printf("failed to evaluate condition: %v", err) continue } if ok { inputs = append(inputs, input) continue } res = append(res, e) } evs, err := p.applyExpression(inputs) if err != nil { p.logger.Printf("failed to apply jq expression: %v", err) return nil } return append(res, evs...) } func (p *jq) evaluateCondition(input map[string]interface{}) (bool, error) { var res interface{} var err error if p.cond != nil { iter := p.cond.Run(input) var ok bool res, ok = iter.Next() if !ok { // iterator not done, so the final result won't be a boolean return false, nil } if err, ok = res.(error); ok { return false, err } p.logger.Printf("condition jq result: (%T)%v for input %+v", res, res, input) } switch res := res.(type) { case bool: return res, nil default: return false, errors.New("unexpected condition return type") } } func (p *jq) applyExpression(input []interface{}) ([]*formatters.EventMsg, error) { var res []interface{} var err error var evs = make([]*formatters.EventMsg, 0) iter := p.expr.Run(input) for { r, ok := iter.Next() if !ok { p.logger.Printf("iter done? %v | r=%v", ok, r) break } p.logger.Printf("iter result: (%T)%+v\n", r, r) switch r := r.(type) { case error: return nil, err default: p.logger.Printf("adding %+v\n", r) res = append(res, r) } } for _, e := range res { switch es := e.(type) { case []interface{}: for _, ee := range es { switch ee := ee.(type) { case map[string]interface{}: ev, err := formatters.EventFromMap(ee) if err != nil { return nil, err } evs = append(evs, ev) default: p.logger.Printf("unexpected type (%T)%+v", ee, ee) } } case map[string]interface{}: ev, err := formatters.EventFromMap(es) if err != nil { return nil, err } evs = append(evs, ev) default: p.logger.Printf("unexpected type (%T)%+v", e, e) } } return evs, nil } func (p *jq) WithLogger(l *log.Logger) { if p.Debug && l != nil { p.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if p.Debug { p.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } ================================================ FILE: pkg/formatters/event_jq/event_jq_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_jq import ( "log" "os" "reflect" "testing" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) type item struct { input []*formatters.EventMsg output []*formatters.EventMsg } var testset = map[string]struct { processorType string processor map[string]interface{} tests []item }{ "default_values": { processorType: processorType, processor: map[string]interface{}{ "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, }, }, }, "simple_select_expression": { processorType: processorType, processor: map[string]interface{}{ "expression": `.[] | select(.name=="sub1")`, "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub2", Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{}, }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, { Name: "sub2", Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, }, }, }, "double_condition_and_select_expression": { processorType: processorType, processor: map[string]interface{}{ "expression": `.[] | select(.name=="sub1" and .values.counter1 > 90)`, "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{ "counter1": 91, }, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{ "counter1": 91, }, Tags: map[string]string{"tag1": "1"}, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{}, }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{}, }, }, }, "complex_select_expression": { processorType: processorType, processor: map[string]interface{}{ "expression": `.[] | select((.name=="sub1" and .values.counter1 > 90) or (.name=="sub2" and .values.counter2 > 80))`, "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{ "counter1": 91, }, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{ "counter1": 91, }, Tags: map[string]string{"tag1": "1"}, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{}, }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{}, }, { input: []*formatters.EventMsg{ { Name: "sub2", Values: map[string]interface{}{ "counter2": 91, }, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Name: "sub2", Values: map[string]interface{}{ "counter2": 91, }, Tags: map[string]string{"tag1": "1"}, }, }, }, }, }, "delete_a_single_value": { processorType: processorType, processor: map[string]interface{}{ "expression": `.[] | del(.values.counter1)`, "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{ "counter1": 91, }, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{}, Tags: map[string]string{"tag1": "1"}, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub2", Values: map[string]interface{}{ "counter2": 91, }, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Name: "sub2", Values: map[string]interface{}{ "counter2": 91, }, Tags: map[string]string{"tag1": "1"}, }, }, }, }, }, "delete_multiple_values": { processorType: processorType, processor: map[string]interface{}{ "expression": `.[] | del(.values.["counter1", "counter2"])`, "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{ "counter1": 91, "counter2": 91, }, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{}, Tags: map[string]string{"tag1": "1"}, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub2", Values: map[string]interface{}{ "counter2": 91, }, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Name: "sub2", Values: map[string]interface{}{}, Tags: map[string]string{"tag1": "1"}, }, }, }, }, }, "add_a_tag": { processorType: processorType, processor: map[string]interface{}{ "expression": `.[] |= (.tags.new = "TAG1")`, "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{ "counter1": 91, "counter2": 91, }, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{ "counter1": 91, "counter2": 91, }, Tags: map[string]string{ "tag1": "1", "new": "TAG1", }, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value": 1}, Tags: map[string]string{ "tag1": "1", "new": "TAG1", }, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub2", Values: map[string]interface{}{ "counter2": 91, }, Tags: map[string]string{ "tag1": "1", "new": "TAG1", }, }, }, output: []*formatters.EventMsg{ { Name: "sub2", Values: map[string]interface{}{ "counter2": 91, }, Tags: map[string]string{ "tag1": "1", "new": "TAG1", }, }, }, }, }, }, "add_a_value": { processorType: processorType, processor: map[string]interface{}{ "expression": `.[] |= (.values.new = "Value1")`, "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{ "counter1": 91, "counter2": 91, }, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{ "counter1": 91, "counter2": 91, "new": "Value1", }, Tags: map[string]string{ "tag1": "1", }, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{ "value": 1, "new": "Value1", }, Tags: map[string]string{ "tag1": "1", }, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub2", Values: map[string]interface{}{ "counter2": 91, "new": "Value1", }, Tags: map[string]string{ "tag1": "1", }, }, }, output: []*formatters.EventMsg{ { Name: "sub2", Values: map[string]interface{}{ "counter2": 91, "new": "Value1", }, Tags: map[string]string{ "tag1": "1", }, }, }, }, }, }, "add_a_value_with_condition": { processorType: processorType, processor: map[string]interface{}{ "condition": `.tags | has("tag1")`, "expression": `.[] |= (.values.new = "Value1")`, "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{ "counter1": 91, "counter2": 91, }, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{ "counter1": 91, "counter2": 91, "new": "Value1", }, Tags: map[string]string{ "tag1": "1", }, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{"value": 1}, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{ "value": 1, "new": "Value1", }, Tags: map[string]string{ "tag1": "1", }, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub2", Values: map[string]interface{}{ "counter2": 91, }, Tags: map[string]string{}, }, }, output: []*formatters.EventMsg{ { Name: "sub2", Values: map[string]interface{}{ "counter2": 91, }, Tags: map[string]string{}, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub2", Values: map[string]interface{}{ "counter1": 91, }, Tags: map[string]string{ "tag1": "1", }, }, { Name: "sub2", Values: map[string]interface{}{ "counter2": 91, }, Tags: map[string]string{}, }, }, output: []*formatters.EventMsg{ { Name: "sub2", Values: map[string]interface{}{ "counter2": 91, }, Tags: map[string]string{}, }, { Name: "sub2", Values: map[string]interface{}{ "counter1": 91, "new": "Value1", }, Tags: map[string]string{ "tag1": "1", }, }, }, }, }, }, "expression_with_$var": { processorType: processorType, processor: map[string]interface{}{ "condition": `.values | has("a")`, "expression": `.[] | .values.a as $x | .values.b=$x+1`, "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{ "a": 42, }, Tags: map[string]string{"tag1": "1"}, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Values: map[string]interface{}{ "a": 42, "b": 43, }, Tags: map[string]string{ "tag1": "1", }, }, }, }, }, }, } func TestEventJQ(t *testing.T) { for name, ts := range testset { if pi, ok := formatters.EventProcessors[ts.processorType]; ok { t.Log("found processor") p := pi() err := p.Init(ts.processor, formatters.WithLogger(log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags))) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } t.Logf("processor: %+v", p) for i, item := range ts.tests { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) outs := p.Apply(item.input...) for j := range item.input { t.Logf("%q item %d, index %d, inputs=%+v", name, i, j, item.input[j]) } // compare lengths first if len(outs) != len(item.output) { t.Logf("expected and gotten outputs are not of the same length") t.Logf("expected: %+v", item.output) t.Logf(" got: %+v", outs) t.Fail() } // for j := range outs { t.Logf("%q item %d, index %d, output=%+v", name, i, j, outs[j]) if !reflect.DeepEqual(outs[j], item.output[j]) { t.Logf("failed at %s item %d, index %d", name, i, j) t.Logf("expected: %+v", item.output[j]) t.Logf(" got: %+v", outs[j]) t.Fail() } } }) } } else { t.Errorf("event processor %s not found", ts.processorType) } } } ================================================ FILE: pkg/formatters/event_merge/event_merge.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_merge import ( "encoding/json" "io" "log" "os" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-merge" loggingPrefix = "[" + processorType + "] " ) // merge merges a list of event messages into one or multiple messages based on some criteria type merge struct { formatters.BaseProcessor Always bool `mapstructure:"always,omitempty" json:"always,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &merge{ logger: log.New(io.Discard, "", 0), } }) } func (p *merge) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, p) if err != nil { return err } for _, opt := range opts { opt(p) } if p.logger.Writer() != io.Discard { b, err := json.Marshal(p) if err != nil { p.logger.Printf("initialized processor '%s': %+v", processorType, p) return nil } p.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } return nil } func (p *merge) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { if len(es) == 0 { return nil } if p.Always { for i, e := range es { if e == nil { continue } if i > 0 { mergeEvents(es[0], e) } } return []*formatters.EventMsg{es[0]} } result := make([]*formatters.EventMsg, 0, len(es)) timestamps := make(map[int64]int) for _, e := range es { if e == nil { continue } if idx, ok := timestamps[e.Timestamp]; ok { mergeEvents(result[idx], e) continue } result = append(result, e) timestamps[e.Timestamp] = len(result) - 1 } return result } func (p *merge) WithLogger(l *log.Logger) { if p.Debug && l != nil { p.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if p.Debug { p.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } func mergeEvents(e1, e2 *formatters.EventMsg) { if e1.Tags == nil { e1.Tags = make(map[string]string) } if e1.Values == nil { e1.Values = make(map[string]interface{}) } for n, t := range e2.Tags { e1.Tags[n] = t } for n, v := range e2.Values { e1.Values[n] = v } e1.Deletes = append(e1.Deletes, e2.Deletes...) if e2.Timestamp > e1.Timestamp { e1.Timestamp = e2.Timestamp } } ================================================ FILE: pkg/formatters/event_merge/event_merge_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_merge import ( "reflect" "testing" "github.com/openconfig/gnmic/pkg/formatters" ) type item struct { input []*formatters.EventMsg output []*formatters.EventMsg } var testset = map[string]struct { processorType string processor map[string]interface{} tests []item }{ "merge_by_timestamps": { processorType: processorType, processor: map[string]interface{}{ "always": false, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Timestamp: 1, Values: map[string]interface{}{"value1": 1}, Tags: map[string]string{"tag1": "1"}, }, { Timestamp: 1, Values: map[string]interface{}{"value2": 2}, Tags: map[string]string{"tag2": "2"}, }, { Timestamp: 1, Values: map[string]interface{}{"value3": 3}, Tags: map[string]string{"tag3": "3"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 1, Values: map[string]interface{}{ "value1": 1, "value2": 2, "value3": 3, }, Tags: map[string]string{ "tag1": "1", "tag2": "2", "tag3": "3", }, }, }, }, { input: []*formatters.EventMsg{ { Timestamp: 1, Values: map[string]interface{}{"name": 1}, }, { Timestamp: 2, Values: map[string]interface{}{"name": "foo"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 1, Values: map[string]interface{}{"name": 1}, }, { Timestamp: 2, Values: map[string]interface{}{"name": "foo"}, }, }, }, }, }, "merge_always": { processorType: processorType, processor: map[string]interface{}{ "always": true, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Timestamp: 1, Values: map[string]interface{}{"value1": 1}, Tags: map[string]string{"tag1": "1"}, }, { Timestamp: 1, Values: map[string]interface{}{"value2": 2}, Tags: map[string]string{"tag2": "2"}, }, { Timestamp: 1, Values: map[string]interface{}{"value3": 3}, Tags: map[string]string{"tag3": "3"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 1, Values: map[string]interface{}{ "value1": 1, "value2": 2, "value3": 3, }, Tags: map[string]string{ "tag1": "1", "tag2": "2", "tag3": "3", }, }, }, }, { input: []*formatters.EventMsg{ { Timestamp: 1, Values: map[string]interface{}{ "name": 1, }, }, { Timestamp: 2, Values: map[string]interface{}{ "name2": "foo", }, }, }, output: []*formatters.EventMsg{ { Timestamp: 2, Tags: make(map[string]string), Values: map[string]interface{}{ "name": 1, "name2": "foo", }, }, }, }, { input: []*formatters.EventMsg{ { Timestamp: 1, Values: map[string]interface{}{ "name": 1, }, }, { Timestamp: 2, Values: map[string]interface{}{ "name": "foo", }, }, }, output: []*formatters.EventMsg{ { Timestamp: 2, Tags: make(map[string]string), Values: map[string]interface{}{ "name": "foo", }, }, }, }, }, }, } func TestEventMerge(t *testing.T) { for name, ts := range testset { if pi, ok := formatters.EventProcessors[ts.processorType]; ok { t.Log("found processor") p := pi() err := p.Init(ts.processor) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } t.Logf("processor: %+v", p) for i, item := range ts.tests { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) outs := p.Apply(item.input...) for j := range outs { if !reflect.DeepEqual(outs[j], item.output[j]) { t.Errorf("failed at %s item %d, index %d, expected %+v, got: %+v", name, i, j, item.output[j], outs[j]) } } }) } } else { t.Errorf("event processor %s not found", ts.processorType) } } } ================================================ FILE: pkg/formatters/event_override_ts/event_override_ts.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_override_ts import ( "encoding/json" "io" "log" "os" "time" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-override-ts" loggingPrefix = "[" + processorType + "] " ) // overrideTS Overrides the message timestamp with the local time type overrideTS struct { formatters.BaseProcessor Precision string `mapstructure:"precision,omitempty" json:"precision,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &overrideTS{ logger: log.New(io.Discard, "", 0), } }) } func (o *overrideTS) Init(cfg any, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, o) if err != nil { return err } for _, opt := range opts { opt(o) } if o.Precision == "" { o.Precision = "ns" } if o.logger.Writer() != io.Discard { b, err := json.Marshal(o) if err != nil { o.logger.Printf("initialized processor '%s': %+v", processorType, o) return nil } o.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } return nil } func (o *overrideTS) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { for _, e := range es { if e == nil { continue } now := time.Now() o.logger.Printf("setting timestamp to %d with precision %s", now.UnixNano(), o.Precision) switch o.Precision { case "s": e.Timestamp = now.Unix() case "ms": e.Timestamp = now.UnixNano() / 1000000 case "us": e.Timestamp = now.UnixNano() / 1000 case "ns": e.Timestamp = now.UnixNano() } } return es } func (o *overrideTS) WithLogger(l *log.Logger) { if o.Debug && l != nil { o.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if o.Debug { o.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } ================================================ FILE: pkg/formatters/event_override_ts/event_override_ts_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_override_ts import ( "testing" "time" "github.com/openconfig/gnmic/pkg/formatters" ) type item struct { input []*formatters.EventMsg output []*formatters.EventMsg } var now = time.Now() var testset = map[string]struct { processor map[string]interface{} tests []item }{ "ms": { processor: map[string]interface{}{ "type": processorType, "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{}, }, { input: []*formatters.EventMsg{ { Timestamp: 0, }, }, output: []*formatters.EventMsg{ { Timestamp: now.UnixNano() / 1000000, }, }, }, }, }, "ns": { processor: map[string]interface{}{ "type": processorType, "precision": "ns", "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{}, }, { input: []*formatters.EventMsg{ { Timestamp: -1, }, }, output: []*formatters.EventMsg{ { Timestamp: now.UnixNano(), }, }, }, }, }, "us": { processor: map[string]interface{}{ "type": processorType, "precision": "us", "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{}, }, { input: []*formatters.EventMsg{ { Timestamp: -1, }, }, output: []*formatters.EventMsg{ { Timestamp: now.UnixNano() / 1000, }, }, }, }, }, "s": { processor: map[string]interface{}{ "type": processorType, "precision": "s", "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{}, }, { input: []*formatters.EventMsg{ { Timestamp: -1, }, }, output: []*formatters.EventMsg{ { Timestamp: now.Unix(), }, }, }, }, }, } func TestEventDateString(t *testing.T) { for name, ts := range testset { t.Log(name) if typ, ok := ts.processor["type"]; ok { t.Log("found type") if pi, ok := formatters.EventProcessors[typ.(string)]; ok { t.Log("found processor") p := pi() err := p.Init(ts.processor) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } t.Logf("initialized for test %s: %+v", name, p) for i, item := range ts.tests { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) outs := p.Apply(item.input...) for j := range outs { if outs[j].Timestamp < item.output[j].Timestamp { t.Logf("failed at event override_ts, item %d, index %d", i, j) t.Logf("expected: %#v", item.output[j]) t.Logf(" got: %#v", outs[j]) t.Fail() } } }) } } } } } ================================================ FILE: pkg/formatters/event_plugin/plugin.go ================================================ package event_plugin import ( "net/rpc" "github.com/hashicorp/go-plugin" "github.com/openconfig/gnmic/pkg/formatters" ) type EventProcessorPlugin struct { Impl formatters.EventProcessor } func (p *EventProcessorPlugin) Server(*plugin.MuxBroker) (interface{}, error) { return &eventProcessorRPCServer{Impl: p.Impl}, nil } func (p *EventProcessorPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { return &EventProcessorRPC{client: c}, nil } ================================================ FILE: pkg/formatters/event_plugin/rpc.go ================================================ package event_plugin import ( "encoding/gob" "log" "net/rpc" "os" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-plugin" loggingPrefix = "[" + processorType + "] " ) type InitArgs struct { Cfg interface{} } type ApplyArgs struct { Events []*formatters.EventMsg } type ApplyResponse struct { Events []*formatters.EventMsg } type ( Actionresponse struct{} InitResponse struct{} Targetresponse struct{} Proccessorresponse struct{} ) type eventProcessorRPCServer struct { Impl formatters.EventProcessor } func init() { gob.Register(map[string]interface{}{}) gob.Register([]interface{}{}) } func (s *eventProcessorRPCServer) Init(args *InitArgs, resp *InitResponse) error { return s.Impl.Init(args.Cfg) } func (s *eventProcessorRPCServer) Apply(args *ApplyArgs, resp *ApplyResponse) error { resp.Events = s.Impl.Apply(args.Events...) return nil } func (s *eventProcessorRPCServer) WithActions(args map[string]map[string]interface{}, resp *Actionresponse) error { s.Impl.WithActions(args) return nil } func (s *eventProcessorRPCServer) WithTargets(args map[string]*types.TargetConfig, resp *Targetresponse) error { s.Impl.WithTargets(args) return nil } func (s *eventProcessorRPCServer) WithProcessors( args map[string]map[string]interface{}, resp *Proccessorresponse, ) error { s.Impl.WithProcessors(args) return nil } func (s *eventProcessorRPCServer) WithLogger() error { return nil } type EventProcessorRPC struct { client *rpc.Client logger *log.Logger } func (g *EventProcessorRPC) Init(cfg interface{}, opts ...formatters.Option) error { for _, opt := range opts { opt(g) } err := g.client.Call("Plugin.Init", &InitArgs{Cfg: cfg}, &InitResponse{}) if err != nil { return err } return nil } func (g *EventProcessorRPC) Apply(event ...*formatters.EventMsg) []*formatters.EventMsg { var resp ApplyResponse err := g.client.Call("Plugin.Apply", &ApplyArgs{Events: event}, &resp) if err != nil { g.logger.Print("RPC client call error: ", err) return nil } return resp.Events } func (g *EventProcessorRPC) WithActions(act map[string]map[string]interface{}) { err := g.client.Call("Plugin.WithActions", act, &Actionresponse{}) if err != nil { g.logger.Print("RPC client call error: ", err) } } func (g *EventProcessorRPC) WithTargets(tcs map[string]*types.TargetConfig) { err := g.client.Call("Plugin.WithTargets", tcs, &Targetresponse{}) if err != nil { g.logger.Print("RPC client call error: ", err) } } func (g *EventProcessorRPC) WithProcessors(procs map[string]map[string]any) { err := g.client.Call("Plugin.WithProcessors", procs, &Proccessorresponse{}) if err != nil { g.logger.Print("RPC client call error: ", err) } } func (g *EventProcessorRPC) WithLogger(l *log.Logger) { if l == nil { g.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) return } g.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } ================================================ FILE: pkg/formatters/event_rate_limit/event_rate_limit.go ================================================ package event_rate_limit import ( "crypto/sha256" "encoding/hex" "encoding/json" "fmt" "io" "log" "os" "sort" "time" lru "github.com/hashicorp/golang-lru/v2" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-rate-limit" loggingPrefix = "[" + processorType + "] " defaultCacheSize = 1000 oneSecond int64 = int64(time.Second) ) var ( eqChar = []byte("=") lfChar = []byte("\n") ) // rateLimit rate-limits the message to the given rate. type rateLimit struct { formatters.BaseProcessor PerSecondLimit float64 `mapstructure:"per-second,omitempty" json:"per-second,omitempty"` CacheSize int `mapstructure:"cache-size,omitempty" json:"cache-size,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` // eventIndex is an lru cache used to compare the events hash with known value. // LRU cache seems like a good choice because we expect the rate-limiter to be // most useful in burst scenarios. // We need some form of control over the size of the cache to contain RAM usage // so LRU is good in that respect also. eventIndex *lru.Cache[string, int64] logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &rateLimit{ logger: log.New(io.Discard, "", 0), } }) } func (o *rateLimit) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, o) if err != nil { return err } for _, opt := range opts { opt(o) } if o.CacheSize <= 0 { o.logger.Printf("using default value for lru size %d", defaultCacheSize) o.CacheSize = defaultCacheSize } if o.PerSecondLimit <= 0 { return fmt.Errorf("provided limit is %f, must be greater than 0", o.PerSecondLimit) } if o.logger.Writer() != io.Discard { b, err := json.Marshal(o) if err != nil { o.logger.Printf("initialized processor '%s': %+v", processorType, o) return nil } o.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } o.eventIndex, err = lru.New[string, int64](o.CacheSize) if err != nil { return fmt.Errorf("failed to initialize cache: %w", err) } return nil } func (o *rateLimit) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { validEs := make([]*formatters.EventMsg, 0, len(es)) for _, e := range es { if e == nil { continue } h := hashEvent(e) ts, has := o.eventIndex.Get(h) // we check that we have the event hash in the map, if not, it's the first time we see the event if val := float64(e.Timestamp-ts) * o.PerSecondLimit; has && e.Timestamp != ts && int64(val) < oneSecond { // reject event o.logger.Printf("dropping event val %.2f lower than configured rate", val) continue } // retain the last event that passed through o.eventIndex.Add(h, e.Timestamp) validEs = append(validEs, e) } return validEs } func hashEvent(e *formatters.EventMsg) string { h := sha256.New() tagKeys := make([]string, len(e.Tags)) i := 0 for tagKey := range e.Tags { tagKeys[i] = tagKey i++ } sort.Strings(tagKeys) for _, tagKey := range tagKeys { h.Write([]byte(tagKey)) h.Write(eqChar) h.Write([]byte(e.Tags[tagKey])) h.Write(lfChar) } return hex.EncodeToString(h.Sum(nil)) } func (o *rateLimit) WithLogger(l *log.Logger) { if o.Debug && l != nil { o.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if o.Debug { o.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } ================================================ FILE: pkg/formatters/event_rate_limit/event_rate_limit_test.go ================================================ package event_rate_limit import ( "testing" "github.com/openconfig/gnmic/pkg/formatters" ) type item struct { input []*formatters.EventMsg output []*formatters.EventMsg } var testset = map[string]struct { processor map[string]interface{} tests []item }{ "1pps-notags-pass": { processor: map[string]interface{}{ "type": processorType, "debug": true, "per-second": 1.0, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{}, }, { input: []*formatters.EventMsg{ { Timestamp: 0, }, { Timestamp: 1e9 + 1, }, }, output: []*formatters.EventMsg{ { Timestamp: 0, }, { Timestamp: 1e9 + 1, }, }, }, }, }, "1pps-tags-pass": { processor: map[string]interface{}{ "type": processorType, "per-second": 1.0, "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{}, }, { input: []*formatters.EventMsg{ { Timestamp: 0, Tags: map[string]string{ "a": "val-x", "b": "val-y", }, }, { Timestamp: 1, }, { Timestamp: 1 + 1e9, }, { Timestamp: 1e9 + 1, Tags: map[string]string{ "a": "val-x", "b": "val-y", }, }, }, output: []*formatters.EventMsg{ { Timestamp: 0, Tags: map[string]string{ "a": "val-x", "b": "val-y", }, }, { Timestamp: 1, }, { Timestamp: 1 + 1e9, }, { Timestamp: 1e9 + 1, Tags: map[string]string{ "a": "val-x", "b": "val-y", }, }, }, }, }, }, "1pps-notags-drop": { processor: map[string]interface{}{ "type": processorType, "debug": true, "per-second": 1.0, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{}, }, { input: []*formatters.EventMsg{ { Timestamp: 0, }, { Timestamp: 1e9 - 1, }, }, output: []*formatters.EventMsg{ { Timestamp: 0, }, }, }, }, }, "1pps-tags-drop": { processor: map[string]interface{}{ "type": processorType, "per-second": 1.0, "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{}, }, { input: []*formatters.EventMsg{ { Timestamp: 0, Tags: map[string]string{ "a": "val-x", "b": "val-y", }, }, { Timestamp: 1, }, { Timestamp: 1e9 - 1, Tags: map[string]string{ "a": "val-x", "b": "val-y", }, }, }, output: []*formatters.EventMsg{ { Timestamp: 0, Tags: map[string]string{ "a": "val-x", "b": "val-y", }, }, { Timestamp: 1, }, }, }, }, }, "100pps-tags-pass": { processor: map[string]interface{}{ "type": processorType, "per-second": 100.0, "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{}, }, { input: []*formatters.EventMsg{ { Timestamp: 0, Tags: map[string]string{ "a": "val-x", "b": "val-y", }, }, { Timestamp: 1, }, { Timestamp: 1e9 / 100, Tags: map[string]string{ "a": "val-x", "b": "val-y", }, }, }, output: []*formatters.EventMsg{ { Timestamp: 0, Tags: map[string]string{ "a": "val-x", "b": "val-y", }, }, { Timestamp: 1, }, { Timestamp: 1e9 / 100, Tags: map[string]string{ "a": "val-x", "b": "val-y", }, }, }, }, }, }, "100pps-tags-drop": { processor: map[string]interface{}{ "type": processorType, "per-second": 100.0, "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{}, }, { input: []*formatters.EventMsg{ { Timestamp: 0, Tags: map[string]string{ "a": "val-x", "b": "val-y", }, }, { Timestamp: 1, }, { Timestamp: 1e9/100 - 1, Tags: map[string]string{ "a": "val-x", "b": "val-y", }, }, }, output: []*formatters.EventMsg{ { Timestamp: 0, Tags: map[string]string{ "a": "val-x", "b": "val-y", }, }, { Timestamp: 1, }, }, }, }, }, "same-ts-pass": { processor: map[string]interface{}{ "type": processorType, "per-second": 100.0, "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{}, }, { input: []*formatters.EventMsg{ { Timestamp: 0, }, { Timestamp: 0, }, }, output: []*formatters.EventMsg{ { Timestamp: 0, }, { Timestamp: 0, }, }, }, }, }, } func TestRateLimit(t *testing.T) { for name, ts := range testset { t.Log(name) if typ, ok := ts.processor["type"]; ok { t.Log("found type") if pi, ok := formatters.EventProcessors[typ.(string)]; ok { t.Log("found processor") p := pi() err := p.Init(ts.processor, formatters.WithLogger(nil)) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } t.Logf("initialized for test %s: %+v", name, p) for i, item := range ts.tests { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) outs := p.Apply(item.input...) if len(outs) != len(item.output) { t.Logf("failed at event rate_limit, item %d", i) t.Logf("different number of events between output=%d and wanted=%d", len(outs), len(item.output)) t.Fail() } }) } } } } } ================================================ FILE: pkg/formatters/event_starlark/dict.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_starlark import ( "encoding/json" "errors" "fmt" "sort" "go.starlark.net/starlark" ) type isDict interface { starlark.HasSetKey starlark.IterableMapping Clear() error Delete(starlark.Value) (starlark.Value, bool, error) } type dict[K comparable, V any] struct { name string m map[K]V iterCount int frozen bool } func newDict[K comparable, V any](name string, m map[K]V) *dict[K, V] { if m == nil { m = make(map[K]V) } return &dict[K, V]{name: name, m: m} } type builtinMethod func(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) // https://github.com/google/starlark-go/blob/243c74974e97462c5df21338e182470391748b04/starlark/library.go#L147 func builtinAttr(recv starlark.Value, name string, methods map[string]builtinMethod) (starlark.Value, error) { method := methods[name] if method == nil { return starlark.None, fmt.Errorf("no such method %q", name) } // Allocate a closure over 'method'. impl := func(thread *starlark.Thread, b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { return method(b, args, kwargs) } return starlark.NewBuiltin(name, impl).BindReceiver(recv), nil } func builtinAttrNames(methods map[string]builtinMethod) []string { names := make([]string, 0, len(methods)) for name := range methods { names = append(names, name) } sort.Strings(names) return names } // dict implements starlark.Value func (d *dict[K, V]) String() string { b, _ := json.Marshal(d.m) return string(b) } // dict implements starlark.Value func (d *dict[K, V]) Type() string { return d.name } // dict implements starlark.Value func (d *dict[K, V]) Freeze() { d.frozen = true } // dict implements starlark.Value func (d *dict[K, V]) Truth() starlark.Bool { return len(d.m) != 0 } // dict implements starlark.Value func (d *dict[K, V]) Hash() (uint32, error) { return 0, errors.New("dict is not hashable") } // AttrNames implements the starlark.HasAttrs interface. func (d *dict[K, V]) AttrNames() []string { return builtinAttrNames(dictMethods) } // Attr implements the starlark.HasAttrs interface. func (d *dict[K, V]) Attr(name string) (starlark.Value, error) { return builtinAttr(d, name, dictMethods) } var dictMethods = map[string]builtinMethod{ "clear": dictClear, "get": dictGet, "items": dictItems, "keys": dictKeys, "pop": dictPop, "setdefault": dictSetDefault, "update": dictUpdate, "values": dictValues, } // Get implements the starlark.Mapping interface. func (d *dict[K, V]) Get(key starlark.Value) (v starlark.Value, found bool, err error) { k, err := toGoVal(key) if err != nil { return nil, false, err } if kk, ok := k.(K); ok { gv, found := d.m[kk] if !found { return starlark.None, false, nil } vv, err := toStarlarkValue(gv) return vv, true, err } return starlark.None, false, errors.New("key must be of type 'string'") } // SetKey implements the starlark.HasSetKey interface to support map update // using x[k]=v syntax, like a dictionary. func (d *dict[K, V]) SetKey(k, v starlark.Value) error { if d.iterCount > 0 { return fmt.Errorf("cannot insert during iteration") } kk, err := toGoVal(k) if err != nil { return err } key, ok := kk.(K) if !ok { return fmt.Errorf("unexpected key type: %T", kk) } vv, err := toGoVal(v) if err != nil { return err } if val, ok := vv.(V); ok { d.m[key] = val return nil } return fmt.Errorf("unexpected value type: %T", vv) } // Items implements the starlark.IterableMapping interface. func (d *dict[K, V]) Items() []starlark.Tuple { items := make([]starlark.Tuple, 0, len(d.m)) for k, v := range d.m { value, err := toStarlarkValue(v) if err != nil { continue } kk, err := toStarlarkValue(k) if err != nil { continue } pair := starlark.Tuple{kk, value} items = append(items, pair) } return items } func (d *dict[K, V]) Clear() error { if d.iterCount > 0 { return fmt.Errorf("cannot clear dict during iteration") } for k := range d.m { delete(d.m, k) } return nil } func (d *dict[K, V]) Delete(k starlark.Value) (v starlark.Value, found bool, err error) { if d.iterCount > 0 { return nil, false, fmt.Errorf("cannot delete a key during iteration") } gk, err := toGoVal(k) if err != nil { return nil, false, err } gkk, ok := gk.(K) if !ok { return nil, false, fmt.Errorf("unexpected key type: %T", gk) } value, ok := d.m[gkk] if ok { delete(d.m, gkk) v, err := toStarlarkValue(value) return v, ok, err } return starlark.None, false, nil } // Iterate implements the starlark.Iterator interface. func (d *dict[K, V]) Iterate() starlark.Iterator { d.iterCount++ tags := make([]*tag[K, V], 0, len(d.m)) for k, v := range d.m { tags = append(tags, &tag[K, V]{key: k, value: v}) } return &dictIterator[K, V]{ dict: &dict[K, V]{m: d.m}, tags: tags, } } type tag[K, V any] struct { key K value V } type dictIterator[K comparable, V any] struct { *dict[K, V] tags []*tag[K, V] } // Next implements the starlark.Iterator interface. func (i *dictIterator[K, V]) Next(p *starlark.Value) bool { if len(i.tags) == 0 { return false } tag := i.tags[0] i.tags = i.tags[1:] sk, err := toStarlarkValue(tag.key) if err != nil { return false } *p = sk return true } // Done implements the starlark.Iterator interface. func (i *dictIterator[K, V]) Done() { i.iterCount-- } // --- dictionary methods --- // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·clear func dictClear(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } return starlark.None, b.Receiver().(isDict).Clear() } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·pop func dictPop(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var k, d starlark.Value if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &k, &d); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } v, found, err := b.Receiver().(isDict).Delete(k) if err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } if found { return v, nil } if d != nil { return d, nil } return starlark.None, fmt.Errorf("%s: missing key", b.Name()) } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·get func dictGet(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var key, d starlark.Value if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &key, &d); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } v, ok, err := b.Receiver().(isDict).Get(key) if err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } if ok { return v, nil } if d != nil { return d, nil } return starlark.None, nil } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·setdefault func dictSetDefault(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var key starlark.Value var d = starlark.None if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 1, &key, &d); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } recv := b.Receiver().(isDict) v, found, err := recv.Get(key) if err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } if !found { v = d if err := recv.SetKey(key, d); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } } return v, nil } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·update func dictUpdate(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { // Unpack the arguments if len(args) > 1 { return nil, fmt.Errorf("update: got %d arguments, want at most 1", len(args)) } // Get the target recv := b.Receiver().(isDict) if len(args) == 1 { switch updates := args[0].(type) { case starlark.IterableMapping: // Iterate over dict's key/value pairs, not just keys. for _, item := range updates.Items() { if err := recv.SetKey(item[0], item[1]); err != nil { return nil, err // dict is frozen } } case starlark.Iterable: // all other sequences iter := starlark.Iterate(updates) if iter == nil { return nil, fmt.Errorf("got %s, want iterable", updates.Type()) } defer iter.Done() var pair starlark.Value for i := 0; iter.Next(&pair); i++ { iter2 := starlark.Iterate(pair) if iter2 == nil { return nil, fmt.Errorf("dictionary update sequence element #%d is not iterable (%s)", i, pair.Type()) } defer iter2.Done() length := starlark.Len(pair) if length < 0 { return nil, fmt.Errorf("dictionary update sequence element #%d has unknown length (%s)", i, pair.Type()) } if length != 2 { return nil, fmt.Errorf("dictionary update sequence element #%d has length %d, want 2", i, length) } var k, v starlark.Value iter2.Next(&k) iter2.Next(&v) err := recv.SetKey(k, v) if err != nil { return nil, err } } default: return nil, errors.New("cannot update dict: update values are not iterable") } } // Then add the kwargs. before := starlark.Len(recv) for _, pair := range kwargs { if err := recv.SetKey(pair[0], pair[1]); err != nil { return nil, err // dict is frozen } } // In the common case, each kwarg will add another dict entry. // If that's not so, check whether it is because there was a duplicate kwarg. if starlark.Len(recv) < before+len(kwargs) { keys := make(map[starlark.String]bool, len(kwargs)) for _, kv := range kwargs { k := kv[0].(starlark.String) if keys[k] { return nil, fmt.Errorf("duplicate keyword arg: %v", k) } keys[k] = true } } return starlark.None, nil } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·items func dictItems(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } items := b.Receiver().(isDict).Items() res := make([]starlark.Value, len(items)) for i, item := range items { res[i] = item // convert [2]starlark.Value to starlark.Value } return starlark.NewList(res), nil } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·keys func dictKeys(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } items := b.Receiver().(isDict).Items() res := make([]starlark.Value, len(items)) for i, item := range items { res[i] = item[0] } return starlark.NewList(res), nil } // https://github.com/google/starlark-go/blob/master/doc/spec.md#dict·update func dictValues(b *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { if err := starlark.UnpackPositionalArgs(b.Name(), args, kwargs, 0); err != nil { return starlark.None, fmt.Errorf("%s: %v", b.Name(), err) } items := b.Receiver().(isDict).Items() res := make([]starlark.Value, len(items)) for i, item := range items { res[i] = item[1] } return starlark.NewList(res), nil } ================================================ FILE: pkg/formatters/event_starlark/event.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_starlark import ( "encoding/json" "errors" "fmt" "reflect" "go.starlark.net/starlark" "github.com/openconfig/gnmic/pkg/formatters" ) type event struct { ev *formatters.EventMsg frozen bool } func fromEvent(ev *formatters.EventMsg) *event { return &event{ ev: ev, } } func toEvent(sev *event) *formatters.EventMsg { if sev == nil { return nil } return sev.ev } // *event implements starlark.Value func (s *event) String() string { b, _ := json.Marshal(s.ev) return string(b) } // *event implements starlark.Value func (s *event) Type() string { return "Event" } // *event implements starlark.Value func (s *event) Freeze() { s.frozen = true } // *event implements starlark.Value func (s *event) Truth() starlark.Bool { return starlark.True } // *event implements starlark.Value func (s *event) Hash() (uint32, error) { return 0, errors.New("not hashable") } // *event implements the starlark.HasAttrs interface. func (s *event) AttrNames() []string { return []string{"name", "timestamp", "tags", "values", "deletes"} } // *event implements the starlark.HasAttrs interface. func (s *event) Attr(name string) (starlark.Value, error) { switch name { case "name": return starlark.String(s.ev.Name), nil case "timestamp": return starlark.MakeInt64(s.ev.Timestamp), nil case "tags": return s.Tags(), nil case "values": return s.Values(), nil case "deletes": return s.Deletes(), nil default: // Returning nil, nil indicates "no such field or method" return nil, nil } } // *event implements the starlark.HasSetField interface. func (s *event) SetField(name string, value starlark.Value) error { if s.frozen { return fmt.Errorf("cannot modify frozen event struct") } switch name { case "name": return s.SetName(value) case "timestamp": return s.SetTimestamp(value) case "tags": return s.SetTags(value) case "values": return s.SetValues(value) case "deletes": return s.SetDeletes(value) default: return starlark.NoSuchAttrError( fmt.Sprintf("cannot assign to field %q", name)) } } func (s *event) SetName(name starlark.Value) error { if name, ok := name.(starlark.String); ok { s.ev.Name = name.GoString() return nil } return fmt.Errorf("name not a string, %T", name) } func (s *event) Tags() starlark.Value { return newDict("Tags", s.ev.Tags) } func (s *event) Values() starlark.Value { return newDict("Values", s.ev.Values) } func (s *event) Deletes() starlark.Value { if len(s.ev.Deletes) == 0 { return &starlark.List{} } result := &starlark.List{} for _, s := range s.ev.Deletes { v, _ := toStarlarkValue(s) result.Append(v) } return result } func (s *event) Timestamp() starlark.Int { return starlark.MakeInt64(s.ev.Timestamp) } func (s *event) SetTimestamp(value starlark.Value) error { switch v := value.(type) { case starlark.Int: ns, ok := v.Int64() if !ok { return errors.New("type error: expected int64 timestamp") } s.ev.Timestamp = ns return nil default: return fmt.Errorf("type error: got %T", v) } } func (s *event) SetTags(value starlark.Value) error { tags, err := toTags(value) if err != nil { return err } s.ev.Tags = tags return nil } func (s *event) SetValues(value starlark.Value) error { vals, err := toValues(value) if err != nil { return err } s.ev.Values = vals return nil } func (s *event) SetDeletes(value starlark.Value) error { dels, err := toDeletes(value) if err != nil { return err } s.ev.Deletes = dels return nil } func newEvent(_ *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var name starlark.String var ts starlark.Int var tags starlark.Value var values starlark.Value var deletes starlark.Value err := starlark.UnpackArgs("Event", args, kwargs, "name", &name, "timestamp?", &ts, "tags?", &tags, "values?", &values, "deletes?", &deletes, ) if err != nil { return nil, err } vs, err := toValues(values) if err != nil { return nil, err } tgs, err := toTags(tags) if err != nil { return nil, err } dels, err := toDeletes(deletes) if err != nil { return nil, err } timestamp, ok := ts.Int64() if !ok { return nil, fmt.Errorf("failed to represent %v as int64", ts) } ev := &formatters.EventMsg{ Name: string(name), Timestamp: timestamp, Tags: tgs, Values: vs, Deletes: dels, } return &event{ ev: ev, }, nil } func toValues(value starlark.Value) (map[string]any, error) { if value == nil { return make(map[string]any), nil } if value, ok := value.(starlark.IterableMapping); ok { result := make(map[string]any) var err error for _, item := range value.Items() { k, ok := item[0].(starlark.String) if !ok { return nil, fmt.Errorf("failed to represent value name %v as string", item[0]) } result[k.GoString()], err = toGoVal(item[1]) if err != nil { return nil, err } } return result, nil } return nil, errors.New("unexpected iterable type in values field") } func toTags(value starlark.Value) (map[string]string, error) { if value == nil { return make(map[string]string), nil } if value, ok := value.(starlark.IterableMapping); ok { result := make(map[string]string) for _, item := range value.Items() { k, ok := item[0].(starlark.String) if !ok { return nil, fmt.Errorf("failed to represent value name %v as string", item[0]) } v, ok := item[1].(starlark.String) if !ok { return nil, fmt.Errorf("failed to represent value name %v as string", item[1]) } result[k.GoString()] = v.GoString() } return result, nil } return nil, errors.New("unexpected iterable type in tags field") } func toDeletes(value starlark.Value) ([]string, error) { if value == nil { return []string{}, nil } if value, ok := value.(starlark.Sequence); ok { iter := value.Iterate() defer iter.Done() result := make([]string, 0, value.Len()) for { var item starlark.Value if iter.Next(&item) { if s, ok := item.(starlark.String); ok { result = append(result, s.GoString()) continue } return nil, errors.New("sequence item is not a 'string") } break } return result, nil } return nil, errors.New("unexpected iterable type in deletes field") } // toStarlarkValue converts a value to a starlark.Value. func toStarlarkValue(value any) (starlark.Value, error) { v := reflect.ValueOf(value) switch v.Kind() { case reflect.Slice: length := v.Len() array := make([]starlark.Value, 0, length) for i := 0; i < length; i++ { sVal, err := toStarlarkValue(v.Index(i).Interface()) if err != nil { return starlark.None, err } array = append(array, sVal) } return starlark.NewList(array), nil case reflect.Map: dict := starlark.NewDict(v.Len()) iter := v.MapRange() for iter.Next() { sKey, err := toStarlarkValue(iter.Key().Interface()) if err != nil { return starlark.None, err } sValue, err := toStarlarkValue(iter.Value().Interface()) if err != nil { return starlark.None, err } dict.SetKey(sKey, sValue) } return dict, nil case reflect.Float32, reflect.Float64: return starlark.Float(v.Float()), nil case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return starlark.MakeInt64(v.Int()), nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return starlark.MakeUint64(v.Uint()), nil case reflect.String: return starlark.String(v.String()), nil case reflect.Bool: return starlark.Bool(v.Bool()), nil } return starlark.None, errors.New("invalid type") } func toGoVal(value starlark.Value) (any, error) { switch v := value.(type) { case starlark.Float: return float64(v), nil case starlark.Int: n, ok := v.Int64() if !ok { return nil, errors.New("cannot represent integer as int64") } return n, nil case starlark.String: return string(v), nil case starlark.Bool: return bool(v), nil } return nil, errors.New("invalid starlark type") } func copyEvent(_ *starlark.Thread, _ *starlark.Builtin, args starlark.Tuple, kwargs []starlark.Tuple) (starlark.Value, error) { var sm *event if err := starlark.UnpackPositionalArgs("copy_event", args, kwargs, 1, &sm); err != nil { return nil, err } tags := make(map[string]string) values := make(map[string]any) for k, v := range sm.ev.Tags { tags[k] = v } for k, v := range sm.ev.Values { values[k] = v } dup := &event{ ev: &formatters.EventMsg{ Name: sm.ev.Name, Timestamp: sm.ev.Timestamp, Tags: tags, Values: values, }, } return dup, nil } ================================================ FILE: pkg/formatters/event_starlark/event_starlark.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_starlark import ( "encoding/json" "errors" "fmt" "io" "log" "os" "sync" "go.starlark.net/lib/math" "go.starlark.net/lib/time" "go.starlark.net/starlark" "go.starlark.net/syntax" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-starlark" loggingPrefix = "[" + processorType + "] " ) // starlarkProc runs a starlark script on the received events type starlarkProc struct { formatters.BaseProcessor Script string `mapstructure:"script,omitempty" json:"script,omitempty"` Source string `mapstructure:"source,omitempty" json:"source,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` // this mutex ensures batches of events are processed in sequence m sync.Mutex thread *starlark.Thread applyFn starlark.Value logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &starlarkProc{ logger: log.New(io.Discard, "", 0), } }) } func (p *starlarkProc) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, p) if err != nil { return err } for _, opt := range opts { opt(p) } err = p.validate() if err != nil { return err } p.thread = &starlark.Thread{ Print: func(_ *starlark.Thread, msg string) { p.logger.Printf("print(): %v", msg) }, Load: func(_ *starlark.Thread, module string) (starlark.StringDict, error) { return loadModule(module) }, } // sourceProgram builtins := starlark.StringDict{} builtins["Event"] = starlark.NewBuiltin("Event", newEvent) builtins["copy_event"] = starlark.NewBuiltin("copy_event", copyEvent) prog, err := p.sourceProgram(builtins) if err != nil { return err } globals, err := prog.Init(p.thread, builtins) if err != nil { return err } if !globals.Has("apply") { return errors.New("missing global function apply") } p.applyFn = globals["apply"] globals["cache"] = starlark.NewDict(0) globals.Freeze() if p.logger.Writer() != io.Discard { b, err := json.Marshal(p) if err != nil { p.logger.Printf("initialized processor '%s': %+v", processorType, p) return nil } p.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } return nil } func (p *starlarkProc) validate() error { if p.Source == "" && p.Script == "" { return errors.New("one of 'script' or 'source' must be set") } if p.Source != "" && p.Script != "" { return errors.New("only one of 'script' or 'source' can be set") } return nil } func (p *starlarkProc) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { p.m.Lock() defer p.m.Unlock() numMsgs := len(es) if numMsgs == 0 { return es } sevs := make([]starlark.Value, 0, numMsgs) for _, ev := range es { if ev.Tags == nil { ev.Tags = make(map[string]string) } if ev.Values == nil { ev.Values = make(map[string]any) } if ev.Deletes == nil { ev.Deletes = make([]string, 0) } sevs = append(sevs, fromEvent(ev)) } if len(sevs) == 0 { return es } if p.Debug { p.logger.Printf("events input: %v", sevs) } r, err := starlark.Call(p.thread, p.applyFn, sevs, nil) if err != nil { if p.Debug { p.logger.Printf("failed to run script with input %v: %v", sevs, err) } else { p.logger.Printf("failed to run script: %v", err) } return es } if p.Debug { p.logger.Printf("script output: %+v", r) } // r must implement .Iterate() and .Len() if r, ok := r.(starlark.Sequence); ok { res := make([]*formatters.EventMsg, 0, r.Len()) iter := r.Iterate() defer r.Iterate().Done() for { var v starlark.Value ok := iter.Next(&v) if !ok { break } switch v := v.(type) { case *event: res = append(res, toEvent(v)) default: p.logger.Printf("unexpected return type: %T", v) continue } } if p.Debug { p.logger.Printf("resulting events: %v", res) } return res } p.logger.Printf("unexpected script output format, expecting a Sequence of Event, got %T", r) return es } func (p *starlarkProc) WithLogger(l *log.Logger) { if l != nil { p.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if p.Debug { p.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } func (p *starlarkProc) sourceProgram(builtins starlark.StringDict) (*starlark.Program, error) { var src any if p.Source != "" { src = p.Source } options := &syntax.FileOptions{ Set: true, GlobalReassign: true, Recursion: true, } _, program, err := starlark.SourceProgramOptions(options, p.Script, src, builtins.Has) return program, err } func loadModule(module string) (starlark.StringDict, error) { switch module { case "math.star": return starlark.StringDict{ "math": math.Module, }, nil case "time.star": return starlark.StringDict{ "time": time.Module, }, nil default: return nil, fmt.Errorf("module %q unknown", module) } } ================================================ FILE: pkg/formatters/event_starlark/event_starlark_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_starlark import ( "log" "os" "reflect" "testing" "github.com/openconfig/gnmic/pkg/formatters" ) func Test_starlarkProc_Apply(t *testing.T) { type fields struct { cfg map[string]interface{} } type args struct { es []*formatters.EventMsg } tests := []struct { name string fields fields args args want []*formatters.EventMsg }{ { name: "print", fields: fields{ cfg: map[string]interface{}{ "debug": true, "source": ` def apply(*events): for e in events: print(e) return events `, }, }, args: args{ es: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, { Name: "ev2", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, }, }, want: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, { Name: "ev2", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, }, }, { name: "add_tag", fields: fields{ cfg: map[string]interface{}{ "debug": true, "source": ` def apply(*events): for e in events: e.tags["new_tag"] = "new_tag" return events `, }, }, args: args{ es: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, { Name: "ev2", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, }, }, want: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", "new_tag": "new_tag", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, { Name: "ev2", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", "new_tag": "new_tag", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, }, }, { name: "delete_tag", fields: fields{ cfg: map[string]interface{}{ "debug": true, "source": ` def apply(*events): for e in events: e.tags.pop("tag1") return events `, }, }, args: args{ es: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, { Name: "ev2", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, }, }, want: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, { Name: "ev2", Timestamp: 42, Tags: map[string]string{ "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, }, }, { name: "add_value", fields: fields{ cfg: map[string]interface{}{ "debug": true, "source": ` def apply(*events): for e in events: e.values["new_val"] = "val" return events `, }, }, args: args{ es: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, { Name: "ev2", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, }, }, want: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", "new_val": "val", }, }, { Name: "ev2", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", "new_val": "val", }, }, }, }, { name: "delete_val", fields: fields{ cfg: map[string]interface{}{ "debug": true, "source": ` def apply(*events): for e in events: e.values.pop("val1") return events `, }, }, args: args{ es: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, { Name: "ev2", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val2": "foo", }, }, }, }, want: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val2": "foo", }, }, { Name: "ev2", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val2": "foo", }, }, }, }, { name: "insert_event1", fields: fields{ cfg: map[string]interface{}{ "debug": true, "source": ` def apply(*events): ne = Event("new_event") ne.tags["tag1"] = "tag1" evs = list(events) evs.append(ne) return evs `, }, }, args: args{ es: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, { Name: "ev2", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, }, }, want: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, { Name: "ev2", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, { Name: "new_event", Tags: map[string]string{ "tag1": "tag1", }, Values: map[string]interface{}{}, }, }, }, { name: "insert_event2", fields: fields{ cfg: map[string]interface{}{ "debug": true, "source": ` def apply(*events): ne = Event("new_event", 42, {"a": "b"}, {"foo": "bar"}) print(ne) evs = list(events) evs.append(ne) return evs`, }, }, args: args{ es: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, { Name: "ev2", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, }, }, want: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, { Name: "ev2", Timestamp: 42, Tags: map[string]string{ "tag1": "v1", "tag2": "v2", }, Values: map[string]interface{}{ "val1": 42, "val2": "foo", }, }, { Name: "new_event", Timestamp: 42, Tags: map[string]string{ "a": "b", }, Values: map[string]interface{}{ "foo": "bar", }, }, }, }, { name: "use_cache", fields: fields{ cfg: map[string]interface{}{ "debug": true, "source": ` cache = {} def apply(*events): evs = [] for e in events: target_if = e.tags["target"] + "_" + e.tags["interface_name"] if e.values.get("description"): cache[target_if] = e.values["description"] for e in events: if e.values.get("description"): continue e.tags["description"] = cache[target_if] evs.append(e) return evs `, }, }, args: args{ es: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "target": "router1", "interface_name": "if1", }, Values: map[string]interface{}{ "description": "foo", }, }, { Name: "ev2", Timestamp: 42, Tags: map[string]string{ "target": "router1", "interface_name": "if1", }, Values: map[string]interface{}{ "val1": 42, }, }, }, }, want: []*formatters.EventMsg{ { Name: "ev2", Timestamp: 42, Tags: map[string]string{ "target": "router1", "interface_name": "if1", "description": "foo", }, Values: map[string]interface{}{ "val1": 42, }, }, }, }, { name: "set_tags", fields: fields{ cfg: map[string]interface{}{ "debug": true, "source": ` def apply(*events): for e in events: e.tags = {"t1": "v1"} return events `, }, }, args: args{ es: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "target": "router1", "interface_name": "if1", }, Values: map[string]interface{}{ "val1": 42, }, }, }, }, want: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "t1": "v1", }, Values: map[string]interface{}{ "val1": 42, }, }, }, }, { name: "set_values", fields: fields{ cfg: map[string]interface{}{ "debug": true, "source": ` def apply(*events): for e in events: e.values = {"t1": "v1"} return events `, }, }, args: args{ es: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "target": "router1", "interface_name": "if1", }, Values: map[string]interface{}{ "val1": 42, }, }, }, }, want: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "target": "router1", "interface_name": "if1", }, Values: map[string]interface{}{ "t1": "v1", }, }, }, }, { name: "set_deletes", fields: fields{ cfg: map[string]interface{}{ "debug": true, "source": ` cache = {} def apply(*events): for e in events: e.deletes = ["path1", "path2"] return events `, }, }, args: args{ es: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "target": "router1", "interface_name": "if1", }, Values: map[string]interface{}{ "val1": 42, }, }, }, }, want: []*formatters.EventMsg{ { Name: "ev1", Timestamp: 42, Tags: map[string]string{ "target": "router1", "interface_name": "if1", }, Values: map[string]interface{}{ "val1": 42, }, Deletes: []string{"path1", "path2"}, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { p := &starlarkProc{} err := p.Init(tt.fields.cfg, formatters.WithLogger(log.New(os.Stderr, "test", log.Default().Flags()))) if err != nil { t.Errorf("%q failed to init processor: %v", tt.name, err) t.Fail() } got := p.Apply(tt.args.es...) t.Logf("got : %v", got) t.Logf("want: %v", tt.want) // compare lengths first if len(got) != len(tt.want) { t.Logf("expected and gotten outputs are not of the same length") t.Logf("expected: %+v", tt.want) t.Logf(" got: %+v", got) t.Fail() } // for j := range got { t.Logf("%q index %d, output=%+v", tt.name, j, got[j]) if !reflect.DeepEqual(got[j].Values, tt.want[j].Values) { t.Logf("failed at %s index %d, values are different", tt.name, j) t.Logf("expected: %+v", tt.want[j]) t.Logf(" got: %+v", got[j]) t.Fail() } if !reflect.DeepEqual(got[j].Tags, tt.want[j].Tags) { t.Logf("failed at %s index %d, tags are different", tt.name, j) t.Logf("expected: %+v", tt.want[j]) t.Logf(" got: %+v", got[j]) t.Fail() } if !reflect.DeepEqual(got[j].Name, tt.want[j].Name) { t.Logf("failed at %s index %d, names are different", tt.name, j) t.Logf("expected: %+v", tt.want[j]) t.Logf(" got: %+v", got[j]) t.Fail() } if !reflect.DeepEqual(got[j].Timestamp, tt.want[j].Timestamp) { t.Logf("failed at %s index %d, timestamps are different", tt.name, j) t.Logf("expected: %+v", tt.want[j]) t.Logf(" got: %+v", got[j]) t.Fail() } } }) } } ================================================ FILE: pkg/formatters/event_strings/event_strings.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_strings import ( "encoding/json" "io" "log" "os" "path/filepath" "regexp" "strings" "golang.org/x/text/cases" "golang.org/x/text/language" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-strings" loggingPrefix = "[" + processorType + "] " nameField = "name" valueField = "value" ) // stringsp provides some of Golang's strings functions to transform: tags, tag names, values and value names type stringsp struct { formatters.BaseProcessor Tags []string `mapstructure:"tags,omitempty" json:"tags,omitempty"` Values []string `mapstructure:"values,omitempty" json:"values,omitempty"` TagNames []string `mapstructure:"tag-names,omitempty" json:"tag-names,omitempty"` ValueNames []string `mapstructure:"value-names,omitempty" json:"value-names,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` Transforms []map[string]*transform `mapstructure:"transforms,omitempty" json:"transforms,omitempty"` tags []*regexp.Regexp values []*regexp.Regexp tagKeys []*regexp.Regexp valueKeys []*regexp.Regexp logger *log.Logger } type transform struct { op string // apply the transformation on name or value ApplyOn string `mapstructure:"apply-on,omitempty" json:"apply-on,omitempty"` // Keep the old value or not if the name changed Keep bool `mapstructure:"keep,omitempty" json:"keep,omitempty"` // string to be replaced Old string `mapstructure:"old,omitempty" json:"old,omitempty"` // replacement string of Old New string `mapstructure:"new,omitempty" json:"new,omitempty"` // Prefix to be trimmed Prefix string `mapstructure:"prefix,omitempty" json:"prefix,omitempty"` // Suffix to be trimmed Suffix string `mapstructure:"suffix,omitempty" json:"suffix,omitempty"` // character to split on SplitOn string `mapstructure:"split-on,omitempty" json:"split-on,omitempty"` // character to join with JoinWith string `mapstructure:"join-with,omitempty" json:"join-with,omitempty"` // number of first items to ignore when joining IgnoreFirst int `mapstructure:"ignore-first,omitempty" json:"ignore-first,omitempty"` // number of last items to ignore when joining IgnoreLast int `mapstructure:"ignore-last,omitempty" json:"ignore-last,omitempty"` // replaceRegexp *regexp.Regexp } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &stringsp{ logger: log.New(io.Discard, "", 0), } }) } func (s *stringsp) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, s) if err != nil { return err } for _, opt := range opts { opt(s) } for i := range s.Transforms { for k := range s.Transforms[i] { s.Transforms[i][k].op = k switch k { case "replace": s.Transforms[i][k].replaceRegexp, err = regexp.Compile(s.Transforms[i][k].Old) if err != nil { return err } } } } // init tags regex s.tags = make([]*regexp.Regexp, 0, len(s.Tags)) for _, reg := range s.Tags { re, err := regexp.Compile(reg) if err != nil { return err } s.tags = append(s.tags, re) } // init tag names regex s.tagKeys = make([]*regexp.Regexp, 0, len(s.TagNames)) for _, reg := range s.TagNames { re, err := regexp.Compile(reg) if err != nil { return err } s.tagKeys = append(s.tagKeys, re) } // init values regex s.values = make([]*regexp.Regexp, 0, len(s.Values)) for _, reg := range s.Values { re, err := regexp.Compile(reg) if err != nil { return err } s.values = append(s.values, re) } // init value Keys regex s.valueKeys = make([]*regexp.Regexp, 0, len(s.ValueNames)) for _, reg := range s.ValueNames { re, err := regexp.Compile(reg) if err != nil { return err } s.valueKeys = append(s.valueKeys, re) } if s.logger.Writer() != io.Discard { b, err := json.Marshal(s) if err != nil { s.logger.Printf("initialized processor '%s': %+v", processorType, s) return nil } s.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } return nil } func (s *stringsp) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { for _, e := range es { if e == nil { continue } for k, v := range e.Values { for _, re := range s.valueKeys { if re.MatchString(k) { s.logger.Printf("value name '%s' matched regex '%s'", k, re.String()) s.applyValueTransformations(e, k, v) } } for _, re := range s.values { if vs, ok := v.(string); ok { if re.MatchString(vs) { s.logger.Printf("value '%s' matched regex '%s'", vs, re.String()) s.applyValueTransformations(e, k, vs) } } } } for k, v := range e.Tags { for _, re := range s.tagKeys { if re.MatchString(k) { s.logger.Printf("tag name '%s' matched regex '%s'", k, re.String()) s.applyTagTransformations(e, k, v) } } for _, re := range s.tags { if re.MatchString(v) { s.logger.Printf("tag '%s' matched regex '%s'", k, re.String()) s.applyTagTransformations(e, k, v) } } } } return es } func (s *stringsp) WithLogger(l *log.Logger) { if s.Debug && l != nil { s.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if s.Debug { s.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } func (s *stringsp) applyValueTransformations(e *formatters.EventMsg, k string, v interface{}) { for _, trans := range s.Transforms { for _, t := range trans { if !t.Keep { delete(e.Values, k) } k, v = t.apply(k, v) e.Values[k] = v } } } func (s *stringsp) applyTagTransformations(e *formatters.EventMsg, k, v string) { for _, trans := range s.Transforms { for _, t := range trans { if !t.Keep { delete(e.Tags, k) } var vi interface{} k, vi = t.apply(k, v) if vs, ok := vi.(string); ok { e.Tags[k] = vs v = vs // change the original value in case it's used in the next transform continue } s.logger.Printf("failed to assert %v type as string", vi) } } } func (t *transform) apply(k string, v interface{}) (string, interface{}) { switch t.op { case "replace": return t.replace(k, v) case "trim-prefix": return t.trimPrefix(k, v) case "trim-suffix": return t.trimSuffix(k, v) case "title": return t.toTitle(k, v) case "to-lower": return t.toLower(k, v) case "to-upper": return t.toUpper(k, v) case "split": return t.split(k, v) case "path-base": return t.pathBase(k, v) } return k, v } func (t *transform) replace(k string, v interface{}) (string, interface{}) { switch t.ApplyOn { case nameField: k = t.replaceRegexp.ReplaceAllString(k, t.New) case valueField: if vs, ok := v.(string); ok { v = t.replaceRegexp.ReplaceAllString(vs, t.New) } } return k, v } func (t *transform) trimPrefix(k string, v interface{}) (string, interface{}) { switch t.ApplyOn { case nameField: k = strings.TrimPrefix(k, t.Prefix) case valueField: if vs, ok := v.(string); ok { v = strings.TrimPrefix(vs, t.Prefix) } } return k, v } func (t *transform) trimSuffix(k string, v interface{}) (string, interface{}) { switch t.ApplyOn { case nameField: k = strings.TrimSuffix(k, t.Suffix) case valueField: if vs, ok := v.(string); ok { v = strings.TrimSuffix(vs, t.Suffix) } } return k, v } func (t *transform) toTitle(k string, v interface{}) (string, interface{}) { switch t.ApplyOn { case nameField: k = cases.Title(language.English).String(k) case valueField: if vs, ok := v.(string); ok { v = cases.Title(language.English).String(vs) } } return k, v } func (t *transform) toLower(k string, v interface{}) (string, interface{}) { switch t.ApplyOn { case nameField: k = strings.ToLower(k) case valueField: if vs, ok := v.(string); ok { v = strings.ToLower(vs) } } return k, v } func (t *transform) toUpper(k string, v interface{}) (string, interface{}) { switch t.ApplyOn { case nameField: k = strings.ToUpper(k) case valueField: if vs, ok := v.(string); ok { v = strings.ToUpper(vs) } } return k, v } func (t *transform) split(k string, v interface{}) (string, interface{}) { switch t.ApplyOn { case nameField: items := strings.Split(k, t.SplitOn) numItems := len(items) if numItems <= t.IgnoreFirst || numItems <= t.IgnoreLast || t.IgnoreFirst >= numItems-t.IgnoreLast { return "", v } k = strings.Join(items[t.IgnoreFirst:numItems-t.IgnoreLast], t.JoinWith) case valueField: if vs, ok := v.(string); ok { items := strings.Split(vs, t.SplitOn) numItems := len(items) if numItems <= t.IgnoreFirst || numItems <= t.IgnoreLast || t.IgnoreFirst >= numItems-t.IgnoreLast { return k, "" } v = strings.Join(items[t.IgnoreFirst:numItems-t.IgnoreLast], t.JoinWith) } } return k, v } func (t *transform) pathBase(k string, v interface{}) (string, interface{}) { switch t.ApplyOn { case nameField: k = filepath.Base(k) case valueField: if vs, ok := v.(string); ok { v = filepath.Base(vs) } } return k, v } ================================================ FILE: pkg/formatters/event_strings/event_strings_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_strings import ( "reflect" "testing" "github.com/openconfig/gnmic/pkg/formatters" ) type item struct { input []*formatters.EventMsg output []*formatters.EventMsg } var testset = map[string]struct { processorType string processor map[string]interface{} tests []item }{ "replace": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"^name$"}, "tag-names": []string{"^tag$"}, "debug": true, "transforms": []map[string]*transform{ { "replace": &transform{ ApplyOn: "name", Old: "name", New: "new_name", }, }, { "replace": &transform{ ApplyOn: "name", Old: "tag", New: "new_tag", }, }, }, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{ "name": "foo", }}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{ "new_name": "foo", }}, }, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{ "tag": "foo", }}, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "new_tag": "foo", }}, }, }, }, }, "trim_prefix": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"^prefix_"}, "transforms": []map[string]*transform{ { "trim-prefix": &transform{ ApplyOn: "name", Prefix: "prefix_", }, }, }, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{ "prefix_name": "foo", }, Values: map[string]interface{}{ "prefix_name": "foo", }}, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "prefix_name": "foo", }, Values: map[string]interface{}{ "name": "foo", }}, }, }, }, }, "trim-suffix": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"_suffix$"}, "transforms": []map[string]*transform{ { "trim-suffix": &transform{ ApplyOn: "name", Suffix: "_suffix", }, }, }, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{ "name_suffix": "foo", }, Values: map[string]interface{}{ "name_suffix": "foo", }}, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "name_suffix": "foo", }, Values: map[string]interface{}{ "name": "foo", }}, }, }, }, }, "title": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"title"}, "transforms": []map[string]*transform{ { "title": &transform{ ApplyOn: "name", }, }, }, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{ "title": "foo", }, Values: map[string]interface{}{ "title": "foo", }}, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "title": "foo", }, Values: map[string]interface{}{ "Title": "foo", }}, }, }, }, }, "to_upper": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"to_be_capitalized"}, "transforms": []map[string]*transform{ { "to-upper": &transform{ ApplyOn: "name", }, }, }, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{ "to_be_capitalized": "foo", }, Values: map[string]interface{}{ "to_be_capitalized": "foo", }}, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "to_be_capitalized": "foo", }, Values: map[string]interface{}{ "TO_BE_CAPITALIZED": "foo", }}, }, }, }, }, "to_lower": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"TO_BE_LOWERED"}, "transforms": []map[string]*transform{ { "to-lower": &transform{ ApplyOn: "name", }, }, }, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{ "TO_BE_LOWERED": "foo", }, Values: map[string]interface{}{ "TO_BE_LOWERED": "foo", }}, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "TO_BE_LOWERED": "foo", }, Values: map[string]interface{}{ "to_be_lowered": "foo", }}, }, }, }, }, "split": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"path/to/a/resource"}, "transforms": []map[string]*transform{ { "split": &transform{ ApplyOn: "name", SplitOn: "/", JoinWith: "_", IgnoreFirst: 2, }, }, }, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{ "path/to/a/resource": "foo", }, Values: map[string]interface{}{ "path/to/a/resource": "foo", }}, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "path/to/a/resource": "foo", }, Values: map[string]interface{}{ "a_resource": "foo", }}, }, }, }, }, "path_base": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"path/to/a/resource"}, "transforms": []map[string]*transform{ { "path-base": &transform{ ApplyOn: "name", }, }, }, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{ "path/to/a/resource": "foo", }, Values: map[string]interface{}{ "path/to/a/resource": "foo", }}, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "path/to/a/resource": "foo", }, Values: map[string]interface{}{ "resource": "foo", }}, }, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{ "path/to/a/resource": "foo", }, Values: map[string]interface{}{ "path/to/a/resource": 0, }}, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "path/to/a/resource": "foo", }, Values: map[string]interface{}{ "resource": 0, }}, }, }, }, }, "replace_regex": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"."}, "tag-names": []string{"."}, "debug": true, "transforms": []map[string]*transform{ { "replace": &transform{ ApplyOn: "name", Old: "-state$", New: "-state-code", }, }, { "replace": &transform{ ApplyOn: "name", Old: "-tag$", New: "-better-tag", }, }, }, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{ "interface-oper-state": "foo", }}, }, output: []*formatters.EventMsg{ { Values: map[string]interface{}{ "interface-oper-state-code": "foo", }}, }, }, { input: []*formatters.EventMsg{ { Tags: map[string]string{ "my-tag": "foo", }}, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "my-better-tag": "foo", }}, }, }, }, }, } func TestEventStrings(t *testing.T) { for name, ts := range testset { if pi, ok := formatters.EventProcessors[ts.processorType]; ok { t.Log("found processor") p := pi() err := p.Init(ts.processor) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } t.Logf("initialized for test %s: %+v", name, p) for i, item := range ts.tests { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) outs := p.Apply(item.input...) for j := range outs { if !reflect.DeepEqual(outs[j], item.output[j]) { t.Logf("failed at event strings, item %d, index %d", i, j) t.Logf("expected: %#v", item.output[j]) t.Logf(" got: %#v", outs[j]) t.Fail() } } }) } } } } ================================================ FILE: pkg/formatters/event_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package formatters import ( "fmt" "reflect" "testing" "github.com/google/go-cmp/cmp" "github.com/openconfig/gnmi/proto/gnmi" ) type item struct { ev *EventMsg m map[string]interface{} } var eventMsgtestSet = map[string][]item{ "nil": { { ev: nil, m: nil, }, { ev: new(EventMsg), m: make(map[string]interface{}), }, }, "filled": { { ev: &EventMsg{ Timestamp: 100, Values: map[string]interface{}{"value1": int64(1)}, Tags: map[string]string{"tag1": "1"}, }, m: map[string]interface{}{ "timestamp": int64(100), "values": map[string]interface{}{ "value1": int64(1), }, "tags": map[string]interface{}{ "tag1": "1", }, }, }, { ev: &EventMsg{ Name: "sub1", Timestamp: 100, Tags: map[string]string{ "tag1": "1", "tag2": "1", }, }, m: map[string]interface{}{ "name": "sub1", "timestamp": int64(100), "tags": map[string]interface{}{ "tag1": "1", "tag2": "1", }, }, }, { ev: &EventMsg{ Name: "sub1", Timestamp: 100, Values: map[string]interface{}{ "value1": int64(1), "value2": int64(1), }, Tags: map[string]string{ "tag1": "1", "tag2": "1", }, }, m: map[string]interface{}{ "name": "sub1", "timestamp": int64(100), "values": map[string]interface{}{ "value1": int64(1), "value2": int64(1), }, "tags": map[string]interface{}{ "tag1": "1", "tag2": "1", }, }, }, }, } func TestToMap(t *testing.T) { for name, items := range eventMsgtestSet { for i, item := range items { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) out := item.ev.ToMap() if !reflect.DeepEqual(out, item.m) { t.Logf("failed at %q item %d", name, i) t.Logf("expected: (%T)%+v", item.m, item.m) t.Logf(" got: (%T)%+v", out, out) t.Fail() } }) } } } func TestFromMap(t *testing.T) { for name, items := range eventMsgtestSet { for i, item := range items { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) out, err := EventFromMap(item.m) if err != nil { t.Logf("failed at %q: %v", name, err) t.Fail() } if !reflect.DeepEqual(out, item.ev) { t.Logf("failed at %q item %d", name, i) t.Logf("expected: (%T)%+v", item.m, item.m) t.Logf(" got: (%T)%+v", out, out) t.Fail() } }) } } } func TestTagsFromGNMIPath(t *testing.T) { type args struct { p *gnmi.Path } tests := []struct { name string args args want string want1 map[string]string }{ { name: "nil", args: args{p: nil}, want: "", want1: nil, }, { name: "path_no_keys", args: args{p: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "interface", }, { Name: "statistics", }, }, }}, want: "/interface/statistics", want1: make(map[string]string), }, { name: "path_with_keys", args: args{p: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "interface", Key: map[string]string{ "name": "ethernet-1/1", }, }, { Name: "statistics", }, }, }}, want: "/interface/statistics", want1: map[string]string{ "interface_name": "ethernet-1/1", }, }, { name: "path_with_multiple_keys", args: args{p: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "elem1", Key: map[string]string{ "bar": "bar_val", "foo": "foo_val", }, }, { Name: "elem2", }, }, }}, want: "/elem1/elem2", want1: map[string]string{ "elem1_bar": "bar_val", "elem1_foo": "foo_val", }, }, { name: "path_with_multiple_keys_and_target", args: args{p: &gnmi.Path{ Target: "target1", Elem: []*gnmi.PathElem{ { Name: "elem1", Key: map[string]string{ "bar": "bar_val", "foo": "foo_val", }, }, { Name: "elem2", }, }, }}, want: "/elem1/elem2", want1: map[string]string{ "elem1_bar": "bar_val", "elem1_foo": "foo_val", "target": "target1", }, }, { name: "path_with_multiple_keys_target_and_origin", args: args{p: &gnmi.Path{ Origin: "origin1", Target: "target1", Elem: []*gnmi.PathElem{ { Name: "elem1", Key: map[string]string{ "bar": "bar_val", "foo": "foo_val", }, }, { Name: "elem2", }, }, }}, want: "origin1:/elem1/elem2", want1: map[string]string{ "elem1_bar": "bar_val", "elem1_foo": "foo_val", "target": "target1", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, got1 := tagsFromGNMIPath(tt.args.p) if got != tt.want { t.Errorf("TagsFromGNMIPath() got = %v, want %v", got, tt.want) } if !cmp.Equal(got1, tt.want1) { t.Errorf("TagsFromGNMIPath() got1 = %v, want %v", got1, tt.want1) } }) } } func Test_getValueFlat(t *testing.T) { type args struct { prefix string updValue *gnmi.TypedValue } tests := []struct { name string args args want map[string]interface{} wantErr bool }{ { name: "simple_json_value", args: args{ prefix: "/configure/router/interface", updValue: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte(`{ "admin-state": "enable", "ipv4": { "primary": { "address": "1.1.1.1", "prefix-length": 32 } } }`), }, }, }, want: map[string]interface{}{ "/configure/router/interface/admin-state": "enable", "/configure/router/interface/ipv4/primary/address": "1.1.1.1", "/configure/router/interface/ipv4/primary/prefix-length": float64(32), }, wantErr: false, }, { name: "json_value_with_list", args: args{ prefix: "/network-instance", updValue: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{ JsonVal: []byte(`{ "interface": [ "ethernet-1/1", "ethernet-1/2", "ethernet-1/3", "ethernet-1/4" ] }`), }, }, }, want: map[string]interface{}{ "/network-instance/interface.0": "ethernet-1/1", "/network-instance/interface.1": "ethernet-1/2", "/network-instance/interface.2": "ethernet-1/3", "/network-instance/interface.3": "ethernet-1/4", }, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := getValueFlat(tt.args.prefix, tt.args.updValue) if (err != nil) != tt.wantErr { t.Errorf("getValueFlat() error = %v, wantErr %v", err, tt.wantErr) return } if !cmp.Equal(got, tt.want) { for k, v := range got { fmt.Printf("%s: %v: %T\n", k, v, v) } t.Errorf("got: %+v", got) t.Errorf("want: %+v", tt.want) t.Errorf("getValueFlat() = %v, want %v", got, tt.want) } }) } } func TestResponseToEventMsgs(t *testing.T) { type args struct { name string rsp *gnmi.SubscribeResponse meta map[string]string eps []EventProcessor } tests := []struct { name string args args want []*EventMsg wantErr bool }{ { name: "sync_response", args: args{ name: "sub1", rsp: &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_SyncResponse{ SyncResponse: true, }, }, }, want: []*EventMsg{}, wantErr: false, }, { name: "single_update_ascii_value", args: args{ name: "sub1", rsp: &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: &gnmi.Notification{ Timestamp: 42, Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "interface", Key: map[string]string{ "name": "ethernet-1/1", }, }, {Name: "oper-state"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: "up"}, }, }, }, }, }, }, }, want: []*EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{ "interface_name": "ethernet-1/1", }, Values: map[string]interface{}{ "/interface/oper-state": "up", }, }, }, wantErr: false, }, { name: "single_update_string_json_value", args: args{ name: "sub1", rsp: &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: &gnmi.Notification{ Timestamp: 42, Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "interface", Key: map[string]string{ "name": "ethernet-1/1", }, }, {Name: "oper-state"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{JsonVal: []byte("\"up\"")}, }, }, }, }, }, }, }, want: []*EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{ "interface_name": "ethernet-1/1", }, Values: map[string]interface{}{ "/interface/oper-state": "up", }, }, }, wantErr: false, }, { name: "single_update_object_json_value", args: args{ name: "sub1", rsp: &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: &gnmi.Notification{ Timestamp: 42, Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "interface", Key: map[string]string{ "name": "ethernet-1/1", }, }, {Name: "statistics"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_JsonVal{JsonVal: []byte(`{"in-octets":"10","out-octets":"11"}`)}, }, }, }, }, }, }, }, want: []*EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{ "interface_name": "ethernet-1/1", }, Values: map[string]interface{}{ "/interface/statistics/in-octets": "10", "/interface/statistics/out-octets": "11", }, }, }, wantErr: false, }, { name: "multiple_updates_single_ascii_values", args: args{ name: "sub1", rsp: &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: &gnmi.Notification{ Timestamp: 42, Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "interface", Key: map[string]string{ "name": "ethernet-1/1", }, }, {Name: "admin-state"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: "enable"}, }, }, { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ { Name: "interface", Key: map[string]string{ "name": "ethernet-1/1", }, }, {Name: "oper-state"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: "up"}, }, }, }, }, }, }, }, want: []*EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{ "interface_name": "ethernet-1/1", }, Values: map[string]interface{}{ "/interface/admin-state": "enable", }, }, { Name: "sub1", Timestamp: 42, Tags: map[string]string{ "interface_name": "ethernet-1/1", }, Values: map[string]interface{}{ "/interface/oper-state": "up", }, }, }, wantErr: false, }, { name: "with_single_delete", args: args{ name: "sub1", rsp: &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: &gnmi.Notification{ Timestamp: 42, Delete: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ { Name: "interface", Key: map[string]string{ "name": "ethernet-1/1", }, }, }, }, }, }, }, }, }, want: []*EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{ "interface_name": "ethernet-1/1", }, Deletes: []string{ "/interface", }, }, }, wantErr: false, }, { name: "with_2_deletes", args: args{ name: "sub1", rsp: &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: &gnmi.Notification{ Timestamp: 42, Delete: []*gnmi.Path{ { Elem: []*gnmi.PathElem{ { Name: "interface", Key: map[string]string{ "name": "ethernet-1/1", }, }, }, }, { Elem: []*gnmi.PathElem{ { Name: "interface", Key: map[string]string{ "name": "ethernet-1/2", }, }, }, }, }, }, }, }, }, want: []*EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{ "interface_name": "ethernet-1/1", }, Deletes: []string{ "/interface", }, }, { Name: "sub1", Timestamp: 42, Tags: map[string]string{ "interface_name": "ethernet-1/2", }, Deletes: []string{ "/interface", }, }, }, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := ResponseToEventMsgs(tt.args.name, tt.args.rsp, tt.args.meta, tt.args.eps...) if (err != nil) != tt.wantErr { t.Errorf("ResponseToEventMsgs() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("ResponseToEventMsgs() got = %v", got) t.Errorf("ResponseToEventMsgs() want= %v", tt.want) } }) } } ================================================ FILE: pkg/formatters/event_time_epoch/event_time_epoch.go ================================================ // © 2025 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_time_epoch import ( "encoding/json" "io" "log" "os" "regexp" "time" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-time-epoch" loggingPrefix = "[" + processorType + "] " ) // epoch converts a time string to epoch time type epoch struct { formatters.BaseProcessor Values []string `mapstructure:"value-names,omitempty" json:"value-names,omitempty"` Precision string `mapstructure:"precision,omitempty" json:"precision,omitempty"` Format string `mapstructure:"format,omitempty" json:"format,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` values []*regexp.Regexp logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &epoch{ logger: log.New(io.Discard, "", 0), } }) } func (d *epoch) Init(cfg any, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, d) if err != nil { return err } for _, opt := range opts { opt(d) } if d.Format == "" { d.Format = time.RFC3339 } // init values regex d.values = make([]*regexp.Regexp, 0, len(d.Values)) for _, reg := range d.Values { re, err := regexp.Compile(reg) if err != nil { return err } d.values = append(d.values, re) } if d.logger.Writer() != io.Discard { b, err := json.Marshal(d) if err != nil { d.logger.Printf("initialized processor '%s': %+v", processorType, d) return nil } d.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } return nil } func (d *epoch) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { for _, e := range es { if e == nil { continue } for k, v := range e.Values { for _, re := range d.values { if re.MatchString(k) { d.logger.Printf("key '%s' matched regex '%s'", k, re.String()) switch v := v.(type) { case string: td, err := time.Parse(d.Format, v) if err != nil { d.logger.Printf("failed to convert '%v' to time: %v", v, err) continue } var ts int64 switch d.Precision { case "s", "sec", "second": ts = td.Unix() case "ms", "millisecond": ts = td.UnixMilli() case "us", "microsecond": ts = td.UnixMicro() case "ns", "nanosecond": ts = td.UnixNano() default: ts = td.UnixNano() } e.Values[k] = ts default: } break } } } } return es } func (d *epoch) WithLogger(l *log.Logger) { if d.Debug && l != nil { d.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if d.Debug { d.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } ================================================ FILE: pkg/formatters/event_time_epoch/event_time_epoch_test.go ================================================ // © 2025 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_time_epoch import ( "log" "os" "reflect" "testing" "github.com/openconfig/gnmic/pkg/formatters" ) func Test_epoch_Apply(t *testing.T) { type fields map[string]any type args struct { es []*formatters.EventMsg } tests := []struct { name string fields fields args args want []*formatters.EventMsg }{ { name: "nil_input", fields: map[string]interface{}{ "value-names": []string{ ".*", }, "debug": true, }, args: args{}, want: nil, }, { name: "simple", fields: map[string]any{ "precision": "s", "value-names": []string{ ".*last-change", }, "debug": true, }, args: args{ es: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]any{ "interface/last-change": "2024-06-19T15:11:24.601Z", }, }, }, }, want: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]any{ "interface/last-change": int64(1718809884), }, }, }, }, { name: "ms", fields: map[string]any{ "precision": "ms", "value-names": []string{ ".*last-change", }, "debug": true, }, args: args{ es: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]any{ "interface/last-change": "2024-06-19T15:11:24.601Z", }, }, }, }, want: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]any{ "interface/last-change": int64(1718809884601), }, }, }, }, { name: "us", fields: map[string]any{ "precision": "us", "value-names": []string{ ".*last-change", }, "debug": true, }, args: args{ es: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]any{ "interface/last-change": "2024-06-19T15:11:24.601Z", }, }, }, }, want: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]any{ "interface/last-change": int64(1718809884601000), }, }, }, }, { name: "ns", fields: map[string]any{ "precision": "ns", "value-names": []string{ ".*last-change", }, "debug": true, }, args: args{ es: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]any{ "interface/last-change": "2024-06-19T15:11:24.601Z", }, }, }, }, want: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]any{ "interface/last-change": int64(1718809884601000000), }, }, }, }, { name: "no_match", fields: map[string]any{ "precision": "ns", "value-names": []string{ ".*no_match.*", }, "debug": true, }, args: args{ es: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]any{ "interface/last-change": "2024-06-19T15:11:24.601Z", }, }, }, }, want: []*formatters.EventMsg{ { Name: "sub1", Timestamp: 42, Tags: map[string]string{}, Values: map[string]any{ "interface/last-change": "2024-06-19T15:11:24.601Z", }, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := &epoch{} err := c.Init(tt.fields, formatters.WithLogger(log.New(os.Stderr, "[event-epoch-test]", log.Flags()))) if err != nil { t.Errorf("failed to init processor in test %q: %v", tt.name, err) t.Fail() } if got := c.Apply(tt.args.es...); !reflect.DeepEqual(got, tt.want) { t.Errorf("epoch.Apply() = %v, want %v", got, tt.want) } }) } } ================================================ FILE: pkg/formatters/event_to_tag/event_to_tag.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_to_tag import ( "encoding/json" "fmt" "io" "log" "os" "regexp" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-to-tag" loggingPrefix = "[" + processorType + "] " ) // toTag moves ALL values matching any of the regex in .Values to the EventMsg.Tags map. // if .Keep is true, the matching values are not deleted from EventMsg.Tags type toTag struct { formatters.BaseProcessor Values []string `mapstructure:"values,omitempty" json:"values,omitempty"` ValueNames []string `mapstructure:"value-names,omitempty" json:"value-names,omitempty"` Keep bool `mapstructure:"keep,omitempty" json:"keep,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` valueNames []*regexp.Regexp values []*regexp.Regexp logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &toTag{ logger: log.New(io.Discard, "", 0), } }) } func (t *toTag) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, t) if err != nil { return err } for _, opt := range opts { opt(t) } t.valueNames = make([]*regexp.Regexp, 0, len(t.ValueNames)) for _, reg := range t.ValueNames { re, err := regexp.Compile(reg) if err != nil { return err } t.valueNames = append(t.valueNames, re) } t.values = make([]*regexp.Regexp, 0, len(t.Values)) for _, reg := range t.Values { re, err := regexp.Compile(reg) if err != nil { return err } t.values = append(t.values, re) } if t.logger.Writer() != io.Discard { b, err := json.Marshal(t) if err != nil { t.logger.Printf("initialized processor '%s': %+v", processorType, t) return nil } t.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } return nil } func (t *toTag) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { for _, e := range es { if e == nil { continue } if e.Tags == nil { e.Tags = make(map[string]string) } for k, v := range e.Values { for _, re := range t.valueNames { if re.MatchString(k) { switch v := v.(type) { case string: e.Tags[k] = v default: e.Tags[k] = fmt.Sprint(v) } if !t.Keep { delete(e.Values, k) } } } for _, re := range t.values { if vs, ok := v.(string); ok { if re.MatchString(vs) { e.Tags[k] = vs if !t.Keep { delete(e.Values, k) } } } } } } return es } func (t *toTag) Apply2(es ...*formatters.EventMsg) []*formatters.EventMsg { for _, e := range es { if e == nil { continue } if e.Tags == nil { e.Tags = make(map[string]string) } for k, v := range e.Values { for _, re := range t.valueNames { if re.MatchString(k) { e.Tags[k] = fmt.Sprint(v) // always cast v results on extra allocations: Apply > Apply2 if !t.Keep { delete(e.Values, k) } } } for _, re := range t.values { if vs, ok := v.(string); ok { if re.MatchString(vs) { e.Tags[k] = vs if !t.Keep { delete(e.Values, k) } } } } } } return es } func (t *toTag) WithLogger(l *log.Logger) { if t.Debug && l != nil { t.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if t.Debug { t.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } ================================================ FILE: pkg/formatters/event_to_tag/event_to_tag_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_to_tag import ( "fmt" "reflect" "regexp" "testing" "github.com/openconfig/gnmic/pkg/formatters" ) type item struct { input []*formatters.EventMsg output []*formatters.EventMsg } var testset = map[string]struct { processorType string processor map[string]interface{} tests []item }{ "1_value_match": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{".*name$"}, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Tags: map[string]string{}, Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": "dummy"}}, }, output: []*formatters.EventMsg{ { Tags: map[string]string{"name": "dummy"}, Values: map[string]interface{}{}}, }, }, }, }, "1_value_match_with_keep": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{".*name$"}, "keep": true, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Tags: map[string]string{}, Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{"name": "dummy"}}, }, output: []*formatters.EventMsg{ { Tags: map[string]string{"name": "dummy"}, Values: map[string]interface{}{"name": "dummy"}}, }, }, }, }, "2_value_match": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{".*name$"}, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Tags: map[string]string{}, Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{ "name": "dummy", "second_name": "dummy2"}, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "name": "dummy", "second_name": "dummy2"}, Values: map[string]interface{}{}}, }, }, }, }, "2_value_match_with_keep": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{".*name$"}, "keep": true, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Tags: map[string]string{}, Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{ "name": "dummy", "second_name": "dummy2"}, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "name": "dummy", "second_name": "dummy2"}, Values: map[string]interface{}{ "name": "dummy", "second_name": "dummy2"}}, }, }, }, }, "match_integer_value": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{".*peer-as$"}, "keep": true, }, tests: []item{ { input: nil, output: nil, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{}}, }, output: []*formatters.EventMsg{ { Tags: map[string]string{}, Values: map[string]interface{}{}}, }, }, { input: []*formatters.EventMsg{ { Values: map[string]interface{}{ "name": "dummy", "peer-as": 65000, }, }, }, output: []*formatters.EventMsg{ { Tags: map[string]string{ "peer-as": "65000", }, Values: map[string]interface{}{ "name": "dummy", "peer-as": 65000, }, }, }, }, }, }, } func TestEventToTag(t *testing.T) { for name, ts := range testset { if pi, ok := formatters.EventProcessors[ts.processorType]; ok { t.Log("found processor") p := pi() err := p.Init(ts.processor) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } t.Logf("processor: %+v", p) for i, item := range ts.tests { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) outs := p.Apply(item.input...) for j := range outs { if !reflect.DeepEqual(outs[j], item.output[j]) { t.Logf("failed at event to_tag %s, item %d, index %d", name, i, j) t.Logf("expected: %#v", item.output[j]) t.Logf(" got: %#v", outs[j]) t.Fail() } } }) } } else { t.Errorf("event processor %s not found", ts.processorType) } } } // Helper function to generate test messages func generateTestMessages(count int) []*formatters.EventMsg { messages := make([]*formatters.EventMsg, count) for i := 0; i < count; i++ { messages[i] = &formatters.EventMsg{ Name: fmt.Sprintf("event%d", i), Timestamp: int64(i), Values: map[string]interface{}{ fmt.Sprintf("key%d", i): fmt.Sprintf("value%d", i), "staticKey": "staticValue", fmt.Sprintf("tagw%d", i): fmt.Sprintf("value%d", i), }, } } return messages } // Benchmark test for the Apply function func BenchmarkApply(b *testing.B) { // Create a toTag instance with sample regex patterns toTagInstance := &toTag{ valueNames: []*regexp.Regexp{ regexp.MustCompile(`^key\d+$`), // Matches keys like "key1", "key2", etc. }, values: []*regexp.Regexp{ regexp.MustCompile(`^value\d+$`), // Matches values like "value1", "value2", etc. }, Keep: false, } // Generate a sample EventMsg array eventMessages := generateTestMessages(10000) // Benchmark the Apply function b.ResetTimer() for i := 0; i < b.N; i++ { toTagInstance.Apply(eventMessages...) } } func BenchmarkApply2(b *testing.B) { // Create a toTag instance with sample regex patterns toTagInstance := &toTag{ valueNames: []*regexp.Regexp{ regexp.MustCompile(`^key\d+$`), // Matches keys like "key1", "key2", etc. }, values: []*regexp.Regexp{ regexp.MustCompile(`^value\d+$`), // Matches values like "value1", "value2", etc. }, Keep: false, } // Generate a sample EventMsg array eventMessages := generateTestMessages(10000) // Benchmark the Apply function b.ResetTimer() for i := 0; i < b.N; i++ { toTagInstance.Apply2(eventMessages...) } } ================================================ FILE: pkg/formatters/event_trigger/event_trigger.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_trigger import ( "context" "encoding/json" "errors" "fmt" "io" "log" "os" "strings" "time" "gopkg.in/yaml.v2" "github.com/itchyny/gojq" "github.com/openconfig/gnmic/pkg/actions" _ "github.com/openconfig/gnmic/pkg/actions/all" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" gfile "github.com/openconfig/gnmic/pkg/file" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-trigger" loggingPrefix = "[" + processorType + "] " defaultCondition = "any([true])" ) // trigger triggers an action when certain conditions are met type trigger struct { formatters.BaseProcessor Condition string `mapstructure:"condition,omitempty"` MinOccurrences int `mapstructure:"min-occurrences,omitempty"` MaxOccurrences int `mapstructure:"max-occurrences,omitempty"` Window time.Duration `mapstructure:"window,omitempty"` Actions []string `mapstructure:"actions,omitempty"` Vars map[string]interface{} `mapstructure:"vars,omitempty"` VarsFile string `mapstructure:"vars-file,omitempty"` Debug bool `mapstructure:"debug,omitempty"` Async bool `mapstructure:"async,omitempty"` occurrencesTimes []time.Time lastTrigger time.Time code *gojq.Code actions []actions.Action vars map[string]interface{} targets map[string]*types.TargetConfig acts map[string]map[string]interface{} logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &trigger{ logger: log.New(io.Discard, "", 0), } }) } func (p *trigger) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, p) if err != nil { return err } for _, opt := range opts { opt(p) } err = p.setDefaults() if err != nil { return err } p.Condition = strings.TrimSpace(p.Condition) q, err := gojq.Parse(p.Condition) if err != nil { return err } p.code, err = gojq.Compile(q) if err != nil { return err } for _, name := range p.Actions { if actCfg, ok := p.acts[name]; ok { err = p.initializeAction(actCfg) if err != nil { return err } continue } return fmt.Errorf("failed to initialize action %q: config not found", name) } err = p.readVars() if err != nil { return err } p.logger.Printf("%q initialized: %+v", processorType, p) return nil } func (p *trigger) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { now := time.Now() for _, e := range es { if e == nil { continue } res, err := formatters.CheckCondition(p.code, e) if err != nil { p.logger.Printf("failed evaluating condition %q: %v", p.Condition, err) continue } if p.Debug { p.logger.Printf("msg=%+v, condition %q result: (%T)%v", e, p.Condition, res, res) } if res { if p.evalOccurrencesWithinWindow(now) { if p.Async { go p.triggerActions(e) } else { p.triggerActions(e) } } } } return es } func (p *trigger) WithLogger(l *log.Logger) { if p.Debug && l != nil { p.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if p.Debug { p.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } func (p *trigger) WithTargets(tcs map[string]*types.TargetConfig) { p.targets = tcs } func (p *trigger) WithActions(acts map[string]map[string]interface{}) { if p.Debug { p.logger.Printf("with actions: %+v", acts) } p.acts = acts } func (p *trigger) initializeAction(cfg map[string]interface{}) error { if len(cfg) == 0 { return errors.New("missing action definition") } if actType, ok := cfg["type"]; ok { switch actType := actType.(type) { case string: if in, ok := actions.Actions[actType]; ok { act := in() err := act.Init(cfg, actions.WithLogger(p.logger), actions.WithTargets(p.targets)) if err != nil { return err } p.actions = append(p.actions, act) return nil } return fmt.Errorf("unknown action type %q", actType) default: return fmt.Errorf("unexpected action field type %T", actType) } } return errors.New("missing type field under action") } func (p *trigger) String() string { b, err := json.Marshal(p) if err != nil { return "" } return string(b) } func (p *trigger) setDefaults() error { if p.Condition == "" { p.Condition = defaultCondition } if p.MinOccurrences <= 0 { p.MinOccurrences = 1 } if p.MaxOccurrences <= 0 { p.MaxOccurrences = 1 } if p.MaxOccurrences < p.MinOccurrences { return errors.New("max-occurrences cannot be lower than min-occurrences") } if p.Window <= 0 { p.Window = time.Minute } return nil } func (p *trigger) readVars() error { if p.VarsFile == "" { p.vars = p.Vars return nil } b, err := gfile.ReadFile(context.TODO(), p.VarsFile) if err != nil { return err } v := make(map[string]interface{}) err = yaml.Unmarshal(b, &v) if err != nil { return err } p.vars = utils.MergeMaps(v, p.Vars) return nil } func (p *trigger) triggerActions(e *formatters.EventMsg) { actx := &actions.Context{Input: e, Env: make(map[string]interface{}), Vars: p.vars} for _, act := range p.actions { res, err := act.Run(context.TODO(), actx) if err != nil { p.logger.Printf("trigger action %q failed: %+v", act.NName(), err) return } actx.Env[act.NName()] = res p.logger.Printf("action %q result: %+v", act.NName(), res) } } func (p *trigger) evalOccurrencesWithinWindow(now time.Time) bool { if p.occurrencesTimes == nil { p.occurrencesTimes = make([]time.Time, 0) } occurrencesInWindow := make([]time.Time, 0, len(p.occurrencesTimes)) if p.Debug { p.logger.Printf("occurrencesTimes: %v", p.occurrencesTimes) } for _, t := range p.occurrencesTimes { if t.Add(p.Window).After(now) { if p.Debug { p.logger.Printf("time=%s + %s is after now=%s", t, p.Window, now) } occurrencesInWindow = append(occurrencesInWindow, t) } } p.occurrencesTimes = append(occurrencesInWindow, now) numOccurrences := len(p.occurrencesTimes) if numOccurrences > p.MaxOccurrences { p.occurrencesTimes = p.occurrencesTimes[numOccurrences-p.MaxOccurrences-1:] numOccurrences = len(p.occurrencesTimes) } if p.Debug { p.logger.Printf("numOccurrences: %d", numOccurrences) } if numOccurrences >= p.MinOccurrences && numOccurrences <= p.MaxOccurrences { p.lastTrigger = now return true } // check last trigger if numOccurrences > p.MinOccurrences && p.lastTrigger.Add(p.Window).Before(now) { p.lastTrigger = now return true } return false } func (p *trigger) WithProcessors(procs map[string]map[string]any) {} ================================================ FILE: pkg/formatters/event_trigger/event_trigger_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_trigger import ( "log" "os" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) type item struct { input []*formatters.EventMsg output []*formatters.EventMsg } var actionsCfg = map[string]map[string]interface{}{ "dummy1": { "name": "dummy1", "type": "http", }, "dummy2": { "name": "dummy2", "type": "http", "url": "http://remote-alerting-system:9090/", }, } var testset = map[string]struct { processorType string processor map[string]interface{} tests []item }{ "init": { processorType: processorType, processor: map[string]interface{}{ "debug": true, "actions": []string{ "dummy1", }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Name: "sub1", Tags: map[string]string{ "tag1": "1", }, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Tags: map[string]string{ "tag1": "1", }, }, }, }, }, }, "with_condition": { processorType: processorType, processor: map[string]interface{}{ "condition": `.values["counter1"] > 90`, "debug": true, "actions": []string{ "dummy2", }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Name: "sub1", Tags: map[string]string{ "tag1": "1", }, Values: map[string]interface{}{ "counter1": 91, }, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Tags: map[string]string{ "tag1": "1", }, Values: map[string]interface{}{ "counter1": 91, }, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub1", Tags: map[string]string{ "tag1": "1", }, Values: map[string]interface{}{ "counter1": 89, }, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Tags: map[string]string{ "tag1": "1", }, Values: map[string]interface{}{ "counter1": 89, }, }, }, }, { input: []*formatters.EventMsg{ { Name: "sub1", Tags: map[string]string{ "tag1": "1", }, Values: map[string]interface{}{ "counter2": 91, }, }, }, output: []*formatters.EventMsg{ { Name: "sub1", Tags: map[string]string{ "tag1": "1", }, Values: map[string]interface{}{ "counter2": 91, }, }, }, }, }, }, } var triggerOccWindowTestSet = map[string]struct { t *trigger now time.Time out bool }{ "defaults_0_occurrences": { t: &trigger{ logger: log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags), Debug: true, MinOccurrences: 1, MaxOccurrences: 1, Window: time.Minute, occurrencesTimes: []time.Time{}, }, out: true, now: time.Now(), }, "defaults_with_1_occurrence_in_window": { t: &trigger{ logger: log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags), Debug: true, MinOccurrences: 1, MaxOccurrences: 1, Window: time.Minute, occurrencesTimes: []time.Time{ time.Now().Add(-time.Second), }, lastTrigger: time.Now().Add(-time.Second), }, out: false, now: time.Now(), }, "defaults_with_1_occurrence_out_of_window": { t: &trigger{ logger: log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags), Debug: true, MinOccurrences: 1, MaxOccurrences: 1, Window: time.Minute, occurrencesTimes: []time.Time{ time.Now().Add(-time.Hour), }, }, out: true, now: time.Now(), }, "2max_1min_without_occurrences": { t: &trigger{ logger: log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags), Debug: true, MinOccurrences: 1, MaxOccurrences: 2, Window: time.Minute, occurrencesTimes: []time.Time{}, }, out: true, now: time.Now(), }, "2max_1min_with_1occurrence_in_window": { t: &trigger{ logger: log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags), Debug: true, MinOccurrences: 1, MaxOccurrences: 2, Window: time.Minute, occurrencesTimes: []time.Time{ time.Now().Add(-30 * time.Second), }, }, out: true, now: time.Now(), }, "2max_1min_with_2occurrences_in_window": { t: &trigger{ logger: log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags), Debug: true, MinOccurrences: 1, MaxOccurrences: 2, Window: time.Minute, occurrencesTimes: []time.Time{ time.Now().Add(-10 * time.Second), time.Now().Add(-30 * time.Second), }, lastTrigger: time.Now().Add(-10 * time.Second), }, out: false, now: time.Now(), }, "2max_2min_without_occurrences": { t: &trigger{ logger: log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags), Debug: true, MinOccurrences: 2, MaxOccurrences: 2, Window: time.Minute, occurrencesTimes: []time.Time{}, }, out: false, now: time.Now(), }, "2max_2min_with_1occurrence_in_window": { t: &trigger{ logger: log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags), Debug: true, MinOccurrences: 2, MaxOccurrences: 2, Window: time.Minute, occurrencesTimes: []time.Time{ time.Now().Add(-30 * time.Second), }, }, out: true, now: time.Now(), }, "2max_2min_with_2occurrences_in_window": { t: &trigger{ logger: log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags), Debug: true, MinOccurrences: 2, MaxOccurrences: 2, Window: time.Minute, occurrencesTimes: []time.Time{ time.Now().Add(-10 * time.Second), time.Now().Add(-30 * time.Second), }, lastTrigger: time.Now().Add(-10 * time.Second), }, out: false, now: time.Now(), }, "2max_2min_with_2occurrences_in_window_lastTrigger_out_of_window": { t: &trigger{ logger: log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags), Debug: true, MinOccurrences: 2, MaxOccurrences: 2, Window: time.Minute, occurrencesTimes: []time.Time{ time.Now().Add(-10 * time.Second), time.Now().Add(-30 * time.Second), }, lastTrigger: time.Now().Add(-61 * time.Second), }, out: true, now: time.Now(), }, } func TestEventTrigger(t *testing.T) { for name, ts := range testset { if pi, ok := formatters.EventProcessors[ts.processorType]; ok { t.Log("found processor") p := pi() err := p.Init(ts.processor, formatters.WithLogger(log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags)), formatters.WithActions(actionsCfg), ) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } t.Logf("processor: %+v", p) for i, item := range ts.tests { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) outs := p.Apply(item.input...) if len(outs) != len(item.output) { t.Errorf("failed at %s, result has a different length than the expected result", name) t.Fail() } for j := range outs { if !cmp.Equal(outs[j], item.output[j]) { t.Errorf("failed at %s item %d, index %d, expected %+v, got: %+v", name, i, j, item.output[j], outs[j]) t.Fail() } } }) } } else { t.Errorf("event processor %s not found", ts.processorType) } } } func TestOccurrenceTrigger(t *testing.T) { for name, ts := range triggerOccWindowTestSet { t.Run(name, func(t *testing.T) { ok := ts.t.evalOccurrencesWithinWindow(ts.now) t.Logf("%q result: %v", name, ok) if ok != ts.out { t.Errorf("failed at %s , expected %+v, got: %+v", name, ts.out, ok) } }) } } ================================================ FILE: pkg/formatters/event_value_tag/event_value_tag.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_value_tag import ( "encoding/json" "fmt" "io" "log" "os" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-value-tag" loggingPrefix = "[" + processorType + "] " ) type valueTag struct { formatters.BaseProcessor TagName string `mapstructure:"tag-name,omitempty" json:"tag-name,omitempty"` ValueName string `mapstructure:"value-name,omitempty" json:"value-name,omitempty"` Consume bool `mapstructure:"consume,omitempty" json:"consume,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &valueTag{logger: log.New(io.Discard, "", 0)} }) } func (vt *valueTag) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, vt) if err != nil { return err } if vt.TagName == "" { vt.TagName = vt.ValueName } for _, opt := range opts { opt(vt) } if vt.logger.Writer() != io.Discard { b, err := json.Marshal(vt) if err != nil { vt.logger.Printf("initialized processor '%s': %+v", processorType, vt) return nil } vt.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } return nil } type tagVal struct { tags map[string]string value interface{} } func (vt *valueTag) Apply(evs ...*formatters.EventMsg) []*formatters.EventMsg { vts := vt.buildApplyRules(evs) for _, tv := range vts { for _, ev := range evs { match := compareTags(tv.tags, ev.Tags) if match { switch v := tv.value.(type) { case string: ev.Tags[vt.TagName] = v default: ev.Tags[vt.TagName] = fmt.Sprint(tv.value) } } } } return evs } func (vt *valueTag) WithLogger(l *log.Logger) { if vt.Debug && l != nil { vt.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if vt.Debug { vt.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } // returns true if all keys match, false otherwise. func compareTags(a map[string]string, b map[string]string) bool { if len(a) > len(b) { return false } for k, v := range a { if vv, ok := b[k]; !ok || v != vv { return false } } return true } func (vt *valueTag) WithProcessors(procs map[string]map[string]any) {} func (vt *valueTag) buildApplyRules(evs []*formatters.EventMsg) []*tagVal { toApply := make([]*tagVal, 0) for _, ev := range evs { if v, ok := ev.Values[vt.ValueName]; ok { toApply = append(toApply, &tagVal{ tags: copyTags(ev.Tags), value: v, }) if vt.Consume { delete(ev.Values, vt.ValueName) } } } return toApply } func copyTags(src map[string]string) map[string]string { dest := make(map[string]string, len(src)) for k, v := range src { dest[k] = v } return dest } ================================================ FILE: pkg/formatters/event_value_tag/event_value_tag_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_value_tag import ( "fmt" "log" "reflect" "testing" "github.com/openconfig/gnmic/pkg/formatters" ) type item struct { input []*formatters.EventMsg output []*formatters.EventMsg } var testset = map[string]struct { processorType string processor map[string]interface{} tests []item }{ "no-options": { processorType: processorType, processor: map[string]interface{}{ "value-name": "foo", "debug": true, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"foo": "new_value"}, }, { Timestamp: 3, Tags: map[string]string{"other_tag": "value"}, Values: map[string]interface{}{"other_val": "val"}, }, { Timestamp: 4, Tags: map[string]string{"foo": "other_value"}, Values: map[string]interface{}{"other_val": "val"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value", "foo": "new_value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value", "foo": "new_value"}, Values: map[string]interface{}{"foo": "new_value"}, }, { Timestamp: 3, Tags: map[string]string{"other_tag": "value"}, Values: map[string]interface{}{"other_val": "val"}, }, { Timestamp: 4, Tags: map[string]string{"foo": "other_value"}, Values: map[string]interface{}{"other_val": "val"}, }, }, }, { input: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"bar": "value"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"bar": "value"}, }, }, }, { input: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"counter1": "1"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"foo": "value"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value", "foo": "value"}, Values: map[string]interface{}{"counter1": "1"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value", "foo": "value"}, Values: map[string]interface{}{"foo": "value"}, }, }, }, }, }, "rename-tag": { processorType: processorType, processor: map[string]interface{}{ "value-name": "foo", "tag-name": "bar", }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"foo": "new_value"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value", "bar": "new_value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value", "bar": "new_value"}, Values: map[string]interface{}{"foo": "new_value"}, }, }, }, { input: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"bar": "value"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"bar": "value"}, }, }, }, { input: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"counter1": "1"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"foo": "value"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value", "bar": "value"}, Values: map[string]interface{}{"counter1": "1"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value", "bar": "value"}, Values: map[string]interface{}{"foo": "value"}, }, }, }, }, }, "consume-value": { processorType: processorType, processor: map[string]interface{}{ "value-name": "foo", "consume": true, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"foo": "new_value"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value", "foo": "new_value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value", "foo": "new_value"}, Values: make(map[string]interface{}, 0), }, }, }, { input: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"bar": "value"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"bar": "value"}, }, }, }, }, }, "integer_val": { processorType: processorType, processor: map[string]interface{}{ "value-name": "foo", }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"foo": 42}, }, }, output: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value", "foo": "42"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value", "foo": "42"}, Values: map[string]interface{}{"foo": 42}, }, }, }, { input: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"bar": "value"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"bar": "value"}, }, }, }, { input: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value1"}, Values: map[string]interface{}{"foo": "value"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value1", "foo": "value"}, Values: map[string]interface{}{"foo": "value"}, }, }, }, }, }, } func TestEventValueTag(t *testing.T) { for name, ts := range testset { if pi, ok := formatters.EventProcessors[ts.processorType]; ok { t.Log("found processor") p := pi() err := p.Init(ts.processor, formatters.WithLogger(log.Default())) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } t.Logf("processor: %+v", p) for i, item := range ts.tests { t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) outs := p.Apply(item.input...) for j := range outs { if !reflect.DeepEqual(outs[j], item.output[j]) { t.Errorf("failed at %s item %d, index %d, expected %+v", name, i, j, item.output[j]) t.Errorf("failed at %s item %d, index %d, got: %+v", name, i, j, outs[j]) } } }) } } else { t.Errorf("event processor %s not found", ts.processorType) } } } func generateEventMsgs(numEvents, numValues int, targetKey, targetValue string) []*formatters.EventMsg { evs := make([]*formatters.EventMsg, numEvents) for i := 0; i < numEvents; i++ { values := make(map[string]any) for j := 0; j < numValues; j++ { values[fmt.Sprintf("key%d", j)] = fmt.Sprintf("value%d", j) } values[targetKey] = targetValue evs[i] = &formatters.EventMsg{ Tags: map[string]string{"tag": "test"}, Values: values, } } return evs } func BenchmarkBuildApplyRules(b *testing.B) { evs := generateEventMsgs(100_000, 10, "targetKey", "targetValue") vt := &valueTag{ValueName: "targetKey", Consume: true} b.ResetTimer() for i := 0; i < b.N; i++ { vt.buildApplyRules(evs) } } func BenchmarkBuildApplyRules2(b *testing.B) { evs := generateEventMsgs(100_000, 10, "targetKey", "targetValue") vt := &valueTag{ValueName: "targetKey", Consume: true} b.ResetTimer() for i := 0; i < b.N; i++ { vt.buildApplyRules2(evs) } } // as ref func (vt *valueTag) buildApplyRules2(evs []*formatters.EventMsg) []*tagVal { toApply := make([]*tagVal, 0) for _, ev := range evs { for k, v := range ev.Values { if vt.ValueName == k { toApply = append(toApply, &tagVal{ // copyTags(ev.Tags), ev.Tags, v, }) if vt.Consume { delete(ev.Values, vt.ValueName) } } } } return toApply } ================================================ FILE: pkg/formatters/event_value_tag_v2/event_value_tag_v2.go ================================================ // © 2025 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_value_tag_v2 import ( "encoding/json" "fmt" "hash/fnv" "io" "log" "os" "slices" "sync" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-value-tag-v2" loggingPrefix = "[" + processorType + "] " ) var ( eqByte = []byte("=") semiC = []byte(";") pipeByte = []byte("|") ) type valueTag struct { formatters.BaseProcessor Rules []*rule `mapstructure:"rules,omitempty" json:"rules,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` logger *log.Logger m *sync.RWMutex applyRules []map[uint64]*applyRule } type rule struct { TagName string `mapstructure:"tag-name,omitempty" json:"tag-name,omitempty"` ValueName string `mapstructure:"value-name,omitempty" json:"value-name,omitempty"` Consume bool `mapstructure:"consume,omitempty" json:"consume,omitempty"` } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &valueTag{m: new(sync.RWMutex), logger: log.New(io.Discard, "", 0)} }) } func (vt *valueTag) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, vt) if err != nil { return err } for _, opt := range opts { opt(vt) } for _, r := range vt.Rules { if r.TagName == "" { r.TagName = r.ValueName } } vt.applyRules = make([]map[uint64]*applyRule, len(vt.Rules)) for i := range vt.applyRules { vt.applyRules[i] = make(map[uint64]*applyRule, 0) } if vt.logger.Writer() != io.Discard { b, err := json.Marshal(vt) if err != nil { vt.logger.Printf("initialized processor '%s': %+v", processorType, vt) return nil } vt.logger.Printf("initialized processor '%s': %s", processorType, string(b)) } return nil } type applyRule struct { // Set of tags that must be present in a message // in order to add the value as tag. tags map[string]string // The value to be added as tag. // The tag name is taken from the main proc struct. value any } func (vt *valueTag) Apply(evs ...*formatters.EventMsg) []*formatters.EventMsg { vt.m.Lock() defer vt.m.Unlock() for _, ev := range evs { for i, r := range vt.Rules { if v, ok := ev.Values[r.ValueName]; ok { // calculate apply rule Key k := vt.applyRuleKey(ev.Tags, r) vt.applyRules[i][k] = &applyRule{ tags: copyTags(ev.Tags), // copy map value: v, } if r.Consume { delete(ev.Values, r.ValueName) } } for _, ar := range vt.applyRules[i] { if includedIn(ar.tags, ev.Tags) { switch v := ar.value.(type) { case string: ev.Tags[r.TagName] = v default: ev.Tags[r.TagName] = fmt.Sprint(ar.value) } } } } } return evs } func (vt *valueTag) WithLogger(l *log.Logger) { if vt.Debug && l != nil { vt.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if vt.Debug { vt.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } // comparison logic for maps // i.e: a ⊆ b func includedIn(a, b map[string]string) bool { if len(a) > len(b) { return false } for k, v := range a { if bv, ok := b[k]; !ok || v != bv { return false } } return true } // the apply rule key is a hash of the valueName and the event msg tags func (vt *valueTag) applyRuleKey(m map[string]string, r *rule) uint64 { keys := make([]string, 0, len(m)) for k := range m { keys = append(keys, k) } slices.Sort(keys) h := fnv.New64a() h.Write([]byte(r.ValueName)) h.Write(pipeByte) for _, k := range keys { h.Write([]byte(k)) h.Write(eqByte) h.Write([]byte(m[k])) h.Write(semiC) } return h.Sum64() } func copyTags(src map[string]string) map[string]string { dest := make(map[string]string, len(src)) for k, v := range src { dest[k] = v } return dest } ================================================ FILE: pkg/formatters/event_value_tag_v2/event_value_tag_v2_test.go ================================================ // © 2025 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_value_tag_v2 import ( "log" "os" "reflect" "sync" "testing" "github.com/stretchr/testify/assert" "github.com/openconfig/gnmic/pkg/formatters" ) type item struct { input []*formatters.EventMsg output []*formatters.EventMsg } var testset = map[string]struct { processorType string processor map[string]interface{} tests []item }{ "no-options": { processorType: processorType, processor: map[string]interface{}{ "rules": []map[string]any{ {"value-name": "foo"}, }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { // `foo`` value becomes a tag input: []*formatters.EventMsg{ { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"foo": "new_value"}, }, { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 2, Tags: map[string]string{"tag": "value", "foo": "new_value"}, Values: map[string]interface{}{"foo": "new_value"}, }, { Timestamp: 1, Tags: map[string]string{"tag": "value", "foo": "new_value"}, }, }, }, { // no change input: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"bar": "value"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"bar": "value"}, }, }, }, { // foo value becomes a tag input: []*formatters.EventMsg{ { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"foo": "value"}, }, { Timestamp: 1, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"counter1": "1"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 2, Tags: map[string]string{"tag": "value", "foo": "value"}, Values: map[string]interface{}{"foo": "value"}, }, { Timestamp: 1, Tags: map[string]string{"tag": "value", "foo": "value"}, Values: map[string]interface{}{"counter1": "1"}, }, }, }, { input: []*formatters.EventMsg{ { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"foo": 42}, }, { Timestamp: 1, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"counter1": "1"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 2, Tags: map[string]string{"tag": "value", "foo": "42"}, Values: map[string]interface{}{"foo": 42}, }, { Timestamp: 1, Tags: map[string]string{"tag": "value", "foo": "42"}, Values: map[string]interface{}{"counter1": "1"}, }, }, }, }, }, "rename-tag": { processorType: processorType, processor: map[string]interface{}{ "rules": []map[string]any{ { "value-name": "foo", "tag-name": "bar", }, }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"foo": "new_value"}, }, { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 2, Tags: map[string]string{"tag": "value", "bar": "new_value"}, Values: map[string]interface{}{"foo": "new_value"}, }, { Timestamp: 1, Tags: map[string]string{"tag": "value", "bar": "new_value"}, }, }, }, { input: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"bar": "value"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"bar": "value"}, }, }, }, { input: []*formatters.EventMsg{ { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"foo": "value"}, }, { Timestamp: 1, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"counter1": "1"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 2, Tags: map[string]string{"tag": "value", "bar": "value"}, Values: map[string]interface{}{"foo": "value"}, }, { Timestamp: 1, Tags: map[string]string{"tag": "value", "bar": "value"}, Values: map[string]interface{}{"counter1": "1"}, }, }, }, }, }, "consume-value": { processorType: processorType, processor: map[string]interface{}{ "rules": []map[string]any{ { "value-name": "foo", "consume": true, }, }, }, tests: []item{ { input: nil, output: nil, }, { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, { input: []*formatters.EventMsg{ { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"foo": "new_value"}, }, { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 2, Tags: map[string]string{"tag": "value", "foo": "new_value"}, Values: make(map[string]interface{}, 0), }, { Timestamp: 1, Tags: map[string]string{"tag": "value", "foo": "new_value"}, }, }, }, { input: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"bar": "value"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"bar": "value"}, }, }, }, }, }, "multiple-rules": { processorType: processorType, processor: map[string]interface{}{ "rules": []map[string]any{ { "value-name": "foo", "consume": true, }, { "value-name": "bar", // "consume": true, }, }, }, tests: []item{ // 0 { input: nil, output: nil, }, // 1 { input: make([]*formatters.EventMsg, 0), output: make([]*formatters.EventMsg, 0), }, // 2 { input: []*formatters.EventMsg{ { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"foo": "new_value"}, }, { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 2, Tags: map[string]string{"tag": "value", "foo": "new_value"}, Values: make(map[string]interface{}, 0), }, { Timestamp: 1, Tags: map[string]string{"tag": "value", "foo": "new_value"}, }, }, }, // 3 { input: []*formatters.EventMsg{ { Timestamp: 2, Tags: map[string]string{"tag": "value"}, Values: map[string]interface{}{"bar": "value"}, // value to be copied to tags }, { Timestamp: 1, Tags: map[string]string{"tag": "value"}, }, { // this message should remain unchanged Timestamp: 3, Tags: map[string]string{"tag1": "value"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 2, Tags: map[string]string{"tag": "value", "bar": "value"}, Values: map[string]interface{}{"bar": "value"}, }, { Timestamp: 1, Tags: map[string]string{"tag": "value", "bar": "value"}, }, { Timestamp: 3, Tags: map[string]string{"tag1": "value"}, }, }, }, // 4 { input: []*formatters.EventMsg{ { Timestamp: 2, Tags: map[string]string{"tag1": "value"}, Values: map[string]interface{}{"foo": "value"}, // value to be copied to tags }, { Timestamp: 4, Tags: map[string]string{"tag2": "value"}, Values: map[string]interface{}{"bar": "value"}, // value to be copied to tags }, { Timestamp: 1, Tags: map[string]string{"tag1": "value"}, }, { Timestamp: 3, Tags: map[string]string{"tag2": "value"}, }, { // this message should remain unchanged Timestamp: 5, Tags: map[string]string{"other_tag": "value"}, }, { // this message should remain unchanged Timestamp: 6, // Tags: map[string]string{"other_tag": "value"}, }, }, output: []*formatters.EventMsg{ { Timestamp: 2, Tags: map[string]string{"tag1": "value", "foo": "value"}, Values: map[string]interface{}{}, }, { Timestamp: 4, Tags: map[string]string{"tag2": "value", "bar": "value"}, Values: map[string]interface{}{"bar": "value"}, // value to be copied to tags }, { Timestamp: 1, Tags: map[string]string{"tag1": "value", "foo": "value"}, }, { Timestamp: 3, Tags: map[string]string{"tag2": "value", "bar": "value"}, }, { Timestamp: 5, Tags: map[string]string{"other_tag": "value"}, }, { // this message should remain unchanged Timestamp: 6, // Tags: map[string]string{"other_tag": "value"}, }, }, }, }, }, } func TestEventValueTag(t *testing.T) { for name, ts := range testset { if pi, ok := formatters.EventProcessors[ts.processorType]; ok { t.Log("found processor") for i, item := range ts.tests { // a processor per test item p := pi() err := p.Init(ts.processor, formatters.WithLogger(log.New(os.Stderr, "test", log.Flags()))) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } t.Logf("processor: %+v", p) t.Run(name, func(t *testing.T) { t.Logf("running test item %d", i) // _ = p.Apply(item.input...) outs := p.Apply(item.input...) for j := range outs { if !reflect.DeepEqual(outs[j], item.output[j]) { t.Errorf("failed at %s item %d, index %d, expected %+v", name, i, j, item.output[j]) t.Errorf("failed at %s item %d, index %d, got: %+v", name, i, j, outs[j]) } } }) } } else { t.Errorf("event processor %s not found", ts.processorType) } } } func TestValueTagApplySubsequentRuns(t *testing.T) { processor := &valueTag{ Rules: []*rule{ { TagName: "moved-tag", ValueName: "important-value", Consume: true, }, }, Debug: true, logger: log.Default(), m: new(sync.RWMutex), applyRules: []map[uint64]*applyRule{ make(map[uint64]*applyRule), }, } // first set events1 := []*formatters.EventMsg{ { Tags: map[string]string{"tag1": "value1"}, Values: map[string]interface{}{ "important-value": "value-to-move", }, }, { Tags: map[string]string{"tag2": "value2"}, Values: map[string]interface{}{ "other-value": "irrelevant", }, }, } // first apply processed1 := processor.Apply(events1...) // assert assert.Equal(t, "value-to-move", processed1[0].Tags["moved-tag"]) assert.NotContains(t, processed1[0].Values, "important-value") assert.NotContains(t, processed1[1].Tags, "moved-tag") // second set events2 := []*formatters.EventMsg{ { Tags: map[string]string{ "tag1": "value1", }, Values: map[string]interface{}{ "new-value": "some-new-data", }, }, { Tags: map[string]string{ "tag1": "value1", }, Values: map[string]interface{}{ "counter1": 42, }, }, } // second apply processed2 := processor.Apply(events2...) // assert assert.Equal(t, "value-to-move", processed2[0].Tags["moved-tag"]) assert.Contains(t, processed2[0].Tags, "tag1") assert.Contains(t, processed2[0].Values, "new-value") assert.Contains(t, processed2[1].Tags, "tag1") assert.Contains(t, processed2[1].Values, "counter1") } ================================================ FILE: pkg/formatters/event_write/event_write.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_write import ( "encoding/json" "io" "log" "os" "regexp" "strings" "github.com/itchyny/gojq" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" ) const ( processorType = "event-write" loggingPrefix = "[" + processorType + "] " ) type write struct { formatters.BaseProcessor Condition string `mapstructure:"condition,omitempty"` Tags []string `mapstructure:"tags,omitempty" json:"tags,omitempty"` Values []string `mapstructure:"values,omitempty" json:"values,omitempty"` TagNames []string `mapstructure:"tag-names,omitempty" json:"tag-names,omitempty"` ValueNames []string `mapstructure:"value-names,omitempty" json:"value-names,omitempty"` Dst string `mapstructure:"dst,omitempty" json:"dst,omitempty"` Separator string `mapstructure:"separator,omitempty" json:"separator,omitempty"` Indent string `mapstructure:"indent,omitempty" json:"indent,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` tags []*regexp.Regexp values []*regexp.Regexp tagNames []*regexp.Regexp valueNames []*regexp.Regexp dst io.Writer sep []byte code *gojq.Code logger *log.Logger } func init() { formatters.Register(processorType, func() formatters.EventProcessor { return &write{ logger: log.New(io.Discard, "", 0), } }) } func (p *write) Init(cfg interface{}, opts ...formatters.Option) error { err := formatters.DecodeConfig(cfg, p) if err != nil { return err } for _, opt := range opts { opt(p) } p.Condition = strings.TrimSpace(p.Condition) q, err := gojq.Parse(p.Condition) if err != nil { return err } p.code, err = gojq.Compile(q) if err != nil { return err } if p.Separator == "" { p.sep = []byte("\n") } else { p.sep = []byte(p.Separator) } p.tags = make([]*regexp.Regexp, 0, len(p.Tags)) for _, reg := range p.Tags { re, err := regexp.Compile(reg) if err != nil { return err } p.tags = append(p.tags, re) } // p.values = make([]*regexp.Regexp, 0, len(p.values)) for _, reg := range p.Values { re, err := regexp.Compile(reg) if err != nil { return err } p.values = append(p.values, re) } // p.tagNames = make([]*regexp.Regexp, 0, len(p.TagNames)) for _, reg := range p.TagNames { re, err := regexp.Compile(reg) if err != nil { return err } p.tagNames = append(p.tagNames, re) } // p.valueNames = make([]*regexp.Regexp, 0, len(p.ValueNames)) for _, reg := range p.ValueNames { re, err := regexp.Compile(reg) if err != nil { return err } p.valueNames = append(p.valueNames, re) } switch p.Dst { case "", "stdout": p.dst = os.Stdout case "stderr": p.dst = os.Stderr default: p.dst, err = os.OpenFile(p.Dst, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) if err != nil { return err } } b, err := json.Marshal(p) if err != nil { p.logger.Printf("initialized processor '%s': %+v", processorType, p) return nil } p.logger.Printf("initialized processor '%s': %s", processorType, string(b)) return nil } func (p *write) Apply(es ...*formatters.EventMsg) []*formatters.EventMsg { OUTER: for _, e := range es { if e == nil { p.dst.Write([]byte("")) continue } ok, err := formatters.CheckCondition(p.code, e) if err != nil { p.logger.Printf("condition check failed: %v", err) } if ok { err := p.write(e) if err != nil { p.logger.Printf("failed to write to destination: %v", err) continue OUTER } } for k, v := range e.Values { for _, re := range p.values { if vs, ok := v.(string); ok { if re.MatchString(vs) { err := p.write(e) if err != nil { p.logger.Printf("failed to write to destination: %v", err) continue OUTER } continue OUTER } } } for _, re := range p.valueNames { if re.MatchString(k) { err := p.write(e) if err != nil { p.logger.Printf("failed to write to destination: %v", err) continue OUTER } continue OUTER } } } for k, v := range e.Tags { for _, re := range p.tagNames { if re.MatchString(k) { err := p.write(e) if err != nil { p.logger.Printf("failed to write to destination: %v", err) continue OUTER } continue OUTER } } for _, re := range p.tags { if re.MatchString(v) { err := p.write(e) if err != nil { p.logger.Printf("failed to write to destination: %v", err) continue OUTER } continue OUTER } } } } return es } func (p *write) WithLogger(l *log.Logger) { if p.Debug && l != nil { p.logger = log.New(l.Writer(), loggingPrefix, l.Flags()) } else if p.Debug { p.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } } func (p *write) write(e *formatters.EventMsg) error { var b []byte var err error if len(p.Indent) > 0 { b, err = json.MarshalIndent(e, "", p.Indent) if err != nil { return err } } else { b, err = json.Marshal(e) if err != nil { return err } } p.dst.Write(append(b, p.sep...)) return nil } ================================================ FILE: pkg/formatters/event_write/event_write_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package event_write import ( "bytes" "io" "log" "testing" "github.com/openconfig/gnmic/pkg/formatters" ) type item struct { input *formatters.EventMsg output string } var testset = map[string]struct { processorType string processor map[string]interface{} tests []item }{ "write_condition": { processorType: processorType, processor: map[string]interface{}{ "condition": `.values.number == "42"`, "separator": "sep", }, tests: []item{ { input: nil, output: "", }, { input: &formatters.EventMsg{ Tags: map[string]string{}, Values: map[string]interface{}{}}, output: "", }, { input: &formatters.EventMsg{ Tags: map[string]string{}, Values: map[string]interface{}{}}, output: "", }, { input: &formatters.EventMsg{ Tags: map[string]string{}, Values: map[string]interface{}{"number": "42"}}, output: `{"values":{"number":"42"}}sep`, }, { input: &formatters.EventMsg{ Tags: map[string]string{"name": "foo"}, Values: map[string]interface{}{"number": "42"}}, output: `{"tags":{"name":"foo"},"values":{"number":"42"}}sep`, }, }, }, "write_values_all": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"."}, "separator": "sep", }, tests: []item{ { input: nil, output: "", }, { input: &formatters.EventMsg{ Tags: map[string]string{}, Values: map[string]interface{}{}}, output: "", }, { input: &formatters.EventMsg{ Tags: map[string]string{}, Values: map[string]interface{}{}}, output: "", }, { input: &formatters.EventMsg{ Tags: map[string]string{}, Values: map[string]interface{}{"number": "42"}}, output: `{"values":{"number":"42"}}sep`, }, { input: &formatters.EventMsg{ Tags: map[string]string{"name": "foo"}, Values: map[string]interface{}{"number": "42"}}, output: `{"tags":{"name":"foo"},"values":{"number":"42"}}sep`, }, }, }, "write_values_some": { processorType: processorType, processor: map[string]interface{}{ "value-names": []string{"^number"}, "separator": "sep", }, tests: []item{ { input: nil, output: "", }, { input: &formatters.EventMsg{ Tags: map[string]string{}, Values: map[string]interface{}{}}, output: "", }, { input: &formatters.EventMsg{ Tags: map[string]string{}, Values: map[string]interface{}{}}, output: "", }, { input: &formatters.EventMsg{ Tags: map[string]string{}, Values: map[string]interface{}{"number": "42"}}, output: `{"values":{"number":"42"}}sep`, }, { input: &formatters.EventMsg{ Tags: map[string]string{"name": "foo"}, Values: map[string]interface{}{"not_number": "42"}}, output: ``, }, }, }, "write_tags_all": { processorType: processorType, processor: map[string]interface{}{ "tag-names": []string{"."}, "separator": "sep", }, tests: []item{ { input: nil, output: "", }, { input: &formatters.EventMsg{ Tags: map[string]string{}, Values: map[string]interface{}{}}, output: "", }, { input: &formatters.EventMsg{ Tags: map[string]string{}, Values: map[string]interface{}{}}, output: "", }, { input: &formatters.EventMsg{ Tags: map[string]string{}, Values: map[string]interface{}{"number": "42"}}, output: ``, }, { input: &formatters.EventMsg{ Tags: map[string]string{"name": "foo"}, Values: map[string]interface{}{"number": "42"}}, output: `{"tags":{"name":"foo"},"values":{"number":"42"}}sep`, }, }, }, "write_tags_some": { processorType: processorType, processor: map[string]interface{}{ "tag-names": []string{"^name"}, "separator": "sep", }, tests: []item{ { input: nil, output: "", }, { input: &formatters.EventMsg{ Tags: map[string]string{}, Values: map[string]interface{}{}}, output: "", }, { input: &formatters.EventMsg{ Tags: map[string]string{}, Values: map[string]interface{}{}}, output: "", }, { input: &formatters.EventMsg{ Tags: map[string]string{}, Values: map[string]interface{}{"number": "42"}}, output: ``, }, { input: &formatters.EventMsg{ Tags: map[string]string{"name": "foo"}, Values: map[string]interface{}{"number": "42"}}, output: `{"tags":{"name":"foo"},"values":{"number":"42"}}sep`, }, }, }, } func TestEventWrite(t *testing.T) { for name, ts := range testset { p := &write{logger: log.New(io.Discard, "", 0)} err := p.Init(ts.processor) if err != nil { t.Errorf("failed to initialize processors: %v", err) return } t.Logf("initialized for test %s: %+v", name, p) for i, item := range ts.tests { t.Run(name, func(t *testing.T) { buff := new(bytes.Buffer) p.dst = buff t.Logf("running '%s' test item %d", name, i) p.Apply(item.input) if buff.String() != item.output { t.Errorf("failed at %s item %d, expected %+v, got: %+v", name, i, item.output, buff.String()) } }) } } } ================================================ FILE: pkg/formatters/flat.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package formatters import ( "errors" "path/filepath" "github.com/openconfig/gnmi/proto/gnmi" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmic/pkg/api/path" ) func ResponsesFlat(msgs ...proto.Message) (map[string]interface{}, error) { rs := make(map[string]interface{}) for _, msg := range msgs { mr, err := responseFlat(msg) if err != nil { return nil, err } for k, v := range mr { rs[k] = v } } return rs, nil } func responseFlat(msg proto.Message) (map[string]interface{}, error) { switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.GetResponse: rs := make(map[string]interface{}) for _, n := range msg.GetNotification() { prefix := path.GnmiPathToXPath(n.GetPrefix(), false) for _, u := range n.GetUpdate() { p := path.GnmiPathToXPath(u.GetPath(), false) // If there is no prefix whatsoever, prepend // leading slash to the path if prefix == "" { p = filepath.Join("/", p) } // If a prefix is populated without an origin, // prepend leading slash to the prefix if n.GetPrefix().GetOrigin() == "" && n.GetPrefix().GetElem() != nil { prefix = filepath.Join("/", prefix) } vmap, err := getValueFlat(filepath.Join(prefix, p), u.GetVal()) if err != nil { return nil, err } if len(vmap) == 0 { rs[p] = "{}" continue } for p, v := range vmap { rs[p] = v } } } return rs, nil case *gnmi.SubscribeResponse: rs := make(map[string]interface{}) n := msg.GetUpdate() if n != nil { prefix := path.GnmiPathToXPath(n.GetPrefix(), false) for _, u := range n.GetUpdate() { p := path.GnmiPathToXPath(u.GetPath(), false) // If there is no prefix whatsoever, prepend // leading slash to the path if prefix == "" { p = filepath.Join("/", p) } // If a prefix is populated without an origin, // prepend leading slash to the prefix if n.GetPrefix().GetOrigin() == "" && n.GetPrefix().GetElem() != nil { prefix = filepath.Join("/", prefix) } vmap, err := getValueFlat(filepath.Join(prefix, p), u.GetVal()) if err != nil { return nil, err } if len(vmap) == 0 { rs[p] = "{}" continue } for p, v := range vmap { rs[p] = v } } } return rs, nil } return nil, errors.New("unsupported message type") } ================================================ FILE: pkg/formatters/formats.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package formatters import ( "bytes" "fmt" "sort" "time" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/encoding/prototext" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/utils" ) type MarshalOptions struct { Multiline bool Indent string Format string OverrideTS bool ValuesOnly bool CalculateLatency bool RegisteredExtensions utils.RegisteredExtensions ProtoFiles []string ProtoDir []string } // Marshal // func (o *MarshalOptions) Marshal(msg proto.Message, meta map[string]string, eps ...EventProcessor) ([]byte, error) { msg = o.OverrideTimestamp(msg) switch o.Format { default: // json return o.FormatJSON(msg, meta) case "proto": return proto.Marshal(msg) case "protojson": return protojson.MarshalOptions{Multiline: o.Multiline, Indent: o.Indent}.Marshal(msg) case "prototext": return prototext.MarshalOptions{Multiline: o.Multiline, Indent: o.Indent}.Marshal(msg) case "event": b := make([]byte, 0) switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.SubscribeResponse: var subscriptionName string var ok bool if subscriptionName, ok = meta["subscription-name"]; !ok { subscriptionName = "default" } switch msg.GetResponse().(type) { case *gnmi.SubscribeResponse_Update: events, err := ResponseToEventMsgs(subscriptionName, msg, meta, eps...) if err != nil { return nil, fmt.Errorf("failed converting response to events: %v", err) } if len(events) == 0 { return nil, nil } if o.Multiline { b, err = jsonMarshalIndent(events, "", o.Indent) } else { b, err = jsonMarshal(events) } if err != nil { return nil, fmt.Errorf("failed marshaling format 'event': %v", err) } } return b, nil case *gnmi.GetResponse: events, err := GetResponseToEventMsgs(msg, meta, eps...) if err != nil { return nil, fmt.Errorf("failed converting response to events: %v", err) } if o.Multiline { b, err = jsonMarshalIndent(events, "", o.Indent) } else { b, err = jsonMarshal(events) } if err != nil { return nil, fmt.Errorf("failed marshaling format 'event': %v", err) } return b, nil default: return nil, fmt.Errorf("format 'event' not supported for msg type %T", msg.ProtoReflect().Interface()) } case "flat": flatMsg, err := responseFlat(msg) if err != nil { return nil, err } msgLen := len(flatMsg) if msgLen == 0 { return nil, nil } sortedPaths := make([]string, 0, msgLen) for k := range flatMsg { sortedPaths = append(sortedPaths, k) } sort.Strings(sortedPaths) buf := new(bytes.Buffer) for _, p := range sortedPaths { buf.WriteString(fmt.Sprintf("%s: %v\n", p, flatMsg[p])) } return buf.Bytes(), nil } } func (o *MarshalOptions) OverrideTimestamp(msg proto.Message) proto.Message { if o.OverrideTS { ts := time.Now().UnixNano() switch msg := msg.ProtoReflect().Interface().(type) { case *gnmi.SubscribeResponse: switch msg.GetResponse().(type) { case *gnmi.SubscribeResponse_Update: upd := msg.GetUpdate() if upd != nil { upd.Timestamp = ts } return msg } } } return msg } ================================================ FILE: pkg/formatters/json.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package formatters import ( "bytes" "encoding/json" "strings" "sync" "time" "github.com/fullstorydev/grpcurl" "github.com/jhump/protoreflect/dynamic" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmi/proto/gnmi_ext" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmic/pkg/api/path" "github.com/openconfig/gnmic/pkg/utils" ) var bytesBufferPool = sync.Pool{ New: func() any { return new(bytes.Buffer) }, } // jsonMarshal encodes v to JSON without HTML-escaping '<', '>', or '&'. func jsonMarshal(v any) ([]byte, error) { buf := bytesBufferPool.Get().(*bytes.Buffer) defer func() { buf.Reset() bytesBufferPool.Put(buf) }() enc := json.NewEncoder(buf) enc.SetEscapeHTML(false) if err := enc.Encode(v); err != nil { return nil, err } result := bytes.TrimRight(buf.Bytes(), "\n") out := make([]byte, len(result)) copy(out, result) return out, nil } // jsonMarshalIndent is like jsonMarshal but applies indented formatting. func jsonMarshalIndent(v any, prefix, indent string) ([]byte, error) { buf := bytesBufferPool.Get().(*bytes.Buffer) defer func() { buf.Reset() bytesBufferPool.Put(buf) }() enc := json.NewEncoder(buf) enc.SetEscapeHTML(false) enc.SetIndent(prefix, indent) if err := enc.Encode(v); err != nil { return nil, err } result := bytes.TrimRight(buf.Bytes(), "\n") out := make([]byte, len(result)) copy(out, result) return out, nil } func formatRegisteredExtensions( extensions []*gnmi_ext.Extension, protoDir, protoFiles []string, extensionDecodeMap utils.RegisteredExtensions, ) (map[int32]decodedExtension, error) { decodedExtensions := map[int32]decodedExtension{} if len(extensions) == 0 { return decodedExtensions, nil } if len(protoFiles) == 0 { return decodedExtensions, nil } descSource, err := grpcurl.DescriptorSourceFromProtoFiles(protoDir, protoFiles...) if err != nil { return nil, err } for _, ext := range extensions { rext := ext.GetRegisteredExt() if rext == nil { continue } id := int32(rext.Id) msg, exists := extensionDecodeMap[id] if !exists { continue } desc, err := descSource.FindSymbol(msg) if err != nil { return nil, err } pm := dynamic.NewMessage(desc.GetFile().FindMessage(msg)) if err = pm.Unmarshal(rext.Msg); err != nil { return nil, err } jsondata, err := pm.MarshalJSON() if err != nil { return nil, err } msgJson := map[string]any{} if err = json.Unmarshal(jsondata, &msgJson); err != nil { return nil, err } decodedExtensions[id] = msgJson } return decodedExtensions, nil } // FormatJSON formats a proto.Message and returns a []byte and an error func (o *MarshalOptions) FormatJSON(m proto.Message, meta map[string]string) ([]byte, error) { if m == nil { return nil, nil } switch m := m.ProtoReflect().Interface().(type) { case *gnmi.CapabilityRequest: return o.formatCapabilitiesRequest(m) case *gnmi.CapabilityResponse: return o.formatCapabilitiesResponse(m) case *gnmi.GetRequest: return o.formatGetRequest(m) case *gnmi.GetResponse: return o.formatGetResponse(m, meta) case *gnmi.SetRequest: return o.formatSetRequest(m) case *gnmi.SetResponse: return o.formatSetResponse(m, meta) case *gnmi.SubscribeRequest: return o.formatSubscribeRequest(m) case *gnmi.SubscribeResponse: return o.formatSubscribeResponse(m, meta) } return nil, nil } func (o *MarshalOptions) formatSubscribeRequest(m *gnmi.SubscribeRequest) ([]byte, error) { msg := subscribeReq{} switch m := m.Request.(type) { case *gnmi.SubscribeRequest_Subscribe: msg.Subscribe.Prefix = path.GnmiPathToXPath(m.Subscribe.GetPrefix(), false) msg.Subscribe.Target = m.Subscribe.GetPrefix().GetTarget() msg.Subscribe.Subscriptions = make([]subscription, 0, len(m.Subscribe.GetSubscription())) if m.Subscribe != nil { msg.Subscribe.AllowAggregation = m.Subscribe.AllowAggregation msg.Subscribe.UpdatesOnly = m.Subscribe.UpdatesOnly msg.Subscribe.Encoding = m.Subscribe.Encoding.String() msg.Subscribe.Mode = m.Subscribe.Mode.String() if m.Subscribe.Qos != nil { msg.Subscribe.Qos = m.Subscribe.GetQos().GetMarking() } for _, sub := range m.Subscribe.Subscription { msg.Subscribe.Subscriptions = append(msg.Subscribe.Subscriptions, subscription{ Path: path.GnmiPathToXPath(sub.Path, false), Mode: sub.GetMode().String(), SampleInterval: sub.SampleInterval, HeartbeatInterval: sub.HeartbeatInterval, SuppressRedundant: sub.SuppressRedundant, }) } } case *gnmi.SubscribeRequest_Poll: msg.Poll = new(poll) } if len(m.GetExtension()) > 0 { msg.Extensions = m.GetExtension() } if o.Multiline { return jsonMarshalIndent(msg, "", o.Indent) } return jsonMarshal(msg) } func (o *MarshalOptions) formatSubscribeResponse(m *gnmi.SubscribeResponse, meta map[string]string) ([]byte, error) { dext, err := formatRegisteredExtensions(m.GetExtension(), o.ProtoDir, o.ProtoFiles, o.RegisteredExtensions) if err != nil { return nil, err } switch mr := m.GetResponse().(type) { default: if len(m.GetExtension()) > 0 { msg := notificationRspMsg{ Extensions: m.GetExtension(), DecodedExtensions: dext, } if o.Multiline { return jsonMarshalIndent(msg, "", o.Indent) } return jsonMarshal(msg) } case *gnmi.SubscribeResponse_SyncResponse: msg := &syncResponseMsg{ SyncResponse: mr.SyncResponse, Extensions: m.GetExtension(), DecodedExtensions: dext, } if o.Multiline { return jsonMarshalIndent(msg, "", o.Indent) } return jsonMarshal(msg) case *gnmi.SubscribeResponse_Update: msg := notificationRspMsg{ Timestamp: mr.Update.Timestamp, } t := time.Unix(0, mr.Update.Timestamp) msg.Time = &t if o.CalculateLatency { msg.RecvTimestamp = time.Now().UnixNano() rt := time.Unix(0, msg.RecvTimestamp) msg.RecvTime = &rt msg.LatencyNano = msg.RecvTimestamp - msg.Timestamp msg.LatencyMilli = msg.LatencyNano / 1000 / 1000 } if meta == nil { meta = make(map[string]string) } msg.Prefix = path.GnmiPathToXPath(mr.Update.GetPrefix(), false) msg.Target = mr.Update.Prefix.GetTarget() if s, ok := meta["source"]; ok { msg.Source = s } if s, ok := meta["system-name"]; ok { msg.SystemName = s } if s, ok := meta["subscription-name"]; ok { msg.SubscriptionName = s } for i, upd := range mr.Update.Update { if upd.Path == nil { upd.Path = new(gnmi.Path) } pathElems := make([]string, 0, len(upd.Path.Elem)) for _, pElem := range upd.Path.Elem { pathElems = append(pathElems, pElem.GetName()) } value, err := getValue(upd.Val) if err != nil { return nil, err } msg.Updates = append(msg.Updates, update{ Path: path.GnmiPathToXPath(upd.Path, false), Values: make(map[string]interface{}), }) msg.Updates[i].Values[strings.Join(pathElems, "/")] = value } for _, del := range mr.Update.Delete { msg.Deletes = append(msg.Deletes, path.GnmiPathToXPath(del, false)) } if len(m.GetExtension()) > 0 { msg.Extensions = m.GetExtension() msg.DecodedExtensions = dext } if o.Multiline { return jsonMarshalIndent(msg, "", o.Indent) } return jsonMarshal(msg) } return nil, nil } func (o *MarshalOptions) formatCapabilitiesRequest(m *gnmi.CapabilityRequest) ([]byte, error) { capReq := capRequest{ Extensions: m.Extension, } if o.Multiline { return jsonMarshalIndent(capReq, "", o.Indent) } return jsonMarshal(capReq) } func (o *MarshalOptions) formatCapabilitiesResponse(m *gnmi.CapabilityResponse) ([]byte, error) { capRspMsg := capResponse{ Extensions: m.Extension, } capRspMsg.Version = m.GetGNMIVersion() for _, sm := range m.SupportedModels { capRspMsg.SupportedModels = append(capRspMsg.SupportedModels, model{ Name: sm.GetName(), Organization: sm.GetOrganization(), Version: sm.GetVersion(), }) } for _, se := range m.SupportedEncodings { capRspMsg.Encodings = append(capRspMsg.Encodings, se.String()) } if o.Multiline { return jsonMarshalIndent(capRspMsg, "", o.Indent) } return jsonMarshal(capRspMsg) } func (o *MarshalOptions) formatGetRequest(m *gnmi.GetRequest) ([]byte, error) { msg := getRqMsg{ Prefix: path.GnmiPathToXPath(m.GetPrefix(), false), Target: m.GetPrefix().GetTarget(), Paths: make([]string, 0, len(m.Path)), Encoding: m.GetEncoding().String(), DataType: m.GetType().String(), Extensions: m.Extension, } for _, p := range m.Path { msg.Paths = append(msg.Paths, path.GnmiPathToXPath(p, false)) } for _, um := range m.UseModels { msg.Models = append(msg.Models, model{ Name: um.GetName(), Organization: um.GetOrganization(), Version: um.GetVersion(), }) } if o.Multiline { return jsonMarshalIndent(msg, "", o.Indent) } return jsonMarshal(msg) } func (o *MarshalOptions) formatGetResponse(m *gnmi.GetResponse, meta map[string]string) ([]byte, error) { dext, err := formatRegisteredExtensions(m.GetExtension(), o.ProtoDir, o.ProtoFiles, o.RegisteredExtensions) if err != nil { return nil, err } getRsp := getRspMsg{ Notifications: make([]notificationRspMsg, 0, len(m.GetNotification())), Extensions: m.GetExtension(), DecodedExtensions: dext, } for _, notif := range m.GetNotification() { msg := notificationRspMsg{ Prefix: path.GnmiPathToXPath(notif.GetPrefix(), false), Updates: make([]update, 0, len(notif.GetUpdate())), Deletes: make([]string, 0, len(notif.GetDelete())), } msg.Timestamp = notif.Timestamp t := time.Unix(0, notif.Timestamp) msg.Time = &t if o.CalculateLatency && !o.ValuesOnly { msg.RecvTimestamp = time.Now().UnixNano() rt := time.Unix(0, msg.RecvTimestamp) msg.RecvTime = &rt msg.LatencyNano = msg.RecvTimestamp - msg.Timestamp msg.LatencyMilli = msg.LatencyNano / 1000 / 1000 } if meta == nil { meta = make(map[string]string) } msg.Prefix = path.GnmiPathToXPath(notif.GetPrefix(), false) msg.Target = notif.GetPrefix().GetTarget() if s, ok := meta["source"]; ok { msg.Source = s } for i, upd := range notif.GetUpdate() { pathElems := make([]string, 0, len(upd.GetPath().GetElem())) for _, pElem := range upd.GetPath().GetElem() { pathElems = append(pathElems, pElem.GetName()) } value, err := getValue(upd.GetVal()) if err != nil { return nil, err } msg.Updates = append(msg.Updates, update{ Path: path.GnmiPathToXPath(upd.GetPath(), false), Values: make(map[string]interface{}), }) msg.Updates[i].Values[strings.Join(pathElems, "/")] = value } for _, del := range notif.GetDelete() { msg.Deletes = append(msg.Deletes, path.GnmiPathToXPath(del, false)) } getRsp.Notifications = append(getRsp.Notifications, msg) } if o.ValuesOnly { result := make([]interface{}, 0, len(getRsp.Notifications)) for _, n := range getRsp.Notifications { for _, u := range n.Updates { for _, v := range u.Values { result = append(result, v) } } } return jsonMarshalIndent(result, "", " ") } var data any if len(getRsp.Extensions) > 0 { data = getRsp } else { data = getRsp.Notifications } if o.Multiline { return jsonMarshalIndent(data, "", o.Indent) } return jsonMarshal(data) } func (o *MarshalOptions) formatSetRequest(m *gnmi.SetRequest) ([]byte, error) { req := setReqMsg{ Prefix: path.GnmiPathToXPath(m.GetPrefix(), false), Target: m.GetPrefix().GetTarget(), Delete: make([]string, 0, len(m.GetDelete())), Replace: make([]updateMsg, 0, len(m.GetReplace())), Update: make([]updateMsg, 0, len(m.GetUpdate())), Extensions: m.GetExtension(), } for _, del := range m.GetDelete() { p := path.GnmiPathToXPath(del, false) req.Delete = append(req.Delete, p) } for _, upd := range m.GetReplace() { req.Replace = append(req.Replace, updateMsg{ Path: path.GnmiPathToXPath(upd.GetPath(), false), Val: upd.Val.String(), }) } for _, upd := range m.GetUpdate() { req.Update = append(req.Update, updateMsg{ Path: path.GnmiPathToXPath(upd.GetPath(), false), Val: upd.Val.String(), }) } if o.Multiline { return jsonMarshalIndent(req, "", o.Indent) } return jsonMarshal(req) } func (o *MarshalOptions) formatSetResponse(m *gnmi.SetResponse, meta map[string]string) ([]byte, error) { dext, err := formatRegisteredExtensions(m.GetExtension(), o.ProtoDir, o.ProtoFiles, o.RegisteredExtensions) if err != nil { return nil, err } msg := setRspMsg{ Prefix: path.GnmiPathToXPath(m.GetPrefix(), false), Target: m.GetPrefix().GetTarget(), Timestamp: m.GetTimestamp(), Time: time.Unix(0, m.Timestamp), Extensions: m.GetExtension(), DecodedExtensions: dext, } if meta == nil { meta = make(map[string]string) } msg.Results = make([]updateResultMsg, 0, len(m.GetResponse())) if s, ok := meta["source"]; ok { msg.Source = s } for _, u := range m.GetResponse() { msg.Results = append(msg.Results, updateResultMsg{ Operation: u.Op.String(), Path: path.GnmiPathToXPath(u.GetPath(), false), Target: u.GetPath().GetTarget(), }) } if o.Multiline { return jsonMarshalIndent(msg, "", o.Indent) } return jsonMarshal(msg) } ================================================ FILE: pkg/formatters/msg.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package formatters import ( "encoding/json" "time" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmi/proto/gnmi_ext" ) type syncResponseMsg struct { SyncResponse bool `json:"sync-response,omitempty"` Extensions []*gnmi_ext.Extension `json:"extensions,omitempty"` DecodedExtensions map[int32]decodedExtension `json:"decodedExtensions,omitempty"` } type notificationRspMsg struct { Meta map[string]interface{} `json:"meta,omitempty"` Source string `json:"source,omitempty"` SystemName string `json:"system-name,omitempty"` SubscriptionName string `json:"subscription-name,omitempty"` Timestamp int64 `json:"timestamp,omitempty"` Time *time.Time `json:"time,omitempty"` RecvTimestamp int64 `json:"recv-timestamp,omitempty"` RecvTime *time.Time `json:"recv-time,omitempty"` LatencyNano int64 `json:"latency-nano,omitempty"` LatencyMilli int64 `json:"latency-milli,omitempty"` Prefix string `json:"prefix,omitempty"` Target string `json:"target,omitempty"` Updates []update `json:"updates,omitempty"` Deletes []string `json:"deletes,omitempty"` Extensions []*gnmi_ext.Extension `json:"extensions,omitempty"` DecodedExtensions map[int32]decodedExtension `json:"decodedExtensions,omitempty"` } type update struct { Path string Values map[string]interface{} `json:"values,omitempty"` } type capRequest struct { Extensions []*gnmi_ext.Extension `json:"extensions,omitempty"` } type capResponse struct { Version string `json:"version,omitempty"` SupportedModels []model `json:"supported-models,omitempty"` Encodings []string `json:"encodings,omitempty"` Extensions []*gnmi_ext.Extension `json:"extensions,omitempty"` } type model struct { Name string `json:"name,omitempty"` Organization string `json:"organization,omitempty"` Version string `json:"version,omitempty"` } type getRqMsg struct { Prefix string `json:"prefix,omitempty"` Target string `json:"target,omitempty"` Paths []string `json:"paths,omitempty"` Encoding string `json:"encoding,omitempty"` DataType string `json:"data-type,omitempty"` Models []model `json:"models,omitempty"` Extensions []*gnmi_ext.Extension `json:"extensions,omitempty"` } type decodedExtension map[string]any type getRspMsg struct { Notifications []notificationRspMsg `json:"notifications,omitempty"` Extensions []*gnmi_ext.Extension `json:"extensions,omitempty"` DecodedExtensions map[int32]decodedExtension `json:"decodedExtensions,omitempty"` } type setRspMsg struct { Source string `json:"source,omitempty"` Timestamp int64 `json:"timestamp,omitempty"` Time time.Time `json:"time,omitempty"` Prefix string `json:"prefix,omitempty"` Target string `json:"target,omitempty"` Results []updateResultMsg `json:"results,omitempty"` Extensions []*gnmi_ext.Extension `json:"extensions,omitempty"` DecodedExtensions map[int32]decodedExtension `json:"decodedExtensions,omitempty"` } type updateResultMsg struct { Operation string `json:"operation,omitempty"` Path string `json:"path,omitempty"` Target string `json:"target,omitempty"` } type setReqMsg struct { Prefix string `json:"prefix,omitempty"` Target string `json:"target,omitempty"` Delete []string `json:"delete,omitempty"` Replace []updateMsg `json:"replace,omitempty"` Update []updateMsg `json:"update,omitempty"` Extensions []*gnmi_ext.Extension `json:"extensions,omitempty"` } type updateMsg struct { Path string `json:"path,omitempty"` Val string `json:"val,omitempty"` } type subscribeReq struct { Subscribe subscribe `json:"subscribe,omitempty"` Poll *poll `json:"poll,omitempty"` Aliases map[string]string `json:"aliases,omitempty"` Extensions []*gnmi_ext.Extension `json:"extensions,omitempty"` } type poll struct{} type subscribe struct { Target string `json:"target,omitempty"` Prefix string `json:"prefix,omitempty"` Subscriptions []subscription `json:"subscriptions,omitempty"` UseAliases bool `json:"use-aliases,omitempty"` Qos uint32 `json:"qos,omitempty"` Mode string `json:"mode,omitempty"` AllowAggregation bool `json:"allow-aggregation,omitempty"` UseModels []model `json:"use-models,omitempty"` Encoding string `json:"encoding,omitempty"` UpdatesOnly bool `json:"updates-only,omitempty"` } type subscription struct { Path string `json:"path,omitempty"` Mode string `json:"mode,omitempty"` SampleInterval uint64 `json:"sample-interval,omitempty"` SuppressRedundant bool `json:"suppress-redundant,omitempty"` HeartbeatInterval uint64 `json:"heartbeat-interval,omitempty"` } func getValue(updValue *gnmi.TypedValue) (interface{}, error) { if updValue == nil { return nil, nil } var value interface{} var jsondata []byte switch updValue.Value.(type) { case *gnmi.TypedValue_AsciiVal: value = updValue.GetAsciiVal() case *gnmi.TypedValue_BoolVal: value = updValue.GetBoolVal() case *gnmi.TypedValue_BytesVal: value = updValue.GetBytesVal() case *gnmi.TypedValue_DecimalVal: //lint:ignore SA1019 still need DecimalVal for backward compatibility value = updValue.GetDecimalVal() case *gnmi.TypedValue_FloatVal: //lint:ignore SA1019 still need GetFloatVal for backward compatibility value = updValue.GetFloatVal() case *gnmi.TypedValue_DoubleVal: value = updValue.GetDoubleVal() case *gnmi.TypedValue_IntVal: value = updValue.GetIntVal() case *gnmi.TypedValue_StringVal: value = updValue.GetStringVal() case *gnmi.TypedValue_UintVal: value = updValue.GetUintVal() case *gnmi.TypedValue_JsonIetfVal: jsondata = updValue.GetJsonIetfVal() case *gnmi.TypedValue_JsonVal: jsondata = updValue.GetJsonVal() case *gnmi.TypedValue_LeaflistVal: value = updValue.GetLeaflistVal() case *gnmi.TypedValue_ProtoBytes: value = updValue.GetProtoBytes() case *gnmi.TypedValue_AnyVal: value = updValue.GetAnyVal() } if value == nil && len(jsondata) != 0 { err := json.Unmarshal(jsondata, &value) if err != nil { return nil, err } } return value, nil } ================================================ FILE: pkg/formatters/plugin_manager/manager.go ================================================ package plugin_manager import ( "fmt" "io" "os" "os/exec" "path/filepath" "sync" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "github.com/openconfig/gnmic/pkg/config" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/formatters/event_plugin" ) var handshakeConfig = plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "GNMIC_PLUGIN", MagicCookieValue: "gnmic", } type PluginManager struct { config *config.PluginsConfig logOutput io.Writer m *sync.Mutex pluginClients []*plugin.Client logger hclog.Logger } func New(pc *config.PluginsConfig, logOutput io.Writer) *PluginManager { pm := &PluginManager{ config: pc, logOutput: logOutput, m: new(sync.Mutex), pluginClients: make([]*plugin.Client, 0), } pm.logger = hclog.New( &hclog.LoggerOptions{ Name: "plugin-manager", Level: hclog.Info, Output: logOutput, TimeFormat: "2006/01/02 15:04:05.999999", }, ) if pc.Debug { pm.logger.SetLevel(hclog.Debug) } return pm } func (p *PluginManager) Load() error { if p.config == nil { return nil } // discover plugins in the supplied path pluginPaths, err := plugin.Discover(p.config.Glob, p.config.Path) if err != nil { return err } // initialize plugins clients and register plugin processors for _, pluginPath := range pluginPaths { name := filepath.Base(pluginPath) formatters.EventProcessorTypes = append(formatters.EventProcessorTypes, name) formatters.Register(name, p.initProcessorFn(name, pluginPath)) } return nil } func (p *PluginManager) Cleanup() { p.m.Lock() defer p.m.Unlock() for _, client := range p.pluginClients { client.Kill() } } func (p *PluginManager) initProcessorFn(name, pluginPath string) func() formatters.EventProcessor { return func() formatters.EventProcessor { client := plugin.NewClient(&plugin.ClientConfig{ HandshakeConfig: handshakeConfig, Plugins: map[string]plugin.Plugin{name: &event_plugin.EventProcessorPlugin{}}, Cmd: exec.Command(pluginPath), StartTimeout: p.config.StartTimeout, SyncStdout: p.logOutput, SyncStderr: p.logOutput, Logger: p.logger, }) p.m.Lock() p.pluginClients = append(p.pluginClients, client) p.m.Unlock() rpcClient, err := client.Client() if err != nil { fmt.Fprintf(os.Stderr, "failed to initialize plugin processor %s: %v\n", name, err) os.Exit(1) } raw, err := rpcClient.Dispense(name) if err != nil { fmt.Fprintf(os.Stderr, "failed to dispense plugin processor %s: %v\n", name, err) os.Exit(1) } eventPlugin, ok := raw.(formatters.EventProcessor) if !ok { err := fmt.Errorf("plugin %s dispensed an unexpected interface: %T", name, raw) fmt.Fprintf(os.Stderr, "%v\n", err) os.Exit(1) } return eventPlugin } } ================================================ FILE: pkg/formatters/processors.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package formatters import ( "encoding/json" "fmt" "log" "github.com/itchyny/gojq" "github.com/mitchellh/mapstructure" "github.com/openconfig/gnmic/pkg/api/types" ) var EventProcessors = map[string]Initializer{} var EventProcessorTypes = []string{ "event-add-tag", "event-allow", "event-combine", "event-convert", "event-data-convert", "event-date-string", "event-delete", "event-drop", "event-duration-convert", "event-extract-tags", "event-group-by", "event-ieeefloat32", "event-jq", "event-merge", "event-override-ts", "event-rate-limit", "event-starlark", "event-strings", "event-time-epoch", "event-to-tag", "event-trigger", "event-value-tag", "event-value-tag-v2", "event-write", } type Initializer func() EventProcessor func Register(name string, initFn Initializer) { EventProcessors[name] = initFn } type Option func(EventProcessor) type EventProcessor interface { Init(interface{}, ...Option) error Apply(...*EventMsg) []*EventMsg WithTargets(map[string]*types.TargetConfig) WithLogger(l *log.Logger) WithActions(act map[string]map[string]interface{}) WithProcessors(procs map[string]map[string]any) } func DecodeConfig(src, dst interface{}) error { decoder, err := mapstructure.NewDecoder( &mapstructure.DecoderConfig{ DecodeHook: mapstructure.StringToTimeDurationHookFunc(), Result: dst, }, ) if err != nil { return err } return decoder.Decode(src) } func WithLogger(l *log.Logger) Option { return func(p EventProcessor) { p.WithLogger(l) } } func WithTargets(tcs map[string]*types.TargetConfig) Option { return func(p EventProcessor) { p.WithTargets(tcs) } } func WithActions(acts map[string]map[string]interface{}) Option { return func(p EventProcessor) { p.WithActions(acts) } } func WithProcessors(procs map[string]map[string]interface{}) Option { return func(p EventProcessor) { p.WithProcessors(procs) } } func CheckCondition(code *gojq.Code, e *EventMsg) (bool, error) { if code == nil { return true, nil } var res interface{} input := make(map[string]interface{}) b, err := json.Marshal(e) if err != nil { return false, err } err = json.Unmarshal(b, &input) if err != nil { return false, err } iter := code.Run(input) var ok bool res, ok = iter.Next() // iterator not done, so the final result won't be a boolean if !ok { return false, nil } if err, ok = res.(error); ok { return false, err } switch res := res.(type) { case bool: return res, nil default: return false, fmt.Errorf("unexpected condition return type: %T | %v", res, res) } } func MakeEventProcessors( logger *log.Logger, processorNames []string, ps map[string]map[string]any, tcs map[string]*types.TargetConfig, acts map[string]map[string]any, ) ([]EventProcessor, error) { evps := make([]EventProcessor, len(processorNames)) for i, epName := range processorNames { if epCfg, ok := ps[epName]; ok { ep, err := MakeProcessor(logger, epName, epCfg, ps, tcs, acts) if err != nil { return nil, err } evps[i] = ep continue } return nil, fmt.Errorf("%q event processor not found", epName) } return evps, nil } func MakeProcessor(logger *log.Logger, name string, cfg map[string]any, ps map[string]map[string]any, tcs map[string]*types.TargetConfig, acts map[string]map[string]any) (EventProcessor, error) { epType := "" for k := range cfg { epType = k break } if in, ok := EventProcessors[epType]; ok { ep := in() err := ep.Init(cfg[epType], WithLogger(logger), WithTargets(tcs), WithActions(acts), WithProcessors(ps), ) if err != nil { return nil, fmt.Errorf("failed initializing event processor '%s' of type='%s': %w", name, epType, err) } logger.Printf("added event processor '%s' of type=%s to output", name, epType) return ep, nil } return nil, fmt.Errorf("%q event processor has an unknown type=%q", name, epType) } type BaseProcessor struct { logger *log.Logger } func (p *BaseProcessor) WithLogger(l *log.Logger) { p.logger = l } func (p *BaseProcessor) Init(interface{}, ...Option) error { return nil } func (p *BaseProcessor) Apply(...*EventMsg) []*EventMsg { return nil } func (p *BaseProcessor) WithTargets(map[string]*types.TargetConfig) { } func (p *BaseProcessor) WithActions(act map[string]map[string]interface{}) { } func (p *BaseProcessor) WithProcessors(procs map[string]map[string]any) { } ================================================ FILE: pkg/formatters/processors_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package formatters import ( "testing" "time" "github.com/itchyny/gojq" ) var testset = map[string]struct { condition string input []*EventMsg result bool }{ "always_true": { condition: "any([true])", input: []*EventMsg{ { Name: "dummy1", Timestamp: time.Now().Unix(), Tags: map[string]string{"t1": "t1v"}, Values: map[string]interface{}{ "path/dummy": 1, }, }, { Name: "dummy2", Timestamp: time.Now().Unix(), Tags: map[string]string{"t1": "t1v"}, Values: map[string]interface{}{ "path/dummy": 1, }, }, }, result: true, }, } func TestCheckCondition(t *testing.T) { for name, item := range testset { t.Run(name, func(t *testing.T) { t.Logf("running test item %s", name) q, err := gojq.Parse(item.condition) if err != nil { t.Logf("condition parse failed :%v", err) t.Fail() } code, err := gojq.Compile(q) if err != nil { t.Logf("query compile failed :%v", err) t.Fail() } for _, in := range item.input { ok, err := CheckCondition(code, in) if err != nil { t.Logf("check condition failed :%v", err) t.Fail() } if ok != item.result { t.Logf("failed at %q", name) t.Logf("expected: (%T)%+v", item.result, item.result) t.Logf(" got: (%T)%+v", ok, ok) t.Fail() } } }) } } ================================================ FILE: pkg/gtemplate/template.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package gtemplate import ( "path" "text/template" ) func CreateTemplate(name, text string) (*template.Template, error) { return template.New(name). Option("missingkey=zero"). Funcs(NewTemplateEngine().CreateFuncs()). Parse(text) } func CreateFileTemplate(filename string) (*template.Template, error) { name := path.Base(filename) tpl, err := template.New(name). Funcs(NewTemplateEngine().CreateFuncs()). ParseFiles(filename) template.Must(tpl, err) return tpl, err } ================================================ FILE: pkg/gtemplate/template_funcs.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package gtemplate import ( "context" "text/template" "github.com/hairyhenderson/gomplate/v3" "github.com/hairyhenderson/gomplate/v3/data" ) type templateEngine interface { CreateFuncs() template.FuncMap } func NewTemplateEngine() templateEngine { return &gmplt{} } type gmplt struct{} func (*gmplt) CreateFuncs() template.FuncMap { return gomplate.CreateFuncs(context.TODO(), new(data.Data)) } ================================================ FILE: pkg/inputs/all/all.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package all import ( _ "github.com/openconfig/gnmic/pkg/inputs/jetstream_input" _ "github.com/openconfig/gnmic/pkg/inputs/kafka_input" _ "github.com/openconfig/gnmic/pkg/inputs/nats_input" ) ================================================ FILE: pkg/inputs/input.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package inputs import ( "context" "fmt" "log" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/outputs" "github.com/openconfig/gnmic/pkg/pipeline" pkgutils "github.com/openconfig/gnmic/pkg/utils" "github.com/zestor-dev/zestor/store" "google.golang.org/protobuf/proto" ) type Input interface { // Start initializes the input and starts it. Start(context.Context, string, map[string]any, ...Option) error // Validate validates the input configuration. Validate(map[string]any) error // Update updates the input configuration in place for // a running input. Update(map[string]any) error // UpdateProcessor updates the named processor configuration // for a running input. // if the processor is not used by the Input, it will be ignored. UpdateProcessor(string, map[string]any) error // Close stops the input. Close() error } type Initializer func() Input var InputTypes = []string{ "nats", "kafka", "jetstream", } var Inputs = map[string]Initializer{} func Register(name string, initFn Initializer) { Inputs[name] = initFn } type InputOptions struct { Logger *log.Logger Outputs map[string]outputs.Output Name string Store store.Store[any] Pipeline chan *pipeline.Msg } type PipeMessage interface { Proto() proto.Message Meta() outputs.Meta Events() []*formatters.EventMsg Outputs() map[string]struct{} } type Option func(*InputOptions) error func WithLogger(logger *log.Logger) Option { return func(i *InputOptions) error { i.Logger = logger return nil } } func WithOutputs(outs map[string]outputs.Output) Option { return func(i *InputOptions) error { i.Outputs = outs return nil } } func WithName(name string) Option { return func(i *InputOptions) error { i.Name = name return nil } } func WithConfigStore(st store.Store[any]) Option { return func(i *InputOptions) error { i.Store = st return nil } } func WithPipeline(pipeline chan *pipeline.Msg) Option { return func(i *InputOptions) error { i.Pipeline = pipeline return nil } } type BaseInput struct { } func (b *BaseInput) Start(context.Context, string, map[string]any, ...Option) error { return nil } func (b *BaseInput) Validate(map[string]any) error { return nil } func (b *BaseInput) Update(map[string]any) error { return nil } func (b *BaseInput) UpdateProcessor(string, map[string]any) error { return nil } func (b *BaseInput) Close() error { return nil } func UpdateProcessorInSlice( logger *log.Logger, storeObj store.Store[any], eventProcessors []string, currentEvps []formatters.EventProcessor, processorName string, pcfg map[string]any, ) ([]formatters.EventProcessor, bool, error) { tcs, ps, acts, err := pkgutils.GetConfigMaps(storeObj) if err != nil { return nil, false, err } for i, epName := range eventProcessors { if epName == processorName { ep, err := formatters.MakeProcessor(logger, processorName, pcfg, ps, tcs, acts) if err != nil { return nil, false, err } if i >= len(currentEvps) { return nil, false, fmt.Errorf("output processors are not properly initialized") } // create new slice with updated processor newEvps := make([]formatters.EventProcessor, len(currentEvps)) copy(newEvps, currentEvps) newEvps[i] = ep logger.Printf("updated event processor %s", processorName) return newEvps, true, nil } } // processor not found - return currentEvps return currentEvps, false, nil } ================================================ FILE: pkg/inputs/jetstream_input/jetstream_input.go ================================================ // © 2025 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package jetstream_input import ( "context" "encoding/json" "fmt" "io" "log" "net" "slices" "strings" "sync" "sync/atomic" "time" "google.golang.org/protobuf/proto" "github.com/google/uuid" "github.com/nats-io/nats.go" "github.com/nats-io/nats.go/jetstream" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/inputs" "github.com/openconfig/gnmic/pkg/outputs" "github.com/openconfig/gnmic/pkg/pipeline" pkgutils "github.com/openconfig/gnmic/pkg/utils" "github.com/zestor-dev/zestor/store" ) const ( loggingPrefix = "[jetstream_input:%s] " natsReconnectBufferSize = 100 * 1024 * 1024 defaultAddress = "localhost:4222" natsConnectWait = 2 * time.Second defaultFormat = "event" defaultNumWorkers = 1 defaultBufferSize = 500 defaultFetchBatchSize = 500 defaultMaxAckPending = 1000 ) type deliverPolicy string const ( deliverPolicyAll deliverPolicy = "all" deliverPolicyLast deliverPolicy = "last" deliverPolicyNew deliverPolicy = "new" deliverPolicyLastPerSubject deliverPolicy = "last-per-subject" ) func toJSDeliverPolicy(dp deliverPolicy) jetstream.DeliverPolicy { switch dp { case deliverPolicyAll: return jetstream.DeliverAllPolicy case deliverPolicyLast: return jetstream.DeliverLastPolicy case deliverPolicyNew: return jetstream.DeliverNewPolicy case deliverPolicyLastPerSubject: return jetstream.DeliverLastPerSubjectPolicy } return 0 } func init() { inputs.Register("jetstream", func() inputs.Input { return &jetstreamInput{ confLock: new(sync.RWMutex), cfg: new(atomic.Pointer[config]), dynCfg: new(atomic.Pointer[dynConfig]), logger: log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags), wg: new(sync.WaitGroup), } }) } // jetstreamInput // type jetstreamInput struct { // ensure only one Update or UpdateProcessor operation // are performed at a time confLock *sync.RWMutex inputs.BaseInput cfg *atomic.Pointer[config] dynCfg *atomic.Pointer[dynConfig] ctx context.Context cfn context.CancelFunc logger *log.Logger wg *sync.WaitGroup outputs []outputs.Output // used when the cmd is subscribe store store.Store[any] pipeline chan *pipeline.Msg } type dynConfig struct { evps []formatters.EventProcessor outputsMap map[string]struct{} // used when the cmd is collector } type subjectFormat string const ( subjectFormat_Static = "static" subjectFormat_TargetSub = "target.subscription" subjectFormat_SubTarget = "subscription.target" ) // config // type config struct { Name string `mapstructure:"name,omitempty"` Address string `mapstructure:"address,omitempty"` Stream string `mapstructure:"stream,omitempty"` Subjects []string `mapstructure:"subjects,omitempty"` SubjectFormat subjectFormat `mapstructure:"subject-format,omitempty" json:"subject-format,omitempty"` DeliverPolicy deliverPolicy `mapstructure:"deliver-policy,omitempty"` Username string `mapstructure:"username,omitempty"` Password string `mapstructure:"password,omitempty"` ConnectTimeWait time.Duration `mapstructure:"connect-time-wait,omitempty"` TLS *types.TLSConfig `mapstructure:"tls,omitempty" json:"tls,omitempty"` Format string `mapstructure:"format,omitempty"` Debug bool `mapstructure:"debug,omitempty"` NumWorkers int `mapstructure:"num-workers,omitempty"` BufferSize int `mapstructure:"buffer-size,omitempty"` FetchBatchSize int `mapstructure:"fetch-batch-size,omitempty"` MaxAckPending *int `mapstructure:"max-ack-pending,omitempty"` Outputs []string `mapstructure:"outputs,omitempty"` EventProcessors []string `mapstructure:"event-processors,omitempty"` } // Init // func (n *jetstreamInput) Start(ctx context.Context, name string, cfg map[string]any, opts ...inputs.Option) error { n.confLock.Lock() defer n.confLock.Unlock() newCfg := new(config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } if newCfg.Name == "" { newCfg.Name = name } n.logger.SetPrefix(fmt.Sprintf(loggingPrefix, newCfg.Name)) options := &inputs.InputOptions{} for _, opt := range opts { if err := opt(options); err != nil { return err } } n.store = options.Store n.pipeline = options.Pipeline n.setName(options.Name, newCfg) n.setLogger(options.Logger) outputs, outputsMap := n.getOutputs(options.Outputs, newCfg) n.outputs = outputs evps, err := n.buildEventProcessors(options.Logger, newCfg.EventProcessors) if err != nil { return err } err = n.setDefaultsFor(newCfg) if err != nil { return err } n.cfg.Store(newCfg) dc := &dynConfig{ evps: evps, outputsMap: outputsMap, } n.dynCfg.Store(dc) n.ctx = ctx // save context for worker restarts var runCtx context.Context // create a run context for the workers runCtx, n.cfn = context.WithCancel(ctx) n.logger.Printf("input starting with config: %+v", newCfg) n.wg.Add(newCfg.NumWorkers) for i := 0; i < newCfg.NumWorkers; i++ { go n.worker(runCtx, i) } return nil } func (n *jetstreamInput) Validate(cfg map[string]any) error { newCfg := new(config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } return n.setDefaultsFor(newCfg) } // Update updates the input configuration and restarts the workers if // necessary. // It works only when the command is collector (not subscribe). func (n *jetstreamInput) Update(cfg map[string]any) error { n.confLock.Lock() defer n.confLock.Unlock() newCfg := new(config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } n.setDefaultsFor(newCfg) currCfg := n.cfg.Load() restartWorkers := needsWorkerRestart(currCfg, newCfg) rebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0 // build new dynamic config dc := &dynConfig{ outputsMap: make(map[string]struct{}), } for _, o := range newCfg.Outputs { dc.outputsMap[o] = struct{}{} } prevDC := n.dynCfg.Load() if rebuildProcessors { dc.evps, err = n.buildEventProcessors(n.logger, newCfg.EventProcessors) if err != nil { return err } } else if prevDC != nil { dc.evps = prevDC.evps } n.dynCfg.Store(dc) n.cfg.Store(newCfg) if restartWorkers { runCtx, cancel := context.WithCancel(n.ctx) newWG := new(sync.WaitGroup) // save old pointers oldCancel := n.cfn oldWG := n.wg // swap n.cfn = cancel n.wg = newWG n.wg.Add(newCfg.NumWorkers) for i := 0; i < newCfg.NumWorkers; i++ { go n.worker(runCtx, i) } // cancel old workers and loops if oldCancel != nil { oldCancel() } if oldWG != nil { oldWG.Wait() } } return nil } func (n *jetstreamInput) UpdateProcessor(name string, pcfg map[string]any) error { n.confLock.Lock() defer n.confLock.Unlock() cfg := n.cfg.Load() dc := n.dynCfg.Load() newEvps, changed, err := inputs.UpdateProcessorInSlice( n.logger, n.store, cfg.EventProcessors, dc.evps, name, pcfg, ) if err != nil { return err } if changed { newDC := *dc newDC.evps = newEvps n.dynCfg.Store(&newDC) n.logger.Printf("updated event processor %s", name) } return nil } func needsWorkerRestart(old, nw *config) bool { if old == nil || nw == nil { return true } return old.NumWorkers != nw.NumWorkers || old.BufferSize != nw.BufferSize || old.FetchBatchSize != nw.FetchBatchSize || old.Address != nw.Address || old.Stream != nw.Stream || slices.Compare(old.Subjects, nw.Subjects) != 0 || old.DeliverPolicy != nw.DeliverPolicy || old.Username != nw.Username || old.Password != nw.Password || !old.TLS.Equal(nw.TLS) || old.ConnectTimeWait != nw.ConnectTimeWait || !maxAckPendingEqual(old.MaxAckPending, nw.MaxAckPending) } func maxAckPendingEqual(a, b *int) bool { if a == nil && b == nil { return true } if a == nil || b == nil { return false } return *a == *b } func (n *jetstreamInput) worker(ctx context.Context, idx int) { defer n.wg.Done() workerLogPrefix := fmt.Sprintf("worker-%d", idx) n.logger.Printf("%s starting", workerLogPrefix) for { select { case <-ctx.Done(): return default: } n.logger.Printf("worker %d loading config", idx) cfg := n.cfg.Load() wCfg := *cfg // scoped connection, subscription and cleanup err := n.doWork(ctx, idx, &wCfg, workerLogPrefix) if err != nil { n.logger.Printf("%s JetStream client failed: %v", workerLogPrefix, err) } // backoff before retry select { case <-ctx.Done(): return case <-time.After(wCfg.ConnectTimeWait): } } } // scoped connection, subscription and cleanup func (n *jetstreamInput) doWork(ctx context.Context, workerIdx int, wCfg *config, workerLogPrefix string) error { nc, err := n.createNATSConn(wCfg) if err != nil { return fmt.Errorf("create NATS connection: %w", err) } defer nc.Close() js, err := jetstream.New(nc) if err != nil { return fmt.Errorf("create JetStream context: %w", err) } s, err := js.Stream(ctx, wCfg.Stream) if err != nil { return fmt.Errorf("get stream: %w", err) } // Get stream info to determine retention policy streamInfo, err := s.Info(ctx) if err != nil { return fmt.Errorf("get stream info: %w", err) } // Determine ack policy and deliver policy based on stream retention // Workqueue streams have specific requirements ackPolicy := jetstream.AckAllPolicy deliverPolicy := toJSDeliverPolicy(wCfg.DeliverPolicy) consumerName := wCfg.Name if streamInfo.Config.Retention == jetstream.WorkQueuePolicy { // Workqueue streams require explicit ack ackPolicy = jetstream.AckExplicitPolicy // Workqueue streams allow DeliverAllPolicy or DeliverNewPolicy // Use configured policy, but only if it's one of these two if deliverPolicy != jetstream.DeliverAllPolicy && deliverPolicy != jetstream.DeliverNewPolicy { // Default to DeliverAllPolicy for workqueue if configured policy is not compatible deliverPolicy = jetstream.DeliverAllPolicy } // WorkQueue streams only allow one consumer per unfiltered subject set. // All workers must share the same durable consumer so that concurrent // Fetch() calls distribute work correctly instead of each worker // failing to create its own overlapping consumer. } else { // For non-WorkQueue streams each worker gets its own independent // consumer cursor so that all workers see all messages. consumerName = fmt.Sprintf("%s-%d", wCfg.Name, workerIdx) } c, err := s.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{ Name: consumerName, Durable: consumerName, DeliverPolicy: deliverPolicy, AckPolicy: ackPolicy, MemoryStorage: true, FilterSubjects: wCfg.Subjects, MaxAckPending: *wCfg.MaxAckPending, }) if err != nil { return fmt.Errorf("create consumer: %w", err) } for { select { case <-ctx.Done(): return nil default: // load current config for dynamic fields like Format cfg := n.cfg.Load() mb, err := c.FetchNoWait(cfg.FetchBatchSize) if err != nil { return fmt.Errorf("fetch messages: %w", err) } for m := range mb.Messages() { n.msgHandler(ctx, cfg, m) } if mb.Error() != nil { return err } } } } func (n *jetstreamInput) msgHandler(ctx context.Context, cfg *config, msg jetstream.Msg) { msg.Ack() if cfg.Debug { n.logger.Printf("received msg, subject=%s, len=%d, data=%s", msg.Subject(), len(msg.Data()), msg.Data()) } dc := n.dynCfg.Load() switch cfg.Format { case "event": evMsgs := make([]*formatters.EventMsg, 1) err := json.Unmarshal(msg.Data(), &evMsgs) if err != nil { if cfg.Debug { n.logger.Printf("failed to unmarshal event msg: %v", err) } return } for _, p := range dc.evps { evMsgs = p.Apply(evMsgs...) } if n.pipeline != nil { select { case <-ctx.Done(): return case n.pipeline <- &pipeline.Msg{ Events: evMsgs, Outputs: dc.outputsMap, }: default: n.logger.Printf("pipeline channel is full, dropping event") } } for _, o := range n.outputs { for _, ev := range evMsgs { o.WriteEvent(ctx, ev) } } case "proto": var protoMsg = &gnmi.SubscribeResponse{} err := proto.Unmarshal(msg.Data(), protoMsg) if err != nil { if cfg.Debug { n.logger.Printf("failed to unmarshal proto msg: %v", err) } return } meta := n.getMetaFromSubject(msg.Subject(), cfg) if n.pipeline != nil { select { case <-ctx.Done(): return case n.pipeline <- &pipeline.Msg{ Msg: protoMsg, Meta: meta, Outputs: dc.outputsMap, }: default: n.logger.Printf("pipeline channel is full, dropping message") } } for _, o := range n.outputs { o.Write(ctx, protoMsg, meta) } default: n.logger.Printf("unsupported format: %s", cfg.Format) } } func (n *jetstreamInput) getMetaFromSubject(subject string, wCfg *config) outputs.Meta { meta := outputs.Meta{} subjectSections := strings.SplitN(subject, ".", 3) if len(subjectSections) < 3 { return meta } switch wCfg.SubjectFormat { case subjectFormat_Static: case subjectFormat_SubTarget: meta["subscription-name"] = subjectSections[1] meta["source"] = subjectSections[2] case subjectFormat_TargetSub: meta["subscription-name"] = subjectSections[2] meta["source"] = subjectSections[1] } return meta } // Close // func (n *jetstreamInput) Close() error { if n.cfn != nil { n.cfn() } if n.wg != nil { n.wg.Wait() } return nil } // SetLogger // func (n *jetstreamInput) setLogger(logger *log.Logger) { if logger != nil && n.logger != nil { n.logger.SetOutput(logger.Writer()) n.logger.SetFlags(logger.Flags()) } } // SetOutputs // func (n *jetstreamInput) getOutputs(outs map[string]outputs.Output, cfg *config) ([]outputs.Output, map[string]struct{}) { outputs := make([]outputs.Output, 0) if len(cfg.Outputs) == 0 { for _, o := range outs { outputs = append(outputs, o) } return outputs, nil } outputsMap := make(map[string]struct{}) for _, name := range cfg.Outputs { outputsMap[name] = struct{}{} // for collector if o, ok := outs[name]; ok { // for subscribe outputs = append(outputs, o) } } return outputs, outputsMap } func (n *jetstreamInput) setName(name string, cfg *config) { sb := strings.Builder{} if name != "" { sb.WriteString(name) sb.WriteString("-") } sb.WriteString(cfg.Name) sb.WriteString("-jetstream-consumer") cfg.Name = sb.String() } func (n *jetstreamInput) buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) { tcs, ps, acts, err := pkgutils.GetConfigMaps(n.store) if err != nil { return nil, err } return formatters.MakeEventProcessors( logger, eventProcessors, ps, tcs, acts, ) } // helper functions func (n *jetstreamInput) setDefaultsFor(cfg *config) error { if cfg.Format == "" { cfg.Format = defaultFormat } if !(strings.ToLower(cfg.Format) == "event" || strings.ToLower(cfg.Format) == "proto") { return fmt.Errorf("unsupported input format") } cfg.Format = strings.ToLower(cfg.Format) if cfg.Name == "" { cfg.Name = "gnmic-jetstream-consumer" + uuid.New().String() } if cfg.DeliverPolicy == "" { cfg.DeliverPolicy = deliverPolicyAll } if cfg.SubjectFormat == "" { cfg.SubjectFormat = subjectFormat_Static } if cfg.Address == "" { cfg.Address = defaultAddress } if cfg.ConnectTimeWait <= 0 { cfg.ConnectTimeWait = natsConnectWait } if cfg.NumWorkers <= 0 { cfg.NumWorkers = defaultNumWorkers } if cfg.BufferSize <= 0 { cfg.BufferSize = defaultBufferSize } if cfg.FetchBatchSize <= 0 { cfg.FetchBatchSize = defaultFetchBatchSize } if cfg.MaxAckPending == nil || *cfg.MaxAckPending <= -2 { v := defaultMaxAckPending cfg.MaxAckPending = &v } return nil } func (n *jetstreamInput) createNATSConn(c *config) (*nats.Conn, error) { opts := []nats.Option{ nats.Name(c.Name), nats.SetCustomDialer(n), nats.ReconnectWait(c.ConnectTimeWait), nats.ReconnectBufSize(natsReconnectBufferSize), nats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) { n.logger.Printf("NATS error: %v", err) }), nats.DisconnectHandler(func(*nats.Conn) { n.logger.Println("Disconnected from NATS") }), nats.ClosedHandler(func(*nats.Conn) { n.logger.Println("NATS connection is closed") }), } if c.Username != "" && c.Password != "" { opts = append(opts, nats.UserInfo(c.Username, c.Password)) } if c.TLS != nil { tlsConfig, err := utils.NewTLSConfig( c.TLS.CaFile, c.TLS.CertFile, c.TLS.KeyFile, "", c.TLS.SkipVerify, false) if err != nil { return nil, err } if tlsConfig != nil { opts = append(opts, nats.Secure(tlsConfig)) } } nc, err := nats.Connect(c.Address, opts...) if err != nil { return nil, err } return nc, nil } // Dial // func (n *jetstreamInput) Dial(network, address string) (net.Conn, error) { ctx, cancel := context.WithCancel(n.ctx) defer cancel() for { n.logger.Printf("attempting to connect to %s", address) if ctx.Err() != nil { return nil, ctx.Err() } cfg := n.cfg.Load() select { case <-n.ctx.Done(): return nil, n.ctx.Err() default: d := &net.Dialer{} if conn, err := d.DialContext(ctx, network, address); err == nil { n.logger.Printf("successfully connected to NATS server %s", address) return conn, nil } time.Sleep(cfg.ConnectTimeWait) } } } ================================================ FILE: pkg/inputs/jetstream_input/jetstream_input_test.go ================================================ package jetstream_input import ( "io" "log" "testing" "github.com/nats-io/nats.go/jetstream" ) func Test_setDefaults(t *testing.T) { tests := []struct { name string cfg *config wantErr bool errMsg string check func(*testing.T, *config) }{ { name: "format defaults to event", cfg: &config{ Stream: "test-stream", }, wantErr: false, check: func(t *testing.T, cfg *config) { if cfg.Format != defaultFormat { t.Errorf("setDefaults() Format = %v, want %v", cfg.Format, defaultFormat) } }, }, { name: "deliver policy defaults to all", cfg: &config{ Stream: "test-stream", }, wantErr: false, check: func(t *testing.T, cfg *config) { if cfg.DeliverPolicy != deliverPolicyAll { t.Errorf("setDefaults() DeliverPolicy = %v, want %v", cfg.DeliverPolicy, deliverPolicyAll) } }, }, { name: "subject format defaults to static", cfg: &config{ Stream: "test-stream", }, wantErr: false, check: func(t *testing.T, cfg *config) { if cfg.SubjectFormat != subjectFormat_Static { t.Errorf("setDefaults() SubjectFormat = %v, want %v", cfg.SubjectFormat, subjectFormat_Static) } }, }, { name: "address defaults to localhost:4222", cfg: &config{ Stream: "test-stream", }, wantErr: false, check: func(t *testing.T, cfg *config) { if cfg.Address != defaultAddress { t.Errorf("setDefaults() Address = %v, want %v", cfg.Address, defaultAddress) } }, }, { name: "num workers defaults to 1", cfg: &config{ Stream: "test-stream", }, wantErr: false, check: func(t *testing.T, cfg *config) { if cfg.NumWorkers != defaultNumWorkers { t.Errorf("setDefaults() NumWorkers = %v, want %v", cfg.NumWorkers, defaultNumWorkers) } }, }, { name: "buffer size defaults to 500", cfg: &config{ Stream: "test-stream", }, wantErr: false, check: func(t *testing.T, cfg *config) { if cfg.BufferSize != defaultBufferSize { t.Errorf("setDefaults() BufferSize = %v, want %v", cfg.BufferSize, defaultBufferSize) } }, }, { name: "fetch batch size defaults to 500", cfg: &config{ Stream: "test-stream", }, wantErr: false, check: func(t *testing.T, cfg *config) { if cfg.FetchBatchSize != defaultFetchBatchSize { t.Errorf("setDefaults() FetchBatchSize = %v, want %v", cfg.FetchBatchSize, defaultFetchBatchSize) } }, }, { name: "max ack pending defaults to 1000", cfg: &config{ Stream: "test-stream", }, wantErr: false, check: func(t *testing.T, cfg *config) { if cfg.MaxAckPending == nil || *cfg.MaxAckPending != defaultMaxAckPending { t.Errorf("setDefaults() MaxAckPending = %v, want %v", cfg.MaxAckPending, defaultMaxAckPending) } }, }, { name: "invalid format event", cfg: &config{ Stream: "test-stream", Format: "invalid", }, wantErr: true, errMsg: "unsupported input format", }, { name: "valid format event", cfg: &config{ Stream: "test-stream", Format: "event", }, wantErr: false, check: func(t *testing.T, cfg *config) { if cfg.Format != "event" { t.Errorf("setDefaults() Format = %v, want event", cfg.Format) } }, }, { name: "valid format proto", cfg: &config{ Stream: "test-stream", Format: "proto", }, wantErr: false, check: func(t *testing.T, cfg *config) { if cfg.Format != "proto" { t.Errorf("setDefaults() Format = %v, want proto", cfg.Format) } }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { n := &jetstreamInput{ logger: log.New(io.Discard, loggingPrefix, 0), } err := n.setDefaultsFor(tt.cfg) if tt.wantErr { if err == nil { t.Errorf("setDefaultsFor() expected error but got nil") return } if tt.errMsg != "" && err.Error() != tt.errMsg { t.Errorf("setDefaultsFor() error = %v, want error containing %v", err.Error(), tt.errMsg) } } else { if err != nil { t.Errorf("setDefaultsFor() unexpected error = %v", err) return } if tt.check != nil { tt.check(t, tt.cfg) } } }) } } func Test_toJSDeliverPolicy(t *testing.T) { tests := []struct { name string policy deliverPolicy want jetstream.DeliverPolicy }{ { name: "deliver policy all", policy: deliverPolicyAll, want: jetstream.DeliverAllPolicy, }, { name: "deliver policy last", policy: deliverPolicyLast, want: jetstream.DeliverLastPolicy, }, { name: "deliver policy new", policy: deliverPolicyNew, want: jetstream.DeliverNewPolicy, }, { name: "deliver policy last-per-subject", policy: deliverPolicyLastPerSubject, want: jetstream.DeliverLastPerSubjectPolicy, }, { name: "invalid deliver policy returns zero", policy: "invalid", want: 0, }, { name: "empty deliver policy returns zero", policy: "", want: 0, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := toJSDeliverPolicy(tt.policy) if got != tt.want { t.Errorf("toJSDeliverPolicy() = %v, want %v", got, tt.want) } }) } } // Test_workqueueDeliverPolicy documents the expected behavior for workqueue streams // When stream retention is WorkQueuePolicy: // - AckPolicy is always set to AckExplicitPolicy // - DeliverPolicy can be DeliverAllPolicy (process all queued jobs) or DeliverNewPolicy (process only new jobs) // - Other deliver policies are converted to DeliverAllPolicy for compatibility func Test_workqueueDeliverPolicy(t *testing.T) { // This is a documentation test - actual behavior is tested in integration tests // The workerStart function should: // 1. Detect stream retention policy // 2. Force AckExplicitPolicy for workqueue streams // 3. Allow DeliverAllPolicy or DeliverNewPolicy // 4. Convert other policies to DeliverAllPolicy t.Log("Workqueue streams support DeliverAllPolicy and DeliverNewPolicy") } ================================================ FILE: pkg/inputs/kafka_input/kafka_input.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package kafka_input import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "log" "slices" "strings" "sync" "sync/atomic" "time" "github.com/IBM/sarama" "github.com/google/uuid" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/inputs" "github.com/openconfig/gnmic/pkg/outputs" "github.com/openconfig/gnmic/pkg/pipeline" pkgutils "github.com/openconfig/gnmic/pkg/utils" "github.com/zestor-dev/zestor/store" "google.golang.org/protobuf/proto" ) const ( loggingPrefix = "[kafka_input] " defaultFormat = "event" defaultTopic = "telemetry" defaultNumWorkers = 1 defaultSessionTimeout = 10 * time.Second defaultHeartbeatInterval = 3 * time.Second defaultRecoveryWaitTime = 2 * time.Second defaultAddress = "localhost:9092" defaultGroupID = "gnmic-consumers" ) var defaultVersion = sarama.V2_5_0_0 var openSquareBracket = []byte("[") var openCurlyBrace = []byte("{") func init() { inputs.Register("kafka", func() inputs.Input { return &KafkaInput{ confLock: new(sync.RWMutex), cfg: new(atomic.Pointer[config]), dynCfg: new(atomic.Pointer[dynConfig]), logger: log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags), wg: new(sync.WaitGroup), } }) } // KafkaInput // type KafkaInput struct { // ensure only one Update or UpdateProcessor operation // are performed at a time confLock *sync.RWMutex inputs.BaseInput cfg *atomic.Pointer[config] dynCfg *atomic.Pointer[dynConfig] ctx context.Context cfn context.CancelFunc logger *log.Logger wg *sync.WaitGroup outputs []outputs.Output // used when the cmd is subscribe store store.Store[any] pipeline chan *pipeline.Msg } type dynConfig struct { evps []formatters.EventProcessor outputsMap map[string]struct{} // used when the cmd is collector } // config // type config struct { Name string `mapstructure:"name,omitempty"` Address string `mapstructure:"address,omitempty"` Topics string `mapstructure:"topics,omitempty"` SASL *types.SASL `mapstructure:"sasl,omitempty"` TLS *types.TLSConfig `mapstructure:"tls,omitempty"` GroupID string `mapstructure:"group-id,omitempty"` SessionTimeout time.Duration `mapstructure:"session-timeout,omitempty"` HeartbeatInterval time.Duration `mapstructure:"heartbeat-interval,omitempty"` RecoveryWaitTime time.Duration `mapstructure:"recovery-wait-time,omitempty"` Version string `mapstructure:"version,omitempty"` Format string `mapstructure:"format,omitempty"` Debug bool `mapstructure:"debug,omitempty"` NumWorkers int `mapstructure:"num-workers,omitempty"` Outputs []string `mapstructure:"outputs,omitempty"` EventProcessors []string `mapstructure:"event-processors,omitempty"` kafkaVersion sarama.KafkaVersion } func (k *KafkaInput) Start(ctx context.Context, name string, cfg map[string]interface{}, opts ...inputs.Option) error { k.confLock.Lock() defer k.confLock.Unlock() newCfg := new(config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } if newCfg.Name == "" { newCfg.Name = name } options := &inputs.InputOptions{} for _, opt := range opts { if err := opt(options); err != nil { return err } } k.store = options.Store k.pipeline = options.Pipeline k.setName(options.Name, newCfg) k.setLogger(options.Logger) outputs, outputsMap := k.getOutputs(options.Outputs, newCfg) k.outputs = outputs evps, err := k.buildEventProcessors(options.Logger, newCfg.EventProcessors) if err != nil { return err } err = k.setDefaultsFor(newCfg) if err != nil { return err } k.cfg.Store(newCfg) dc := &dynConfig{ evps: evps, outputsMap: outputsMap, } k.dynCfg.Store(dc) k.ctx = ctx // save context for worker restarts var runCtx context.Context // create a run context for the workers runCtx, k.cfn = context.WithCancel(ctx) k.logger.Printf("input starting with config: %+v", newCfg) k.wg.Add(newCfg.NumWorkers) for i := 0; i < newCfg.NumWorkers; i++ { go k.worker(runCtx, i) } return nil } func (k *KafkaInput) Validate(cfg map[string]any) error { newCfg := new(config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } return k.setDefaultsFor(newCfg) } // Update updates the input configuration and restarts the workers if // necessary. // It works only when the command is collector (not subscribe). func (k *KafkaInput) Update(cfg map[string]any) error { k.confLock.Lock() defer k.confLock.Unlock() newCfg := new(config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } k.setDefaultsFor(newCfg) currCfg := k.cfg.Load() restartWorkers := needsWorkerRestart(currCfg, newCfg) rebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0 // build new dynamic config dc := &dynConfig{ outputsMap: make(map[string]struct{}), } for _, o := range newCfg.Outputs { dc.outputsMap[o] = struct{}{} } prevDC := k.dynCfg.Load() if rebuildProcessors { dc.evps, err = k.buildEventProcessors(k.logger, newCfg.EventProcessors) if err != nil { return err } } else if prevDC != nil { dc.evps = prevDC.evps } k.dynCfg.Store(dc) k.cfg.Store(newCfg) if restartWorkers { runCtx, cancel := context.WithCancel(k.ctx) newWG := new(sync.WaitGroup) // save old pointers oldCancel := k.cfn oldWG := k.wg // swap k.cfn = cancel k.wg = newWG k.wg.Add(newCfg.NumWorkers) for i := 0; i < newCfg.NumWorkers; i++ { go k.worker(runCtx, i) } // cancel old workers and loops if oldCancel != nil { oldCancel() } if oldWG != nil { oldWG.Wait() } } return nil } func (k *KafkaInput) UpdateProcessor(name string, pcfg map[string]any) error { k.confLock.Lock() defer k.confLock.Unlock() cfg := k.cfg.Load() dc := k.dynCfg.Load() newEvps, changed, err := inputs.UpdateProcessorInSlice( k.logger, k.store, cfg.EventProcessors, dc.evps, name, pcfg, ) if err != nil { return err } if changed { newDC := *dc newDC.evps = newEvps k.dynCfg.Store(&newDC) k.logger.Printf("updated event processor %s", name) } return nil } func (k *KafkaInput) worker(ctx context.Context, idx int) { defer k.wg.Done() workerLogPrefix := fmt.Sprintf("worker-%d", idx) k.logger.Printf("%s starting", workerLogPrefix) for { select { case <-ctx.Done(): return default: } k.logger.Printf("worker %d loading config", idx) cfg := k.cfg.Load() wCfg := *cfg wCfg.Name = fmt.Sprintf("%s-%d", wCfg.Name, idx) // scoped connection, subscription and cleanup err := k.doWork(ctx, &wCfg, workerLogPrefix, idx) if err != nil { k.logger.Printf("%s Kafka client failed: %v", workerLogPrefix, err) } // backoff before retry select { case <-ctx.Done(): return case <-time.After(wCfg.RecoveryWaitTime): } } } // scoped connection, subscription and cleanup func (k *KafkaInput) doWork(ctx context.Context, wCfg *config, workerLogPrefix string, idx int) error { saramaConfig, err := k.createConfig(wCfg) if err != nil { return fmt.Errorf("create Kafka config: %w", err) } saramaConfig.ClientID = fmt.Sprintf("%s-%d", wCfg.Name, idx) consumerGrp, err := sarama.NewConsumerGroup(strings.Split(wCfg.Address, ","), wCfg.GroupID, saramaConfig) if err != nil { return fmt.Errorf("create consumer group: %w", err) } defer consumerGrp.Close() cons := &consumer{ ready: make(chan bool), msgChan: make(chan *sarama.ConsumerMessage), } stopConsume := make(chan struct{}) go func() { var err error for { select { case <-ctx.Done(): return case <-stopConsume: return default: } err = consumerGrp.Consume(ctx, strings.Split(wCfg.Topics, ","), cons) if err != nil { if wCfg.Debug { k.logger.Printf("%s failed to start consumer, topics=%q, group=%q : %v", workerLogPrefix, wCfg.Topics, wCfg.GroupID, err) } continue } cons.ready = make(chan bool) } }() // wait for the consumer to be ready select { case <-ctx.Done(): return nil case <-cons.ready: k.logger.Printf("%s kafka consumer ready", workerLogPrefix) } for { select { case <-ctx.Done(): return nil case m := <-cons.msgChan: if len(m.Value) == 0 { continue } // load current config for dynamic fields like Format cfg := k.cfg.Load() if cfg.Debug { k.logger.Printf("%s client=%s received msg, topic=%s, partition=%d, key=%q, length=%d, value=%s", workerLogPrefix, saramaConfig.ClientID, m.Topic, m.Partition, string(m.Key), len(m.Value), string(m.Value)) } dc := k.dynCfg.Load() switch cfg.Format { case "event": m.Value = bytes.TrimSpace(m.Value) evMsgs := make([]*formatters.EventMsg, 1) var err error switch { case len(m.Value) == 0: continue case m.Value[0] == openSquareBracket[0]: err = json.Unmarshal(m.Value, &evMsgs) case m.Value[0] == openCurlyBrace[0]: evMsgs[0] = &formatters.EventMsg{} err = json.Unmarshal(m.Value, evMsgs[0]) } if err != nil { if cfg.Debug { k.logger.Printf("%s failed to unmarshal event msg: %v", workerLogPrefix, err) } continue } for _, p := range dc.evps { evMsgs = p.Apply(evMsgs...) } if k.pipeline != nil { select { case <-ctx.Done(): return nil case k.pipeline <- &pipeline.Msg{ Events: evMsgs, Outputs: dc.outputsMap, }: default: k.logger.Printf("pipeline channel is full, dropping event") } } for _, o := range k.outputs { for _, ev := range evMsgs { o.WriteEvent(ctx, ev) } } case "proto": protoMsg := new(gnmi.SubscribeResponse) if err := proto.Unmarshal(m.Value, protoMsg); err != nil { if cfg.Debug { k.logger.Printf("%s failed to unmarshal proto msg: %v", workerLogPrefix, err) } continue } fmt.Printf("m.Key: %s\n", string(m.Key)) meta := k.partitionKeyToMeta(m.Key) fmt.Printf("meta: %+v\n", meta) if k.pipeline != nil { select { case <-ctx.Done(): return nil case k.pipeline <- &pipeline.Msg{ Msg: protoMsg, Meta: meta, Outputs: dc.outputsMap, }: default: k.logger.Printf("pipeline channel is full, dropping message") } } for _, o := range k.outputs { o.Write(ctx, protoMsg, meta) } } case err := <-consumerGrp.Errors(): cfg := k.cfg.Load() k.logger.Printf("%s client=%s, consumer-group=%s error: %v", workerLogPrefix, saramaConfig.ClientID, cfg.GroupID, err) select { case <-ctx.Done(): return nil case <-time.After(cfg.RecoveryWaitTime): } close(stopConsume) // restart worker in case of error go k.doWork(ctx, cfg, workerLogPrefix, idx) return nil } } } func (k *KafkaInput) Close() error { if k.cfn != nil { k.cfn() } if k.wg != nil { k.wg.Wait() } return nil } const ( partitionKeySeparator = ":::" ) func (k *KafkaInput) partitionKeyToMeta(key []byte) outputs.Meta { if len(key) == 0 { return outputs.Meta{} } parts := strings.SplitN(string(key), partitionKeySeparator, 2) if len(parts) != 2 { return outputs.Meta{} } return outputs.Meta{ "source": parts[0], "subscription-name": parts[1], } } func (k *KafkaInput) setLogger(logger *log.Logger) { if logger != nil { k.logger = log.New(logger.Writer(), loggingPrefix, logger.Flags()) sarama.Logger = k.logger } } func (k *KafkaInput) getOutputs(outs map[string]outputs.Output, cfg *config) ([]outputs.Output, map[string]struct{}) { outputs := make([]outputs.Output, 0) if len(cfg.Outputs) == 0 { for _, o := range outs { outputs = append(outputs, o) } return outputs, nil } outputsMap := make(map[string]struct{}) for _, name := range cfg.Outputs { outputsMap[name] = struct{}{} // for collector if o, ok := outs[name]; ok { // for subscribe outputs = append(outputs, o) } } return outputs, outputsMap } func (k *KafkaInput) setName(name string, cfg *config) { sb := strings.Builder{} if name != "" { sb.WriteString(name) sb.WriteString("-") } sb.WriteString(cfg.Name) sb.WriteString("-kafka-cons") cfg.Name = sb.String() } func (k *KafkaInput) buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) { tcs, ps, acts, err := pkgutils.GetConfigMaps(k.store) if err != nil { return nil, err } return formatters.MakeEventProcessors( logger, eventProcessors, ps, tcs, acts, ) } // helper funcs func (k *KafkaInput) setDefaultsFor(cfg *config) error { var err error if cfg.Version != "" { cfg.kafkaVersion, err = sarama.ParseKafkaVersion(cfg.Version) if err != nil { return err } } else { cfg.kafkaVersion = defaultVersion } if cfg.Format == "" { cfg.Format = defaultFormat } if !(strings.ToLower(cfg.Format) == "event" || strings.ToLower(cfg.Format) == "proto") { return fmt.Errorf("unsupported input format") } cfg.Format = strings.ToLower(cfg.Format) if cfg.Topics == "" { cfg.Topics = defaultTopic } if cfg.Address == "" { cfg.Address = defaultAddress } if cfg.NumWorkers <= 0 { cfg.NumWorkers = defaultNumWorkers } if cfg.SessionTimeout <= 2*time.Millisecond { cfg.SessionTimeout = defaultSessionTimeout } if cfg.HeartbeatInterval <= 1*time.Millisecond { cfg.HeartbeatInterval = defaultHeartbeatInterval } if cfg.GroupID == "" { cfg.GroupID = defaultGroupID } if cfg.RecoveryWaitTime <= 0 { cfg.RecoveryWaitTime = defaultRecoveryWaitTime } if cfg.Name == "" { cfg.Name = "gnmic-" + uuid.New().String() } if cfg.SASL == nil { return nil } cfg.SASL.Mechanism = strings.ToUpper(cfg.SASL.Mechanism) switch cfg.SASL.Mechanism { case "": cfg.SASL.Mechanism = "PLAIN" case "OAUTHBEARER": if cfg.SASL.TokenURL == "" { return errors.New("missing token-url for kafka SASL mechanism OAUTHBEARER") } } return nil } func (k *KafkaInput) createConfig(cfg *config) (*sarama.Config, error) { saramaCfg := sarama.NewConfig() saramaCfg.Version = cfg.kafkaVersion saramaCfg.Consumer.Return.Errors = true saramaCfg.Consumer.Group.Session.Timeout = cfg.SessionTimeout saramaCfg.Consumer.Group.Heartbeat.Interval = cfg.HeartbeatInterval saramaCfg.Consumer.Group.Rebalance.Strategy = sarama.NewBalanceStrategyRange() // SASL_PLAINTEXT or SASL_SSL if cfg.SASL != nil { saramaCfg.Net.SASL.Enable = true saramaCfg.Net.SASL.User = cfg.SASL.User saramaCfg.Net.SASL.Password = cfg.SASL.Password saramaCfg.Net.SASL.Mechanism = sarama.SASLMechanism(cfg.SASL.Mechanism) switch saramaCfg.Net.SASL.Mechanism { case sarama.SASLTypeSCRAMSHA256: saramaCfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA256} } case sarama.SASLTypeSCRAMSHA512: saramaCfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA512} } case sarama.SASLTypeOAuth: saramaCfg.Net.SASL.TokenProvider = pkgutils.NewTokenProvider(saramaCfg.Net.SASL.User, saramaCfg.Net.SASL.Password, cfg.SASL.TokenURL) } } // SSL or SASL_SSL if cfg.TLS != nil { var err error saramaCfg.Net.TLS.Enable = true saramaCfg.Net.TLS.Config, err = utils.NewTLSConfig( cfg.TLS.CaFile, cfg.TLS.CertFile, cfg.TLS.KeyFile, "", cfg.TLS.SkipVerify, false) if err != nil { return nil, err } } return saramaCfg, nil } func needsWorkerRestart(old, nw *config) bool { if old == nil || nw == nil { return true } return old.NumWorkers != nw.NumWorkers || old.Address != nw.Address || old.Topics != nw.Topics || old.GroupID != nw.GroupID || old.SessionTimeout != nw.SessionTimeout || old.HeartbeatInterval != nw.HeartbeatInterval || old.RecoveryWaitTime != nw.RecoveryWaitTime || old.kafkaVersion != nw.kafkaVersion || !old.TLS.Equal(nw.TLS) || !saslEq(old.SASL, nw.SASL) } func saslEq(a, b *types.SASL) bool { if a == nil && b == nil { return true } if a == nil || b == nil { return false } return a.User == b.User && a.Password == b.Password && strings.EqualFold(a.Mechanism, b.Mechanism) && a.TokenURL == b.TokenURL } // consumer // ref: https://github.com/Shopify/sarama/blob/master/examples/consumergroup/main.go // consumer represents a Sarama consumer group consumer type consumer struct { ready chan bool msgChan chan *sarama.ConsumerMessage } // Setup is run at the beginning of a new session, before ConsumeClaim func (consumer *consumer) Setup(sarama.ConsumerGroupSession) error { // Mark the consumer as ready close(consumer.ready) return nil } // Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited func (consumer *consumer) Cleanup(sarama.ConsumerGroupSession) error { return nil } // ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages(). func (consumer *consumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { for message := range claim.Messages() { consumer.msgChan <- message session.MarkMessage(message, "") } return nil } ================================================ FILE: pkg/inputs/kafka_input/kafka_scram_client.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package kafka_input import ( "crypto/sha256" "crypto/sha512" "hash" "github.com/xdg/scram" ) var SHA256 scram.HashGeneratorFcn = func() hash.Hash { return sha256.New() } var SHA512 scram.HashGeneratorFcn = func() hash.Hash { return sha512.New() } type XDGSCRAMClient struct { *scram.Client *scram.ClientConversation scram.HashGeneratorFcn } func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) if err != nil { return err } x.ClientConversation = x.Client.NewConversation() return nil } func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { response, err = x.ClientConversation.Step(challenge) return } func (x *XDGSCRAMClient) Done() bool { return x.ClientConversation.Done() } ================================================ FILE: pkg/inputs/nats_input/nats_input.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package nats_input import ( "context" "encoding/json" "fmt" "io" "log" "net" "slices" "strings" "sync" "sync/atomic" "time" "google.golang.org/protobuf/proto" "github.com/google/uuid" "github.com/nats-io/nats.go" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/inputs" "github.com/openconfig/gnmic/pkg/outputs" "github.com/openconfig/gnmic/pkg/pipeline" gutils "github.com/openconfig/gnmic/pkg/utils" "github.com/zestor-dev/zestor/store" ) const ( loggingPrefix = "[nats_input] " natsReconnectBufferSize = 100 * 1024 * 1024 defaultAddress = "localhost:4222" natsConnectWait = 2 * time.Second defaultFormat = "event" defaultSubject = "telemetry" defaultNumWorkers = 1 defaultBufferSize = 100 ) func init() { inputs.Register("nats", func() inputs.Input { return &natsInput{ confLock: new(sync.RWMutex), cfg: new(atomic.Pointer[config]), dynCfg: new(atomic.Pointer[dynConfig]), logger: log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags), wg: new(sync.WaitGroup), } }) } // natsInput // type natsInput struct { // ensure only one Update or UpdateProcessor operation // are performed at a time confLock *sync.RWMutex inputs.BaseInput cfg *atomic.Pointer[config] dynCfg *atomic.Pointer[dynConfig] ctx context.Context cfn context.CancelFunc logger *log.Logger wg *sync.WaitGroup outputs []outputs.Output // used when the cmd is subscribe store store.Store[any] pipeline chan *pipeline.Msg } type dynConfig struct { evps []formatters.EventProcessor outputsMap map[string]struct{} // used when the cmd is collector } // config // type config struct { Name string `mapstructure:"name,omitempty"` Address string `mapstructure:"address,omitempty"` Subject string `mapstructure:"subject,omitempty"` Queue string `mapstructure:"queue,omitempty"` Username string `mapstructure:"username,omitempty"` Password string `mapstructure:"password,omitempty"` ConnectTimeWait time.Duration `mapstructure:"connect-time-wait,omitempty"` TLS *types.TLSConfig `mapstructure:"tls,omitempty" json:"tls,omitempty"` Format string `mapstructure:"format,omitempty"` Debug bool `mapstructure:"debug,omitempty"` NumWorkers int `mapstructure:"num-workers,omitempty"` BufferSize int `mapstructure:"buffer-size,omitempty"` Outputs []string `mapstructure:"outputs,omitempty"` EventProcessors []string `mapstructure:"event-processors,omitempty"` } // Init // func (n *natsInput) Start(ctx context.Context, name string, cfg map[string]any, opts ...inputs.Option) error { n.confLock.Lock() defer n.confLock.Unlock() newCfg := new(config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } if newCfg.Name == "" { newCfg.Name = name } n.logger.SetPrefix(fmt.Sprintf("%s%s", loggingPrefix, newCfg.Name)) options := &inputs.InputOptions{} for _, opt := range opts { if err := opt(options); err != nil { return err } } n.store = options.Store n.pipeline = options.Pipeline n.setName(options.Name, newCfg) n.setLogger(options.Logger) outputs, outputsMap := n.getOutputs(options.Outputs, newCfg) n.outputs = outputs evps, err := n.buildEventProcessors(options.Logger, newCfg.EventProcessors) if err != nil { return err } err = n.setDefaultsFor(newCfg) if err != nil { return err } n.cfg.Store(newCfg) dc := &dynConfig{ evps: evps, outputsMap: outputsMap, } n.dynCfg.Store(dc) n.ctx = ctx // save context for worker restarts var runCtx context.Context // create a run context for the workers runCtx, n.cfn = context.WithCancel(ctx) n.logger.Printf("input starting with config: %+v", newCfg) n.wg.Add(newCfg.NumWorkers) for i := 0; i < newCfg.NumWorkers; i++ { go n.worker(runCtx, i) } return nil } func (n *natsInput) Validate(cfg map[string]any) error { newCfg := new(config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } return n.setDefaultsFor(newCfg) } // Update updates the input configuration and restarts the workers if // necessary. // It works only when the command is collector (not subscribe). func (n *natsInput) Update(cfg map[string]any) error { n.confLock.Lock() defer n.confLock.Unlock() newCfg := new(config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } n.setDefaultsFor(newCfg) currCfg := n.cfg.Load() restartWorkers := needsWorkerRestart(currCfg, newCfg) rebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0 // build new dynamic config dc := &dynConfig{ outputsMap: make(map[string]struct{}), } for _, o := range newCfg.Outputs { dc.outputsMap[o] = struct{}{} } prevDC := n.dynCfg.Load() if rebuildProcessors { dc.evps, err = n.buildEventProcessors(n.logger, newCfg.EventProcessors) if err != nil { return err } } else if prevDC != nil { dc.evps = prevDC.evps } n.dynCfg.Store(dc) n.cfg.Store(newCfg) if restartWorkers { runCtx, cancel := context.WithCancel(n.ctx) newWG := new(sync.WaitGroup) // save old pointers oldCancel := n.cfn oldWG := n.wg // swap n.cfn = cancel n.wg = newWG n.wg.Add(newCfg.NumWorkers) for i := 0; i < newCfg.NumWorkers; i++ { go n.worker(runCtx, i) } // cancel old workers and loops if oldCancel != nil { oldCancel() } if oldWG != nil { oldWG.Wait() } } return nil } func (n *natsInput) UpdateProcessor(name string, pcfg map[string]any) error { n.confLock.Lock() defer n.confLock.Unlock() cfg := n.cfg.Load() dc := n.dynCfg.Load() newEvps, changed, err := inputs.UpdateProcessorInSlice( n.logger, n.store, cfg.EventProcessors, dc.evps, name, pcfg, ) if err != nil { return err } if changed { newDC := *dc newDC.evps = newEvps n.dynCfg.Store(&newDC) n.logger.Printf("updated event processor %s", name) } return nil } func needsWorkerRestart(old, nw *config) bool { if old == nil || nw == nil { return true } return old.NumWorkers != nw.NumWorkers || old.BufferSize != nw.BufferSize || old.Address != nw.Address || old.Subject != nw.Subject || old.Queue != nw.Queue || old.Username != nw.Username || old.Password != nw.Password || !old.TLS.Equal(nw.TLS) || old.ConnectTimeWait != nw.ConnectTimeWait } func (n *natsInput) worker(ctx context.Context, idx int) { defer n.wg.Done() workerLogPrefix := fmt.Sprintf("worker-%d", idx) n.logger.Printf("%s starting", workerLogPrefix) for { select { case <-ctx.Done(): return default: } n.logger.Printf("worker %d loading config", idx) cfg := n.cfg.Load() wCfg := *cfg wCfg.Name = fmt.Sprintf("%s-%d", wCfg.Name, idx) fmt.Printf("worker %d starting with config: %+v", idx, wCfg) // scoped connection, subscription and cleanup err := n.doWork(ctx, &wCfg, workerLogPrefix) if err != nil { n.logger.Printf("%s NATS client failed: %v", workerLogPrefix, err) } // backoff before retry select { case <-ctx.Done(): return case <-time.After(wCfg.ConnectTimeWait): } } } // scoped connection, subscription and cleanup func (n *natsInput) doWork(ctx context.Context, wCfg *config, workerLogPrefix string) error { nc, err := n.createNATSConn(wCfg) if err != nil { return fmt.Errorf("create NATS connection: %w", err) } defer nc.Close() msgChan := make(chan *nats.Msg, wCfg.BufferSize) sub, err := nc.ChanQueueSubscribe(wCfg.Subject, wCfg.Queue, msgChan) if err != nil { return fmt.Errorf("create subscription: %w", err) } defer sub.Unsubscribe() for { select { case <-ctx.Done(): return nil case m, ok := <-msgChan: if !ok { return fmt.Errorf("msg channel closed") } if len(m.Data) == 0 { continue } // load current config for dynamic fields like Format cfg := n.cfg.Load() if cfg.Debug { n.logger.Printf("received msg, subject=%s, queue=%s, len=%d, data=%s", m.Subject, m.Sub.Queue, len(m.Data), string(m.Data)) } dc := n.dynCfg.Load() switch cfg.Format { case "event": var evMsgs []*formatters.EventMsg if err := json.Unmarshal(m.Data, &evMsgs); err != nil { if cfg.Debug { n.logger.Printf("%s failed to unmarshal event msg: %v", workerLogPrefix, err) } continue } for _, p := range dc.evps { evMsgs = p.Apply(evMsgs...) } if n.pipeline != nil { select { case <-ctx.Done(): return nil case n.pipeline <- &pipeline.Msg{ Events: evMsgs, Outputs: dc.outputsMap, }: default: n.logger.Printf("pipeline channel is full, dropping event") } } for _, o := range n.outputs { for _, ev := range evMsgs { o.WriteEvent(ctx, ev) } } case "proto": protoMsg := new(gnmi.SubscribeResponse) if err := proto.Unmarshal(m.Data, protoMsg); err != nil { n.logger.Printf("failed to unmarshal proto msg: %v", err) continue } meta := outputs.Meta{} parts := strings.SplitN(m.Subject, ".", 3) if len(parts) == 3 { meta["source"] = strings.ReplaceAll(parts[1], "-", ".") meta["subscription-name"] = parts[2] } if n.pipeline != nil { select { case <-ctx.Done(): return nil case n.pipeline <- &pipeline.Msg{ Msg: protoMsg, Meta: meta, Outputs: dc.outputsMap, }: default: n.logger.Printf("pipeline channel is full, dropping message") } } for _, o := range n.outputs { o.Write(ctx, protoMsg, meta) } } } } } // Close // func (n *natsInput) Close() error { if n.cfn != nil { n.cfn() } if n.wg != nil { n.wg.Wait() } return nil } // SetLogger // func (n *natsInput) setLogger(logger *log.Logger) { if logger != nil && n.logger != nil { n.logger.SetOutput(logger.Writer()) n.logger.SetFlags(logger.Flags()) } } // SetOutputs // func (n *natsInput) getOutputs(outs map[string]outputs.Output, cfg *config) ([]outputs.Output, map[string]struct{}) { outputs := make([]outputs.Output, 0) if len(cfg.Outputs) == 0 { for _, o := range outs { outputs = append(outputs, o) } return outputs, nil } outputsMap := make(map[string]struct{}) for _, name := range cfg.Outputs { outputsMap[name] = struct{}{} // for collector if o, ok := outs[name]; ok { // for subscribe outputs = append(outputs, o) } } return outputs, outputsMap } func (n *natsInput) setName(name string, cfg *config) { sb := strings.Builder{} if name != "" { sb.WriteString(name) sb.WriteString("-") } sb.WriteString(cfg.Name) sb.WriteString("-nats-sub") cfg.Name = sb.String() } func (n *natsInput) buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) { tcs, ps, acts, err := gutils.GetConfigMaps(n.store) if err != nil { return nil, err } return formatters.MakeEventProcessors( logger, eventProcessors, ps, tcs, acts, ) } // helper functions func (n *natsInput) setDefaultsFor(cfg *config) error { if cfg.Format == "" { cfg.Format = defaultFormat } if !(strings.ToLower(cfg.Format) == "event" || strings.ToLower(cfg.Format) == "proto") { return fmt.Errorf("unsupported input format") } cfg.Format = strings.ToLower(cfg.Format) if cfg.Name == "" { cfg.Name = "gnmic-" + uuid.New().String() } if cfg.Subject == "" { cfg.Subject = defaultSubject } if cfg.Address == "" { cfg.Address = defaultAddress } if cfg.ConnectTimeWait <= 0 { cfg.ConnectTimeWait = natsConnectWait } if cfg.Queue == "" { cfg.Queue = cfg.Name } if cfg.NumWorkers <= 0 { cfg.NumWorkers = defaultNumWorkers } if cfg.BufferSize <= 0 { cfg.BufferSize = defaultBufferSize } return nil } func (n *natsInput) createNATSConn(c *config) (*nats.Conn, error) { opts := []nats.Option{ nats.Name(c.Name), nats.SetCustomDialer(n), nats.ReconnectWait(c.ConnectTimeWait), nats.ReconnectBufSize(natsReconnectBufferSize), nats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) { n.logger.Printf("NATS error: %v", err) }), nats.DisconnectHandler(func(*nats.Conn) { n.logger.Println("Disconnected from NATS") }), nats.ClosedHandler(func(*nats.Conn) { n.logger.Println("NATS connection is closed") }), } if c.Username != "" && c.Password != "" { opts = append(opts, nats.UserInfo(c.Username, c.Password)) } if c.TLS != nil { tlsConfig, err := utils.NewTLSConfig( c.TLS.CaFile, c.TLS.CertFile, c.TLS.KeyFile, "", c.TLS.SkipVerify, false) if err != nil { return nil, err } if tlsConfig != nil { opts = append(opts, nats.Secure(tlsConfig)) } } nc, err := nats.Connect(c.Address, opts...) if err != nil { return nil, err } return nc, nil } // Dial // func (n *natsInput) Dial(network, address string) (net.Conn, error) { ctx, cancel := context.WithCancel(n.ctx) defer cancel() for { n.logger.Printf("attempting to connect to %s", address) if ctx.Err() != nil { return nil, ctx.Err() } cfg := n.cfg.Load() select { case <-n.ctx.Done(): return nil, n.ctx.Err() default: d := &net.Dialer{} if conn, err := d.DialContext(ctx, network, address); err == nil { n.logger.Printf("successfully connected to NATS server %s", address) return conn, nil } time.Sleep(cfg.ConnectTimeWait) } } } ================================================ FILE: pkg/loaders/all/all.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package all import ( _ "github.com/openconfig/gnmic/pkg/loaders/consul_loader" _ "github.com/openconfig/gnmic/pkg/loaders/docker_loader" _ "github.com/openconfig/gnmic/pkg/loaders/file_loader" _ "github.com/openconfig/gnmic/pkg/loaders/http_loader" ) ================================================ FILE: pkg/loaders/consul_loader/consul_loader.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package consul_loader import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "log" "net" "slices" "strconv" "strings" "sync" "text/template" "time" "gopkg.in/yaml.v2" "github.com/hashicorp/consul/api" "github.com/mitchellh/mapstructure" "github.com/openconfig/gnmic/pkg/actions" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" gfile "github.com/openconfig/gnmic/pkg/file" "github.com/openconfig/gnmic/pkg/loaders" ) const ( loggingPrefix = "[consul_loader] " loaderType = "consul" defaultAddress = "localhost:8500" defaultPrefix = "gnmic/config/targets" // defaultWatchTimeout = 1 * time.Minute defaultActionTimeout = 30 * time.Second ) var templateFunctions = template.FuncMap{"join": strings.Join} func init() { loaders.Register(loaderType, func() loaders.TargetLoader { return &consulLoader{ cfg: &cfg{}, m: new(sync.Mutex), lastTargets: make(map[string]map[string]*types.TargetConfig), logger: log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags), } }) } type consulLoader struct { cfg *cfg // decoder *consulstructure.Decoder client *api.Client m *sync.Mutex // map of targets per service lastTargets map[string]map[string]*types.TargetConfig targetConfigFn func(*types.TargetConfig) error logger *log.Logger // vars map[string]interface{} actionsConfig map[string]map[string]interface{} addActions []actions.Action delActions []actions.Action numActions int } type cfg struct { // Consul server address Address string `mapstructure:"address,omitempty" json:"address,omitempty"` // Consul datacenter name, defaults to dc1 Datacenter string `mapstructure:"datacenter,omitempty" json:"datacenter,omitempty"` // Consul username Username string `mapstructure:"username,omitempty" json:"username,omitempty"` // Consul Password Password string `mapstructure:"password,omitempty" json:"password,omitempty"` // Consul token Token string `mapstructure:"token,omitempty" json:"token,omitempty"` // enable debug Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` // KV based target config loading KeyPrefix string `mapstructure:"key-prefix,omitempty" json:"key-prefix,omitempty"` // Service based target config loading Services []*serviceDef `mapstructure:"services,omitempty" json:"services,omitempty"` // if true, registers consulLoader prometheus metrics with the provided // prometheus registry EnableMetrics bool `mapstructure:"enable-metrics,omitempty" json:"enable-metrics,omitempty"` // variables definitions to be passed to the actions Vars map[string]interface{} // variable file, values in this file will be overwritten by // the ones defined in Vars VarsFile string `mapstructure:"vars-file,omitempty" json:"vars-file,omitempty"` // list of Actions to run on new target discovery OnAdd []string `mapstructure:"on-add,omitempty" json:"on-add,omitempty"` // list of Actions to run on target removal OnDelete []string `mapstructure:"on-delete,omitempty" json:"on-delete,omitempty"` // timeout for the actions, this applies for all actions as a whole (on-add + on-delete), // not to each action individually. ActionsTimeout time.Duration `mapstructure:"actions-timeout,omitempty" json:"actions-timeout,omitempty"` } type serviceDef struct { Name string `mapstructure:"name,omitempty" json:"name,omitempty"` Tags []string `mapstructure:"tags,omitempty" json:"tags,omitempty"` Config map[string]interface{} `mapstructure:"config,omitempty" json:"config,omitempty"` tags map[string]struct{} targetNameTemplate *template.Template targetTagsTemplate map[string]*template.Template } func (c *consulLoader) Init(ctx context.Context, cfg map[string]interface{}, logger *log.Logger, opts ...loaders.Option) error { err := loaders.DecodeConfig(cfg, c.cfg) if err != nil { return err } err = c.setDefaults() if err != nil { return err } for _, opt := range opts { opt(c) } if logger != nil { c.logger.SetOutput(logger.Writer()) c.logger.SetFlags(logger.Flags()) } for _, se := range c.cfg.Services { se.tags = make(map[string]struct{}) for _, t := range se.Tags { se.tags[t] = struct{}{} } } // parse tempaltes if present for i, se := range c.cfg.Services { if se.Config == nil { continue } if name, ok := se.Config["name"].(string); ok { nameTemplate, err := template.New(fmt.Sprintf("targetName-%d", i)).Funcs(templateFunctions).Option("missingkey=zero").Parse(name) if err != nil { return err } se.targetNameTemplate = nameTemplate } if eventTags, ok := se.Config["event-tags"].(map[string]any); ok { se.targetTagsTemplate = make(map[string]*template.Template) for tagName, tagTemplateString := range eventTags { tagTemplate, err := template.New(fmt.Sprintf("tagTemplate-%s-%d", tagName, i)).Funcs(templateFunctions).Option("missingkey=zero").Parse(fmt.Sprintf("%v", tagTemplateString)) if err != nil { return err } se.targetTagsTemplate[tagName] = tagTemplate } } } err = c.readVars(ctx) if err != nil { return err } for _, actName := range c.cfg.OnAdd { if cfg, ok := c.actionsConfig[actName]; ok { a, err := c.initializeAction(cfg) if err != nil { return err } c.addActions = append(c.addActions, a) continue } return fmt.Errorf("unknown action name %q", actName) } for _, actName := range c.cfg.OnDelete { if cfg, ok := c.actionsConfig[actName]; ok { a, err := c.initializeAction(cfg) if err != nil { return err } c.delActions = append(c.delActions, a) continue } return fmt.Errorf("unknown action name %q", actName) } c.numActions = len(c.addActions) + len(c.delActions) c.logger.Printf("initialized consul loader: %+v", c.cfg) return nil } func (c *consulLoader) Start(ctx context.Context) chan *loaders.TargetOperation { opChan := make(chan *loaders.TargetOperation) var err error CLIENT: err = c.initClient() if err != nil { c.logger.Printf("Failed to create a Consul client:%v", err) consulLoaderWatchError.WithLabelValues(loaderType, fmt.Sprintf("%v", err)).Add(1) time.Sleep(2 * time.Second) goto CLIENT } sChan := make(chan []*api.ServiceEntry) go func() { for { select { case <-ctx.Done(): return case ses, ok := <-sChan: if !ok { return } tcs := make(map[string]*types.TargetConfig) srvName := "" for _, se := range ses { srvName = se.Service.Service tc, err := c.serviceEntryToTargetConfig(se) if err != nil { c.logger.Printf("Failed to convert service entry %+v to a target config: %v", se, err) continue } tcs[tc.Name] = tc } c.updateTargets(ctx, srvName, tcs, opChan) } } }() for _, s := range c.cfg.Services { go func(s *serviceDef) { err := c.startServicesWatch(ctx, s.Name, s.Tags, sChan, time.Minute) if err != nil { c.logger.Printf("service %q watch stopped: %v", s.Name, err) } }(s) } return opChan } func (c *consulLoader) RunOnce(ctx context.Context) (map[string]*types.TargetConfig, error) { if err := c.initClient(); err != nil { return nil, err } result := make(map[string]*types.TargetConfig) rsChan := make(chan *api.ServiceEntry) wg := new(sync.WaitGroup) // fan-out queries for _, s := range c.cfg.Services { wg.Add(1) go func(s *serviceDef) { defer wg.Done() ses, _, err := c.client.Health().ServiceMultipleTags(s.Name, s.Tags, true, &api.QueryOptions{}) if err != nil { c.logger.Printf("failed to get service %q instances: %v", s.Name, err) return } for _, se := range ses { select { case rsChan <- se: case <-ctx.Done(): return } } }(s) } // closer go func() { wg.Wait() close(rsChan) }() for { select { case se, ok := <-rsChan: if !ok { return result, nil } tc, err := c.serviceEntryToTargetConfig(se) if err != nil { c.logger.Printf("failed to convert service %+v to target config: %v", se, err) continue } if tc != nil { result[tc.Name] = tc } case <-ctx.Done(): return result, ctx.Err() } } } // func (c *consulLoader) initClient() error { var err error if c.client != nil { _, err = c.client.Agent().Self() if err == nil { return nil } } // create a new client clientConfig := &api.Config{ Address: c.cfg.Address, Scheme: "http", Datacenter: c.cfg.Datacenter, Token: c.cfg.Token, } if c.cfg.Username != "" && c.cfg.Password != "" { clientConfig.HttpAuth = &api.HttpBasicAuth{ Username: c.cfg.Username, Password: c.cfg.Password, } } c.client, err = api.NewClient(clientConfig) return err } func (c *consulLoader) setDefaults() error { if c.cfg.Address == "" { c.cfg.Address = defaultAddress } if c.cfg.Datacenter == "" { c.cfg.Datacenter = "dc1" } if c.cfg.KeyPrefix == "" && len(c.cfg.Services) == 0 { c.cfg.KeyPrefix = defaultPrefix } if c.cfg.ActionsTimeout <= 0 { c.cfg.ActionsTimeout = defaultActionTimeout } return nil } func (c *consulLoader) startServicesWatch(ctx context.Context, serviceName string, tags []string, sChan chan<- []*api.ServiceEntry, watchTimeout time.Duration) error { if watchTimeout <= 0 { watchTimeout = defaultWatchTimeout } var index uint64 qOpts := &api.QueryOptions{ WaitIndex: index, WaitTime: watchTimeout, } var err error // long blocking watch for { select { case <-ctx.Done(): return ctx.Err() default: if c.cfg.Debug { c.logger.Printf("(re)starting watch service=%q, index=%d", serviceName, qOpts.WaitIndex) } index, err = c.watch(qOpts.WithContext(ctx), serviceName, tags, sChan) if err != nil { c.logger.Printf("service %q watch failed: %v", serviceName, err) } if index == 1 { qOpts.WaitIndex = index time.Sleep(2 * time.Second) continue } if index > qOpts.WaitIndex { qOpts.WaitIndex = index } // reset WaitIndex if the returned index decreases // https://www.consul.io/api-docs/features/blocking#implementation-details if index < qOpts.WaitIndex { qOpts.WaitIndex = 0 } } } } func (c *consulLoader) watch(qOpts *api.QueryOptions, serviceName string, tags []string, sChan chan<- []*api.ServiceEntry) (uint64, error) { se, meta, err := c.client.Health().ServiceMultipleTags(serviceName, tags, true, qOpts) if err != nil { return 0, err } if meta.LastIndex == qOpts.WaitIndex { c.logger.Printf("service=%q did not change", serviceName) return meta.LastIndex, nil } if len(se) == 0 { return 1, nil } sChan <- se return meta.LastIndex, nil } func (c *consulLoader) serviceEntryToTargetConfig(se *api.ServiceEntry) (*types.TargetConfig, error) { tc := new(types.TargetConfig) if se.Service == nil { return tc, nil } SRV: for _, sd := range c.cfg.Services { // match service name if se.Service.Service != sd.Name { continue } // match service tags if len(sd.tags) > 0 { for requiredTag := range sd.tags { if !slices.Contains(se.Service.Tags, requiredTag) { goto SRV } } } // decode config if present if sd.Config != nil { err := mapstructure.Decode(sd.Config, tc) if err != nil { return nil, err } } tc.Address = se.Service.Address if tc.Address == "" { tc.Address = se.Node.Address } tc.Address = net.JoinHostPort(tc.Address, strconv.Itoa(se.Service.Port)) var buffer bytes.Buffer tc.Name = se.Service.ID if sd.targetNameTemplate != nil { buffer.Reset() err := sd.targetNameTemplate.Execute(&buffer, se.Service) if err != nil { c.logger.Println("Could not execute nameTemplate") continue } tc.Name = buffer.String() } // Create Event tags from Consul via templates if len(sd.targetTagsTemplate) > 0 { eventTags := make(map[string]string) for tagName, tagTemplate := range sd.targetTagsTemplate { buffer.Reset() err := tagTemplate.Execute(&buffer, se.Service) if err != nil { c.logger.Println("Could not execute tagTemplate:", tagName) return nil, err } eventTags[tagName] = buffer.String() } tc.EventTags = eventTags } return tc, nil } return nil, errors.New("unable to find a match in Consul service(s)") } func (c *consulLoader) updateTargets(ctx context.Context, srvName string, tcs map[string]*types.TargetConfig, opChan chan *loaders.TargetOperation) { targetOp, err := c.runActions(ctx, tcs, loaders.Diff(c.lastTargets[srvName], tcs)) if err != nil { c.logger.Printf("failed to run actions: %v", err) return } numAdds := len(targetOp.Add) numDels := len(targetOp.Del) if c.cfg.Debug { c.logger.Printf("updating service %s with targets=%v", srvName, tcs) c.logger.Printf("updating service %s with op=%v", srvName, targetOp) } defer func() { consulLoaderLoadedTargets.WithLabelValues(loaderType).Set(float64(numAdds)) consulLoaderDeletedTargets.WithLabelValues(loaderType).Set(float64(numDels)) }() if numAdds+numDels == 0 { return } c.m.Lock() if _, ok := c.lastTargets[srvName]; !ok { c.lastTargets[srvName] = make(map[string]*types.TargetConfig) } // do delete first since change is delete+add for _, del := range targetOp.Del { delete(c.lastTargets[srvName], del) } for _, add := range targetOp.Add { c.lastTargets[srvName][add.Name] = add } c.m.Unlock() opChan <- targetOp } // func (c *consulLoader) readVars(ctx context.Context) error { if c.cfg.VarsFile == "" { c.vars = c.cfg.Vars return nil } b, err := gfile.ReadFile(ctx, c.cfg.VarsFile) if err != nil { return err } v := make(map[string]interface{}) err = yaml.Unmarshal(b, &v) if err != nil { return err } c.vars = utils.MergeMaps(v, c.cfg.Vars) return nil } func (c *consulLoader) initializeAction(cfg map[string]interface{}) (actions.Action, error) { if len(cfg) == 0 { return nil, errors.New("missing action definition") } if actType, ok := cfg["type"]; ok { switch actType := actType.(type) { case string: if in, ok := actions.Actions[actType]; ok { act := in() err := act.Init(cfg, actions.WithLogger(c.logger), actions.WithTargets(nil)) if err != nil { return nil, err } return act, nil } return nil, fmt.Errorf("unknown action type %q", actType) default: return nil, fmt.Errorf("unexpected action field type %T", actType) } } return nil, errors.New("missing type field under action") } func (c *consulLoader) runActions(ctx context.Context, tcs map[string]*types.TargetConfig, targetOp *loaders.TargetOperation) (*loaders.TargetOperation, error) { if c.numActions == 0 { return targetOp, nil } var err error // some actions are defined for _, tc := range tcs { err = c.targetConfigFn(tc) if err != nil { c.logger.Printf("failed running target config fn on target %q", tc.Name) } } // run target config func and build map of targets configs for i, tAdd := range targetOp.Add { err = c.targetConfigFn(tAdd) if err != nil { return nil, err } targetOp.Add[i] = tAdd } opChan := make(chan *loaders.TargetOperation) doneCh := make(chan struct{}) result := &loaders.TargetOperation{ Add: make(map[string]*types.TargetConfig, len(targetOp.Add)), Del: make([]string, 0, len(targetOp.Del)), } ctx, cancel := context.WithTimeout(ctx, c.cfg.ActionsTimeout) defer cancel() // start operation gathering goroutine go func() { for { select { case <-ctx.Done(): return case op, ok := <-opChan: if !ok { close(doneCh) return } for n, t := range op.Add { result.Add[n] = t } result.Del = append(result.Del, op.Del...) } } }() // create waitGroup and add the number of target operations to it wg := new(sync.WaitGroup) wg.Add(len(targetOp.Add) + len(targetOp.Del)) // run OnAdd actions for n, tAdd := range targetOp.Add { go func(n string, tc *types.TargetConfig) { defer wg.Done() err := c.runOnAddActions(ctx, tc.Name, tcs) if err != nil { c.logger.Printf("failed running OnAdd actions: %v", err) return } opChan <- &loaders.TargetOperation{Add: map[string]*types.TargetConfig{n: tc}} }(n, tAdd) } // run OnDelete actions for _, tDel := range targetOp.Del { go func(name string) { defer wg.Done() err := c.runOnDeleteActions(ctx, name, tcs) if err != nil { c.logger.Printf("failed running OnDelete actions: %v", err) return } opChan <- &loaders.TargetOperation{Del: []string{name}} }(tDel) } wg.Wait() close(opChan) <-doneCh //wait for gathering goroutine to finish return result, nil } func (c *consulLoader) runOnAddActions(ctx context.Context, tName string, tcs map[string]*types.TargetConfig) error { aCtx := &actions.Context{ Input: tName, Env: make(map[string]any), Vars: c.vars, Targets: tcs, } for _, act := range c.addActions { c.logger.Printf("running action %q for target %q", act.NName(), tName) res, err := act.Run(ctx, aCtx) if err != nil { return fmt.Errorf("action %q for target %q failed: %v", act.NName(), tName, err) } aCtx.Env[act.NName()] = utils.Convert(res) if c.cfg.Debug { c.logger.Printf("action %q, target %q result: %+v", act.NName(), tName, res) b, _ := json.MarshalIndent(aCtx, "", " ") c.logger.Printf("action %q context:\n%s", act.NName(), string(b)) } } return nil } func (c *consulLoader) runOnDeleteActions(ctx context.Context, tName string, _ map[string]*types.TargetConfig) error { env := make(map[string]interface{}) for _, act := range c.delActions { res, err := act.Run(ctx, &actions.Context{Input: tName, Env: env, Vars: c.vars}) if err != nil { return fmt.Errorf("action %q for target %q failed: %v", act.NName(), tName, err) } env[act.NName()] = res } return nil } ================================================ FILE: pkg/loaders/consul_loader/consul_loader_metrics.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package consul_loader import "github.com/prometheus/client_golang/prometheus" var consulLoaderLoadedTargets = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "consul_loader", Name: "number_of_loaded_targets", Help: "Number of new targets successfully loaded", }, []string{"loader_type"}) var consulLoaderDeletedTargets = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "consul_loader", Name: "number_of_deleted_targets", Help: "Number of targets successfully deleted", }, []string{"loader_type"}) var consulLoaderWatchError = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "consul_loader", Name: "number_of_watch_errors", Help: "Number of watch errors", }, []string{"loader_type", "error"}) func initMetrics() { consulLoaderLoadedTargets.WithLabelValues(loaderType).Set(0) consulLoaderDeletedTargets.WithLabelValues(loaderType).Set(0) consulLoaderWatchError.WithLabelValues(loaderType, "").Add(0) } func registerMetrics(reg *prometheus.Registry) error { initMetrics() var err error if err = reg.Register(consulLoaderLoadedTargets); err != nil { return err } if err = reg.Register(consulLoaderDeletedTargets); err != nil { return err } return reg.Register(consulLoaderWatchError) } ================================================ FILE: pkg/loaders/consul_loader/consul_loader_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project ("Work") made under the Google Software Grant and Corporate Contributor License Agreement ("CLA") and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose. // This code is provided on an "as is" basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package consul_loader import ( "context" "io" "log" "testing" "github.com/hashicorp/consul/api" "github.com/openconfig/gnmic/pkg/api/utils" ) // Test the specific bug scenario described in issue #706 // This test reproduces the exact problem: services with extra metadata tags // were being silently filtered out by the old logic func TestIssue706_ServicesWithExtraTagsFiltered(t *testing.T) { cl := &consulLoader{ logger: log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags), cfg: &cfg{ Services: []*serviceDef{ { Name: "test-service", Tags: []string{"gnmic", "network-device"}, tags: map[string]struct{}{ "gnmic": {}, "network-device": {}, }, Config: map[string]interface{}{ "name": "test-target", }, }, }, }, } err := cl.Init(context.Background(), nil, nil) if err != nil { t.Fatalf("Expected Init to succeed, but got error: %v", err) } // Service with extra metadata tags - this should NOT be filtered out serviceEntry := &api.ServiceEntry{ Service: &api.AgentService{ ID: "test-service-1", Service: "test-service", Tags: []string{"gnmic", "network-device", "vendor:arista", "environment:production"}, Address: "192.168.1.100", Port: 57400, }, Node: &api.Node{ Address: "192.168.1.100", }, } result, err := cl.serviceEntryToTargetConfig(serviceEntry) if err != nil { t.Fatalf("Expected service with extra tags to be accepted, but got error: %v", err) } if result == nil { t.Fatal("Expected service with extra tags to be accepted, but got nil result") } if result.Name != "test-target" { t.Errorf("Expected target name 'test-target', got: %s", result.Name) } if result.Address != "192.168.1.100:57400" { t.Errorf("Expected address '192.168.1.100:57400', got: %s", result.Address) } } // Test case that would demonstrate the old buggy behavior // This test explicitly documents what the old code was doing wrong func TestOldBuggyLogicWouldReject(t *testing.T) { // Simulate what the OLD buggy logic was doing: // for _, t := range se.Service.Tags { // if _, ok := sd.tags[t]; !ok { // goto SRV // Reject service because of extra tag // } // } requiredTags := map[string]struct{}{ "gnmic": {}, "network-device": {}, } serviceTags := []string{"gnmic", "network-device", "vendor:arista", "environment:production"} // This is what the OLD code was doing (buggy logic) oldLogicWouldReject := false for _, serviceTag := range serviceTags { if _, ok := requiredTags[serviceTag]; !ok { oldLogicWouldReject = true break } } // The old logic would incorrectly reject this service if !oldLogicWouldReject { t.Error("This test is invalid - the old buggy logic should have rejected this service") } // But the NEW logic should accept it (all required tags are present) newLogicShouldAccept := true for requiredTag := range requiredTags { found := false for _, serviceTag := range serviceTags { if serviceTag == requiredTag { found = true break } } if !found { newLogicShouldAccept = false break } } if !newLogicShouldAccept { t.Error("The new logic should accept this service since all required tags are present") } t.Logf("✓ Old logic would incorrectly reject: %v", oldLogicWouldReject) t.Logf("✓ New logic correctly accepts: %v", newLogicShouldAccept) } ================================================ FILE: pkg/loaders/consul_loader/options.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package consul_loader import ( "github.com/prometheus/client_golang/prometheus" "github.com/openconfig/gnmic/pkg/api/types" ) func (c *consulLoader) RegisterMetrics(reg *prometheus.Registry) { if !c.cfg.EnableMetrics { return } if reg == nil { c.logger.Printf("ERR: metrics enabled but main registry is not initialized, enable main metrics under `api-server`") return } if err := registerMetrics(reg); err != nil { c.logger.Printf("failed to register metrics: %v", err) } } func (c *consulLoader) WithActions(acts map[string]map[string]interface{}) { c.actionsConfig = acts } func (c *consulLoader) WithTargetsDefaults(fn func(tc *types.TargetConfig) error) { c.targetConfigFn = fn } ================================================ FILE: pkg/loaders/docker_loader/docker_loader.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package docker_loader import ( "context" "encoding/json" "errors" "fmt" "io" "log" "net" "net/url" "strconv" "strings" "sync" "time" "gopkg.in/yaml.v2" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/network" dClient "github.com/docker/docker/client" "github.com/mitchellh/mapstructure" "github.com/openconfig/gnmic/pkg/actions" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" gfile "github.com/openconfig/gnmic/pkg/file" "github.com/openconfig/gnmic/pkg/loaders" ) const ( loggingPrefix = "[docker_loader] " watchInterval = 30 * time.Second loaderType = "docker" ) func init() { loaders.Register(loaderType, func() loaders.TargetLoader { return &dockerLoader{ cfg: new(cfg), wg: new(sync.WaitGroup), m: new(sync.Mutex), lastTargets: make(map[string]*types.TargetConfig), logger: log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags), } }) } type dockerLoader struct { cfg *cfg client *dClient.Client wg *sync.WaitGroup m *sync.Mutex lastTargets map[string]*types.TargetConfig targetConfigFn func(*types.TargetConfig) error logger *log.Logger fl []*targetFilterComp // vars map[string]interface{} actionsConfig map[string]map[string]interface{} addActions []actions.Action delActions []actions.Action numActions int } type targetFilterComp struct { fl []filters.Args nt filters.Args port string cfg map[string]interface{} } type cfg struct { // address of docker daemon API Address string `json:"address,omitempty" mapstructure:"address,omitempty"` // interval between docker daemon queries Interval time.Duration `json:"interval,omitempty" mapstructure:"interval,omitempty"` // timeout of docker daemon queries Timeout time.Duration `json:"timeout,omitempty" mapstructure:"timeout,omitempty"` // docker filter to apply on queried docker containers Filters []*targetFilter `json:"filters,omitempty" mapstructure:"filters,omitempty"` // time to wait before the first docker filter query StartDelay time.Duration `json:"start-delay,omitempty" mapstructure:"start-delay,omitempty"` // enable debug mode for more logging messages Debug bool `json:"debug,omitempty" mapstructure:"debug,omitempty"` // if true, registers dockerLoader prometheus metrics with the provided // prometheus registry EnableMetrics bool `json:"enable-metrics,omitempty" mapstructure:"enable-metrics,omitempty"` // variables definitions to be passed to the actions Vars map[string]interface{} // variable file, values in this file will be overwritten by // the ones defined in Vars VarsFile string `mapstructure:"vars-file,omitempty"` // list of Actions to run on new target discovery OnAdd []string `json:"on-add,omitempty" mapstructure:"on-add,omitempty"` // list of Actions to run on target removal OnDelete []string `json:"on-delete,omitempty" mapstructure:"on-delete,omitempty"` } type targetFilter struct { Containers []map[string]string `json:"containers,omitempty" mapstructure:"containers,omitempty"` Network map[string]string `json:"network,omitempty" mapstructure:"network,omitempty"` Port string `json:"port,omitempty" mapstructure:"port,omitempty"` Config map[string]interface{} `json:"config,omitempty" mapstructure:"config,omitempty"` } func (d *dockerLoader) Init(ctx context.Context, cfg map[string]interface{}, logger *log.Logger, opts ...loaders.Option) error { err := loaders.DecodeConfig(cfg, d.cfg) if err != nil { return err } d.setDefaults() for _, opt := range opts { opt(d) } d.fl = make([]*targetFilterComp, 0, len(d.cfg.Filters)) for _, fm := range d.cfg.Filters { // network filter nflt := filters.NewArgs() for k, v := range fm.Network { nflt.Add(k, v) } // container filters cflt := make([]filters.Args, 0, len(fm.Containers)) for _, sfm := range fm.Containers { flt := filters.NewArgs(filters.KeyValuePair{ Key: "status", Value: "running", }) for k, v := range sfm { if strings.Contains(k, "=") { ks := strings.SplitN(k, "=", 2) flt.Add(ks[0], strings.Join(append(ks[1:], v), "=")) continue } flt.Add(k, v) } cflt = append(cflt, flt) } // target filters d.fl = append(d.fl, &targetFilterComp{ fl: cflt, nt: nflt, port: fm.Port, cfg: fm.Config, }) } if logger != nil { d.logger.SetOutput(logger.Writer()) d.logger.SetFlags(logger.Flags()) } d.client, err = d.createDockerClient() if err != nil { return err } ping, err := d.client.Ping(ctx) if err != nil { return err } err = d.readVars(ctx) if err != nil { return err } for _, actName := range d.cfg.OnAdd { if cfg, ok := d.actionsConfig[actName]; ok { fmt.Println(cfg) a, err := d.initializeAction(cfg) if err != nil { return err } d.addActions = append(d.addActions, a) continue } return fmt.Errorf("unknown action name %q", actName) } for _, actName := range d.cfg.OnDelete { if cfg, ok := d.actionsConfig[actName]; ok { a, err := d.initializeAction(cfg) if err != nil { return err } d.delActions = append(d.delActions, a) continue } return fmt.Errorf("unknown action name %q", actName) } d.numActions = len(d.addActions) + len(d.delActions) d.logger.Printf("connected to docker daemon: %+v", ping) d.logger.Printf("initialized loader type %q: %s", loaderType, d) return nil } func (d *dockerLoader) setDefaults() { if d.cfg.Interval <= 0 { d.cfg.Interval = watchInterval } if d.cfg.Timeout <= 0 || d.cfg.Timeout >= d.cfg.Interval { d.cfg.Timeout = d.cfg.Interval / 2 } if len(d.cfg.Filters) == 0 { d.cfg.Filters = []*targetFilter{ { Containers: []map[string]string{ {"status": "running"}, }, }, } } } func (d *dockerLoader) createDockerClient() (*dClient.Client, error) { var opts []dClient.Opt if d.cfg.Address == "" { opts = []dClient.Opt{ dClient.FromEnv, dClient.WithTimeout(d.cfg.Timeout), } } else { opts = []dClient.Opt{ dClient.WithAPIVersionNegotiation(), dClient.WithHost(d.cfg.Address), dClient.WithTimeout(d.cfg.Timeout), } } return dClient.NewClientWithOpts(opts...) } func (d *dockerLoader) Start(ctx context.Context) chan *loaders.TargetOperation { opChan := make(chan *loaders.TargetOperation) ticker := time.NewTicker(d.cfg.Interval) go func() { defer close(opChan) defer ticker.Stop() time.Sleep(d.cfg.StartDelay) // first run d.update(ctx, opChan) // periodic runs for { select { case <-ctx.Done(): d.logger.Printf("%q context done: %v", loaderType, ctx.Err()) return case <-ticker.C: d.update(ctx, opChan) } } }() return opChan } func (d *dockerLoader) RunOnce(ctx context.Context) (map[string]*types.TargetConfig, error) { d.logger.Printf("querying %q targets", loaderType) readTargets, err := d.getTargets(ctx) if err != nil { return nil, err } if d.cfg.Debug { d.logger.Printf("docker loader discovered %d target(s)", len(readTargets)) } return readTargets, nil } // update runs the docker loader once and updates the added/remove target to the opChan func (d *dockerLoader) update(ctx context.Context, opChan chan *loaders.TargetOperation) { readTargets, err := d.RunOnce(ctx) if err != nil { d.logger.Printf("failed to read targets from docker daemon: %v", err) return } select { case <-ctx.Done(): return default: d.updateTargets(ctx, readTargets, opChan) } } func (d *dockerLoader) getTargets(ctx context.Context) (map[string]*types.TargetConfig, error) { d.wg = new(sync.WaitGroup) d.wg.Add(len(d.fl)) readTargets := make(map[string]*types.TargetConfig) m := new(sync.Mutex) errChan := make(chan error, len(d.fl)) start := time.Now() // https://github.com/golang/go/issues/60048 defer func() { dockerLoaderListRequestDuration.WithLabelValues(loaderType). Set(float64(time.Since(start).Nanoseconds())) }() for _, targetFilter := range d.fl { go func(fl *targetFilterComp) { dockerLoaderListRequestsTotal.WithLabelValues(loaderType).Add(1) defer d.wg.Done() // get networks nrs, err := d.client.NetworkList(ctx, network.ListOptions{ Filters: fl.nt, }) if err != nil { errChan <- fmt.Errorf("failed getting networks list using filter %+v: %v", fl.nt, err) return } // get containers for each defined filter for _, cfl := range fl.fl { conts, err := d.client.ContainerList(ctx, container.ListOptions{ Filters: cfl, }) if err != nil { errChan <- fmt.Errorf("failed getting containers list using filter %+v: %v", cfl, err) continue } for _, cont := range conts { d.logger.Printf("building target from container %q", cont.Names) tc := new(types.TargetConfig) if fl.cfg != nil { err = mapstructure.Decode(fl.cfg, tc) if err != nil { d.logger.Printf("failed to decode config map: %v", err) } } // set target name tc.Name = cont.ID if len(cont.Names) > 0 { tc.Name = strings.TrimLeft(cont.Names[0], "/") } // discover target address and port switch strings.ToLower(cont.HostConfig.NetworkMode) { case "host": if d.cfg.Address == "" || strings.HasPrefix(d.cfg.Address, "unix://") { tc.Address = "localhost" } else { tc.Address, _, err = net.SplitHostPort(d.cfg.Address) if err != nil { errChan <- err continue } } if fl.port != "" { if !strings.Contains(fl.port, "=") { tc.Address = fmt.Sprintf("%s:%s", tc.Address, fl.port) } else { portLabel := strings.Replace(fl.port, "label=", "", 1) if p, ok := cont.Labels[portLabel]; ok { tc.Address = fmt.Sprintf("%s:%s", tc.Address, p) } } } default: if strings.HasPrefix(d.cfg.Address, "unix:///") { for _, nr := range nrs { if n, ok := cont.NetworkSettings.Networks[nr.Name]; ok { if n.IPAddress != "" { tc.Address = n.IPAddress break } tc.Address = n.GlobalIPv6Address break } } if tc.Address == "" { d.logger.Printf("%q no address found", tc.Name) continue } if fl.port != "" { if !strings.Contains(fl.port, "=") { tc.Address = fmt.Sprintf("%s:%s", tc.Address, fl.port) } else { portLabel := strings.Replace(fl.port, "label=", "", 1) if p, ok := cont.Labels[portLabel]; ok { tc.Address = fmt.Sprintf("%s:%s", tc.Address, p) } } } } else { // get port from config/label port := getPortNumber(cont.Labels, fl.port) // check if port is exposed, find the public port and build the target address for _, p := range cont.Ports { // the container private port matches the port from the docker label if p.PrivatePort == port && p.Type == "tcp" { ipAddr := p.IP if ipAddr == "0.0.0.0" || ipAddr == "::" { if d.cfg.Address == "" { // if docker daemon is empty use localhost as target address ipAddr = "localhost" } else { // derive target address from daemon address if not empty u, err := url.Parse(d.cfg.Address) if err != nil { d.logger.Printf("failed to parse docker daemon address") continue } ipAddr, _, _ = net.SplitHostPort(u.Host) } } if ipAddr != "" && p.PublicPort != 0 { tc.Address = fmt.Sprintf("%s:%d", ipAddr, p.PublicPort) } } } // if an address was not found using the exposed ports // select the bridge address, and use the port from label if not zero if tc.Address == "" { for _, nr := range nrs { if n, ok := cont.NetworkSettings.Networks[nr.Name]; ok { if n.IPAddress != "" { tc.Address = n.IPAddress break } tc.Address = n.GlobalIPv6Address break } } if tc.Address == "" { d.logger.Printf("%q no address found", tc.Name) continue } if port != 0 { tc.Address = fmt.Sprintf("%s:%d", tc.Address, port) } } } } // if d.cfg.Debug { d.logger.Printf("discovered target config %s with filter: %v", tc, cfl) } m.Lock() readTargets[tc.Name] = tc m.Unlock() } } }(targetFilter) } var errors = make([]error, 0) go func() { for err := range errChan { errors = append(errors, err) } }() d.wg.Wait() close(errChan) if len(errors) > 0 { for _, err := range errors { dockerLoaderFailedListRequests.WithLabelValues(loaderType, fmt.Sprintf("%v", err)).Add(1) d.logger.Printf("%v", err) } return nil, fmt.Errorf("there was %d error(s)", len(errors)) } return readTargets, nil } func (d *dockerLoader) diff(m map[string]*types.TargetConfig) *loaders.TargetOperation { d.m.Lock() defer d.m.Unlock() result := loaders.Diff(d.lastTargets, m) for _, t := range result.Add { if _, ok := d.lastTargets[t.Name]; !ok { d.lastTargets[t.Name] = t } } for _, n := range result.Del { delete(d.lastTargets, n) } dockerLoaderLoadedTargets.WithLabelValues(loaderType).Set(float64(len(result.Add))) dockerLoaderDeletedTargets.WithLabelValues(loaderType).Set(float64(len(result.Del))) if d.cfg.Debug { b, err := json.MarshalIndent(result, "", " ") if err != nil { d.logger.Printf("discovery diff result: %v", result) } else { d.logger.Printf("discovery diff result:\n%s", string(b)) } } return result } func (d *dockerLoader) String() string { b, err := json.Marshal(d.cfg) if err != nil { return fmt.Sprintf("%+v", d.cfg) } return string(b) } func (d *dockerLoader) updateTargets(ctx context.Context, tcs map[string]*types.TargetConfig, opChan chan *loaders.TargetOperation) { var err error for _, tc := range tcs { err = d.targetConfigFn(tc) if err != nil { d.logger.Printf("failed running target config fn on target %q", tc.Name) } } targetOp, err := d.runActions(ctx, tcs, d.diff(tcs)) if err != nil { d.logger.Printf("failed to run actions: %v", err) return } numAdds := len(targetOp.Add) numDels := len(targetOp.Del) defer func() { dockerLoaderLoadedTargets.WithLabelValues(loaderType).Set(float64(numAdds)) dockerLoaderDeletedTargets.WithLabelValues(loaderType).Set(float64(numDels)) }() if numAdds+numDels == 0 { return } d.m.Lock() // do deletes first since change is delete+add for _, del := range targetOp.Del { delete(d.lastTargets, del) } for _, add := range targetOp.Add { d.lastTargets[add.Name] = add } d.m.Unlock() opChan <- targetOp } func (d *dockerLoader) readVars(ctx context.Context) error { if d.cfg.VarsFile == "" { d.vars = d.cfg.Vars return nil } b, err := gfile.ReadFile(ctx, d.cfg.VarsFile) if err != nil { return err } v := make(map[string]interface{}) err = yaml.Unmarshal(b, &v) if err != nil { return err } d.vars = utils.MergeMaps(v, d.cfg.Vars) return nil } func (d *dockerLoader) initializeAction(cfg map[string]interface{}) (actions.Action, error) { if len(cfg) == 0 { return nil, errors.New("missing action definition") } if actType, ok := cfg["type"]; ok { switch actType := actType.(type) { case string: if in, ok := actions.Actions[actType]; ok { act := in() err := act.Init(cfg, actions.WithLogger(d.logger), actions.WithTargets(nil)) if err != nil { return nil, err } return act, nil } return nil, fmt.Errorf("unknown action type %q", actType) default: return nil, fmt.Errorf("unexpected action field type %T", actType) } } return nil, errors.New("missing type field under action") } func (d *dockerLoader) runActions(ctx context.Context, tcs map[string]*types.TargetConfig, targetOp *loaders.TargetOperation) (*loaders.TargetOperation, error) { if d.numActions == 0 { return targetOp, nil } opChan := make(chan *loaders.TargetOperation) // some actions are defined, doneCh := make(chan struct{}) result := &loaders.TargetOperation{ Add: make(map[string]*types.TargetConfig, len(targetOp.Add)), Del: make([]string, 0, len(targetOp.Del)), } ctx, cancel := context.WithTimeout(ctx, d.cfg.Interval) defer cancel() // start gathering goroutine go func() { for { select { case <-ctx.Done(): close(doneCh) return case op, ok := <-opChan: if !ok { close(doneCh) return } for n, t := range op.Add { result.Add[n] = t } result.Del = append(result.Del, op.Del...) } } }() // create waitGroup and add the number of target operations to it wg := new(sync.WaitGroup) wg.Add(len(targetOp.Add) + len(targetOp.Del)) // run OnAdd actions for n, tAdd := range targetOp.Add { go func(n string, tc *types.TargetConfig) { defer wg.Done() err := d.runOnAddActions(ctx, tc.Name, tcs) if err != nil { d.logger.Printf("failed running OnAdd actions: %v", err) return } opChan <- &loaders.TargetOperation{Add: map[string]*types.TargetConfig{n: tc}} }(n, tAdd) } // run OnDelete actions for _, tDel := range targetOp.Del { go func(name string) { defer wg.Done() err := d.runOnDeleteActions(ctx, name) if err != nil { d.logger.Printf("failed running OnDelete actions: %v", err) return } opChan <- &loaders.TargetOperation{Del: []string{name}} }(tDel) } wg.Wait() close(opChan) <-doneCh //wait for gathering goroutine to finish return result, nil } func (d *dockerLoader) runOnAddActions(ctx context.Context, tName string, tcs map[string]*types.TargetConfig) error { aCtx := &actions.Context{ Input: tName, Env: make(map[string]interface{}), Vars: d.vars, Targets: tcs, } for _, act := range d.addActions { d.logger.Printf("running action %q for target %q", act.NName(), tName) res, err := act.Run(ctx, aCtx) if err != nil { // delete target from known targets map d.m.Lock() delete(d.lastTargets, tName) d.m.Unlock() return fmt.Errorf("action %q for target %q failed: %v", act.NName(), tName, err) } aCtx.Env[act.NName()] = utils.Convert(res) if d.cfg.Debug { d.logger.Printf("action %q, target %q result: %+v", act.NName(), tName, res) b, _ := json.MarshalIndent(aCtx, "", " ") d.logger.Printf("action %q context:\n%s", act.NName(), string(b)) } } return nil } func (d *dockerLoader) runOnDeleteActions(ctx context.Context, tName string) error { env := make(map[string]interface{}) for _, act := range d.delActions { res, err := act.Run(ctx, &actions.Context{Input: tName, Env: env, Vars: d.vars}) if err != nil { return fmt.Errorf("action %q for target %q failed: %v", act.NName(), tName, err) } env[act.NName()] = res } return nil } /// helpers func getPortNumber(labels map[string]string, p string) uint16 { var port uint16 if p != "" { if !strings.Contains(p, "=") { p, _ := strconv.Atoi(p) port = uint16(p) } else { s := labels[strings.Replace(p, "label=", "", 1)] p, _ := strconv.Atoi(s) port = uint16(p) } } return port } ================================================ FILE: pkg/loaders/docker_loader/docker_loader_metrics.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package docker_loader import "github.com/prometheus/client_golang/prometheus" var dockerLoaderLoadedTargets = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "docker_loader", Name: "number_of_loaded_targets", Help: "Number of new targets successfully loaded", }, []string{"loader_type"}) var dockerLoaderDeletedTargets = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "docker_loader", Name: "number_of_deleted_targets", Help: "Number of targets successfully deleted", }, []string{"loader_type"}) var dockerLoaderFailedListRequests = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "docker_loader", Name: "number_of_failed_docker_list", Help: "Number of times a docker list failed", }, []string{"loader_type", "error"}) var dockerLoaderListRequestsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "docker_loader", Name: "number_of_docker_list_total", Help: "Number of times the loader sent a docker list request", }, []string{"loader_type"}) var dockerLoaderListRequestDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "docker_loader", Name: "docker_list_duration_ns", Help: "Duration of docker list request in ns", }, []string{"loader_type"}) func initMetrics() { dockerLoaderLoadedTargets.WithLabelValues(loaderType).Set(0) dockerLoaderDeletedTargets.WithLabelValues(loaderType).Set(0) dockerLoaderFailedListRequests.WithLabelValues(loaderType, "").Add(0) dockerLoaderListRequestsTotal.WithLabelValues(loaderType).Add(0) dockerLoaderListRequestDuration.WithLabelValues(loaderType).Set(0) } func registerMetrics(reg *prometheus.Registry) error { if reg == nil { return nil } initMetrics() var err error if err = reg.Register(dockerLoaderLoadedTargets); err != nil { return err } if err = reg.Register(dockerLoaderDeletedTargets); err != nil { return err } if err = reg.Register(dockerLoaderFailedListRequests); err != nil { return err } if err = reg.Register(dockerLoaderListRequestsTotal); err != nil { return err } if err = reg.Register(dockerLoaderListRequestDuration); err != nil { return err } return nil } ================================================ FILE: pkg/loaders/docker_loader/options.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package docker_loader import ( "github.com/openconfig/gnmic/pkg/api/types" "github.com/prometheus/client_golang/prometheus" ) func (d *dockerLoader) RegisterMetrics(reg *prometheus.Registry) { if !d.cfg.EnableMetrics { return } if reg == nil { d.logger.Printf("ERR: metrics enabled but main registry is not initialized, enable main metrics under `api-server`") return } if err := registerMetrics(reg); err != nil { d.logger.Printf("failed to register metrics: %v", err) } } func (d *dockerLoader) WithActions(acts map[string]map[string]interface{}) { d.actionsConfig = acts } func (d *dockerLoader) WithTargetsDefaults(fn func(tc *types.TargetConfig) error) { d.targetConfigFn = fn } ================================================ FILE: pkg/loaders/file_loader/file_loader.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package file_loader import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "log" "os" "sync" "text/template" "time" "gopkg.in/yaml.v2" "github.com/openconfig/gnmic/pkg/actions" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" gfile "github.com/openconfig/gnmic/pkg/file" "github.com/openconfig/gnmic/pkg/gtemplate" "github.com/openconfig/gnmic/pkg/loaders" ) const ( loggingPrefix = "[file_loader] " watchInterval = 30 * time.Second loaderType = "file" ) func init() { loaders.Register(loaderType, func() loaders.TargetLoader { return &fileLoader{ cfg: &cfg{}, m: new(sync.RWMutex), lastTargets: make(map[string]*types.TargetConfig), logger: log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags), } }) } // fileLoader implements the loaders.Loader interface. // it reads a configured file (local, ftp, sftp, http) periodically, // expects the file to contain a dictionary of types.TargetConfig. // It then adds new targets to gNMIc's targets and deletes the removed ones. type fileLoader struct { cfg *cfg m *sync.RWMutex lastTargets map[string]*types.TargetConfig targetConfigFn func(*types.TargetConfig) error logger *log.Logger // tpl *template.Template vars map[string]interface{} actionsConfig map[string]map[string]interface{} addActions []actions.Action delActions []actions.Action numActions int } type cfg struct { // path the the file, if remote, // must include the proper protocol prefix ftp://, sftp://, http:// Path string `json:"path,omitempty" mapstructure:"path,omitempty"` // the interval at which the file will be re read to load new targets // or delete removed ones. Interval time.Duration `json:"interval,omitempty" mapstructure:"interval,omitempty"` // a Go text template that can be used to transform the targets format read from the file to match // gNMIc's expected format. Template string `json:"template,omitempty" mapstructure:"template,omitempty"` // time to wait before the first file read StartDelay time.Duration `json:"start-delay,omitempty" mapstructure:"start-delay,omitempty"` // if true, registers fileLoader prometheus metrics with the provided // prometheus registry EnableMetrics bool `json:"enable-metrics,omitempty" mapstructure:"enable-metrics,omitempty"` // enable Debug Debug bool `json:"debug,omitempty" mapstructure:"debug,omitempty"` // variables definitions to be passed to the actions Vars map[string]interface{} `json:"vars,omitempty" mapstructure:"vars,omitempty"` // variable file, values in this file will be overwritten by // the ones defined in Vars VarsFile string `json:"vars-file,omitempty" mapstructure:"vars-file,omitempty"` // list of Actions to run on new target discovery OnAdd []string `json:"on-add,omitempty" mapstructure:"on-add,omitempty"` // list of Actions to run on target removal OnDelete []string `json:"on-delete,omitempty" mapstructure:"on-delete,omitempty"` } func (f *fileLoader) Init(ctx context.Context, cfg map[string]interface{}, logger *log.Logger, opts ...loaders.Option) error { err := loaders.DecodeConfig(cfg, f.cfg) if err != nil { return err } for _, o := range opts { o(f) } if f.cfg.Path == "" { return errors.New("missing file path") } if f.cfg.Interval <= 0 { f.cfg.Interval = watchInterval } if logger != nil { f.logger.SetOutput(logger.Writer()) f.logger.SetFlags(logger.Flags()) } if f.cfg.Template != "" { f.tpl, err = gtemplate.CreateTemplate("file-loader-template", f.cfg.Template) if err != nil { return err } } err = f.readVars(ctx) if err != nil { return err } for _, actName := range f.cfg.OnAdd { if cfg, ok := f.actionsConfig[actName]; ok { a, err := f.initializeAction(cfg) if err != nil { return err } f.addActions = append(f.addActions, a) continue } return fmt.Errorf("unknown action name %q", actName) } for _, actName := range f.cfg.OnDelete { if cfg, ok := f.actionsConfig[actName]; ok { a, err := f.initializeAction(cfg) if err != nil { return err } f.delActions = append(f.delActions, a) continue } return fmt.Errorf("unknown action name %q", actName) } f.numActions = len(f.addActions) + len(f.delActions) f.logger.Printf("initialized loader type %q: %s", loaderType, f) return nil } func (f *fileLoader) String() string { b, err := json.Marshal(f.cfg) if err != nil { return fmt.Sprintf("%+v", f.cfg) } return string(b) } func (f *fileLoader) Start(ctx context.Context) chan *loaders.TargetOperation { opChan := make(chan *loaders.TargetOperation) ticker := time.NewTicker(f.cfg.Interval) go func() { defer close(opChan) defer ticker.Stop() time.Sleep(f.cfg.StartDelay) f.update(ctx, opChan) for { select { case <-ctx.Done(): f.logger.Printf("%q context done: %v", loaderType, ctx.Err()) return case <-ticker.C: f.update(ctx, opChan) } } }() return opChan } func (f *fileLoader) RunOnce(ctx context.Context) (map[string]*types.TargetConfig, error) { readTargets, err := f.getTargets(ctx) if err != nil { return nil, err } if f.cfg.Debug { f.logger.Printf("file loader discovered %d target(s)", len(readTargets)) } return readTargets, nil } func (f *fileLoader) update(ctx context.Context, opChan chan *loaders.TargetOperation) { readTargets, err := f.RunOnce(ctx) if _, ok := err.(*os.PathError); ok { f.logger.Printf("path err: %v", err) return } if err != nil { f.logger.Printf("failed to read targets file: %v", err) return } select { // check if the context is done before // updating the targets to the channel case <-ctx.Done(): f.logger.Printf("context done: %v", ctx.Err()) return default: f.updateTargets(ctx, readTargets, opChan) } } func (f *fileLoader) getTargets(ctx context.Context) (map[string]*types.TargetConfig, error) { fileLoaderFileReadTotal.WithLabelValues(loaderType).Add(1) start := time.Now() // read file bytes based on the path prefix ctx, cancel := context.WithTimeout(ctx, f.cfg.Interval/2) defer cancel() b, err := gfile.ReadFile(ctx, f.cfg.Path) fileLoaderFileReadDuration.WithLabelValues(loaderType).Set(float64(time.Since(start).Nanoseconds())) if err != nil { fileLoaderFailedFileRead.WithLabelValues(loaderType, fmt.Sprintf("%v", err)).Add(1) return nil, err } if f.tpl != nil { var input interface{} err = json.Unmarshal(b, input) if err != nil { fileLoaderFailedFileRead.WithLabelValues(loaderType, fmt.Sprintf("%v", err)).Add(1) return nil, err } buf := new(bytes.Buffer) err = f.tpl.Execute(buf, input) if err != nil { fileLoaderFailedFileRead.WithLabelValues(loaderType, fmt.Sprintf("%v", err)).Add(1) return nil, err } b = buf.Bytes() } result := make(map[string]*types.TargetConfig) // unmarshal the bytes into a map of targetConfigs err = yaml.Unmarshal(b, result) if err != nil { fileLoaderFailedFileRead.WithLabelValues(loaderType, fmt.Sprintf("%v", err)).Add(1) return nil, err } // properly initialize address and name if not set for n, t := range result { if t == nil && n != "" { result[n] = &types.TargetConfig{ Name: n, Address: n, } continue } if t.Name == "" { t.Name = n } if t.Address == "" { t.Address = n } } if f.cfg.Debug { f.logger.Printf("result: %s", result) } return result, nil } func (f *fileLoader) updateTargets(ctx context.Context, tcs map[string]*types.TargetConfig, opChan chan *loaders.TargetOperation) { var err error if f.targetConfigFn != nil { for _, tc := range tcs { err = f.targetConfigFn(tc) if err != nil { f.logger.Printf("failed running target config fn on target %q", tc.Name) } } } targetOp, err := f.runActions(ctx, tcs, loaders.Diff(f.lastTargets, tcs)) if err != nil { f.logger.Printf("failed to run actions: %v", err) return } numAdds := len(targetOp.Add) numDels := len(targetOp.Del) defer func() { fileLoaderLoadedTargets.WithLabelValues(loaderType).Set(float64(numAdds)) fileLoaderDeletedTargets.WithLabelValues(loaderType).Set(float64(numDels)) }() if numAdds+numDels == 0 { return } f.m.Lock() // do delete first since change is delete+add for _, del := range targetOp.Del { delete(f.lastTargets, del) } for _, add := range targetOp.Add { f.lastTargets[add.Name] = add } f.m.Unlock() opChan <- targetOp } func (f *fileLoader) readVars(ctx context.Context) error { if f.cfg.VarsFile == "" { f.vars = f.cfg.Vars return nil } b, err := gfile.ReadFile(ctx, f.cfg.VarsFile) if err != nil { return err } v := make(map[string]interface{}) err = yaml.Unmarshal(b, &v) if err != nil { return err } f.vars = utils.MergeMaps(v, f.cfg.Vars) return nil } func (f *fileLoader) initializeAction(cfg map[string]interface{}) (actions.Action, error) { if len(cfg) == 0 { return nil, errors.New("missing action definition") } if actType, ok := cfg["type"]; ok { switch actType := actType.(type) { case string: if in, ok := actions.Actions[actType]; ok { act := in() err := act.Init(cfg, actions.WithLogger(f.logger), actions.WithTargets(nil)) if err != nil { return nil, err } return act, nil } return nil, fmt.Errorf("unknown action type %q", actType) default: return nil, fmt.Errorf("unexpected action field type %T", actType) } } return nil, errors.New("missing type field under action") } func (f *fileLoader) runActions(ctx context.Context, tcs map[string]*types.TargetConfig, targetOp *loaders.TargetOperation) (*loaders.TargetOperation, error) { if f.numActions == 0 { return targetOp, nil } opChan := make(chan *loaders.TargetOperation) // some actions are defined, doneCh := make(chan struct{}) result := &loaders.TargetOperation{ Add: make(map[string]*types.TargetConfig, len(targetOp.Add)), Del: make([]string, 0, len(targetOp.Del)), } ctx, cancel := context.WithTimeout(ctx, f.cfg.Interval) defer cancel() // start gathering goroutine go func() { for { select { case <-ctx.Done(): close(doneCh) return case op, ok := <-opChan: if !ok { close(doneCh) return } for n, t := range op.Add { result.Add[n] = t } result.Del = append(result.Del, op.Del...) } } }() // create waitGroup and add the number of target operations to it wg := new(sync.WaitGroup) wg.Add(len(targetOp.Add) + len(targetOp.Del)) // run OnAdd actions for n, tAdd := range targetOp.Add { go func(n string, tc *types.TargetConfig) { defer wg.Done() err := f.runOnAddActions(ctx, tc.Name, tcs) if err != nil { f.logger.Printf("failed running OnAdd actions: %v", err) return } opChan <- &loaders.TargetOperation{Add: map[string]*types.TargetConfig{n: tc}} }(n, tAdd) } // run OnDelete actions for _, tDel := range targetOp.Del { go func(name string) { defer wg.Done() err := f.runOnDeleteActions(ctx, name, tcs) if err != nil { f.logger.Printf("failed running OnDelete actions: %v", err) return } opChan <- &loaders.TargetOperation{Del: []string{name}} }(tDel) } wg.Wait() close(opChan) <-doneCh //wait for gathering goroutine to finish return result, nil } func (d *fileLoader) runOnAddActions(ctx context.Context, tName string, tcs map[string]*types.TargetConfig) error { aCtx := &actions.Context{ Input: tName, Env: make(map[string]interface{}), Vars: d.vars, Targets: tcs, } for _, act := range d.addActions { d.logger.Printf("running action %q for target %q", act.NName(), tName) res, err := act.Run(ctx, aCtx) if err != nil { // delete target from known targets map d.m.Lock() delete(d.lastTargets, tName) d.m.Unlock() return fmt.Errorf("action %q for target %q failed: %v", act.NName(), tName, err) } aCtx.Env[act.NName()] = utils.Convert(res) if d.cfg.Debug { d.logger.Printf("action %q, target %q result: %+v", act.NName(), tName, res) b, _ := json.MarshalIndent(aCtx, "", " ") d.logger.Printf("action %q context:\n%s", act.NName(), string(b)) } } return nil } func (d *fileLoader) runOnDeleteActions(ctx context.Context, tName string, tcs map[string]*types.TargetConfig) error { env := make(map[string]interface{}) for _, act := range d.delActions { res, err := act.Run(ctx, &actions.Context{Input: tName, Env: env, Vars: d.vars}) if err != nil { return fmt.Errorf("action %q for target %q failed: %v", act.NName(), tName, err) } env[act.NName()] = res } return nil } ================================================ FILE: pkg/loaders/file_loader/file_loader_metrics.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package file_loader import "github.com/prometheus/client_golang/prometheus" var fileLoaderLoadedTargets = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "file_loader", Name: "number_of_loaded_targets", Help: "Number of new targets successfully loaded", }, []string{"loader_type"}) var fileLoaderDeletedTargets = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "file_loader", Name: "number_of_deleted_targets", Help: "Number of targets successfully deleted", }, []string{"loader_type"}) var fileLoaderFailedFileRead = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "file_loader", Name: "number_of_failed_file_reads", Help: "Number of times gnmic failed to read the file", }, []string{"loader_type", "error"}) var fileLoaderFileReadTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "file_loader", Name: "number_of_file_read_attempts_total", Help: "Number of times the loader attempted to read the file", }, []string{"loader_type"}) var fileLoaderFileReadDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "file_loader", Name: "file_read_duration_ns", Help: "Duration of file read in ns", }, []string{"loader_type"}) func initMetrics() { fileLoaderLoadedTargets.WithLabelValues(loaderType).Set(0) fileLoaderDeletedTargets.WithLabelValues(loaderType).Set(0) fileLoaderFailedFileRead.WithLabelValues(loaderType, "").Add(0) fileLoaderFileReadTotal.WithLabelValues(loaderType).Add(0) fileLoaderFileReadDuration.WithLabelValues(loaderType).Set(0) } func registerMetrics(reg *prometheus.Registry) error { initMetrics() var err error if err = reg.Register(fileLoaderLoadedTargets); err != nil { return err } if err = reg.Register(fileLoaderDeletedTargets); err != nil { return err } if err = reg.Register(fileLoaderFailedFileRead); err != nil { return err } if err = reg.Register(fileLoaderFileReadTotal); err != nil { return err } if err = reg.Register(fileLoaderFileReadDuration); err != nil { return err } return nil } ================================================ FILE: pkg/loaders/file_loader/options.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package file_loader import ( "github.com/openconfig/gnmic/pkg/api/types" "github.com/prometheus/client_golang/prometheus" ) func (f *fileLoader) RegisterMetrics(reg *prometheus.Registry) { if !f.cfg.EnableMetrics { return } if reg == nil { f.logger.Printf("ERR: metrics enabled but main registry is not initialized, enable main metrics under `api-server`") return } if err := registerMetrics(reg); err != nil { f.logger.Printf("failed to register metrics: %v", err) } } func (f *fileLoader) WithActions(acts map[string]map[string]interface{}) { f.actionsConfig = acts } func (f *fileLoader) WithTargetsDefaults(fn func(tc *types.TargetConfig) error) { f.targetConfigFn = fn } ================================================ FILE: pkg/loaders/http_loader/http_loader.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package http_loader import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "log" "sync" "text/template" "time" "gopkg.in/yaml.v2" "github.com/go-resty/resty/v2" "github.com/openconfig/gnmic/pkg/actions" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" gfile "github.com/openconfig/gnmic/pkg/file" "github.com/openconfig/gnmic/pkg/gtemplate" "github.com/openconfig/gnmic/pkg/loaders" ) const ( loggingPrefix = "[http_loader] " loaderType = "http" defaultInterval = 1 * time.Minute defaultTimeout = 50 * time.Second ) func init() { loaders.Register(loaderType, func() loaders.TargetLoader { return &httpLoader{ cfg: &cfg{}, m: new(sync.RWMutex), lastTargets: make(map[string]*types.TargetConfig), logger: log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags), } }) } type httpLoader struct { cfg *cfg m *sync.RWMutex lastTargets map[string]*types.TargetConfig targetConfigFn func(*types.TargetConfig) error logger *log.Logger // tpl *template.Template vars map[string]interface{} actionsConfig map[string]map[string]interface{} addActions []actions.Action delActions []actions.Action numActions int } type cfg struct { // the server URL, must include http or https as a prefix URL string `json:"url,omitempty" mapstructure:"url,omitempty"` // server query interval Interval time.Duration `json:"interval,omitempty" mapstructure:"interval,omitempty"` // query timeout Timeout time.Duration `json:"timeout,omitempty" mapstructure:"timeout,omitempty"` // TLS config TLS *types.TLSConfig `json:"tls,omitempty" mapstructure:"tls,omitempty"` // SkipVerify bool `json:"skip-verify,omitempty" mapstructure:"skip-verify,omitempty"` // CAFile string `json:"ca-file,omitempty" mapstructure:"ca-file,omitempty"` // CertFile string `json:"cert-file,omitempty" mapstructure:"cert-file,omitempty"` // KeyFile string `json:"key-file,omitempty" mapstructure:"key-file,omitempty"` // HTTP basicAuth Username string `json:"username,omitempty" mapstructure:"username,omitempty"` Password string `json:"password,omitempty" mapstructure:"password,omitempty"` // Oauth2 Token string `json:"token,omitempty" mapstructure:"token,omitempty"` // the auth scheme. The default auth scheme is `Bearer``. AuthScheme string `json:"auth-scheme,omitempty" mapstructure:"auth-scheme,omitempty"` // a Go text template that can be used to transform the targets format // read from the remote http server to match gNMIc's expected format. Template string `json:"template,omitempty" mapstructure:"template,omitempty"` // a Go text template that can be used to transform the targets format // read from the remote http server to match gNMIc's expected format. TemplateFile string `json:"template-file,omitempty" mapstructure:"template-file,omitempty"` // time to wait before the first http query StartDelay time.Duration `json:"start-delay,omitempty" mapstructure:"start-delay,omitempty"` // if true, registers httpLoader prometheus metrics with the provided // prometheus registry EnableMetrics bool `json:"enable-metrics,omitempty" mapstructure:"enable-metrics,omitempty"` // enable Debug Debug bool `json:"debug,omitempty" mapstructure:"debug,omitempty"` // variables definitions to be passed to the actions Vars map[string]interface{} // variable file, values in this file will be overwritten by // the ones defined in Vars VarsFile string `mapstructure:"vars-file,omitempty"` // list of Actions to run on new target discovery OnAdd []string `json:"on-add,omitempty" mapstructure:"on-add,omitempty"` // list of Actions to run on target removal OnDelete []string `json:"on-delete,omitempty" mapstructure:"on-delete,omitempty"` } func (h *httpLoader) Init(ctx context.Context, cfg map[string]interface{}, logger *log.Logger, opts ...loaders.Option) error { err := loaders.DecodeConfig(cfg, h.cfg) if err != nil { return err } err = h.setDefaults() if err != nil { return err } for _, o := range opts { o(h) } if logger != nil { h.logger.SetOutput(logger.Writer()) h.logger.SetFlags(logger.Flags()) } if h.cfg.Template != "" { h.tpl, err = gtemplate.CreateTemplate("http-loader-template", h.cfg.Template) if err != nil { return err } } if h.cfg.TemplateFile != "" { h.tpl, err = gtemplate.CreateFileTemplate(h.cfg.TemplateFile) if err != nil { return err } } err = h.readVars(ctx) if err != nil { return err } for _, actName := range h.cfg.OnAdd { if cfg, ok := h.actionsConfig[actName]; ok { a, err := h.initializeAction(cfg) if err != nil { return err } h.addActions = append(h.addActions, a) continue } return fmt.Errorf("unknown action name %q", actName) } for _, actName := range h.cfg.OnDelete { if cfg, ok := h.actionsConfig[actName]; ok { a, err := h.initializeAction(cfg) if err != nil { return err } h.delActions = append(h.delActions, a) continue } return fmt.Errorf("unknown action name %q", actName) } h.numActions = len(h.addActions) + len(h.delActions) return nil } func (h *httpLoader) Start(ctx context.Context) chan *loaders.TargetOperation { opChan := make(chan *loaders.TargetOperation) ticker := time.NewTicker(h.cfg.Interval) go func() { defer close(opChan) defer ticker.Stop() time.Sleep(h.cfg.StartDelay) h.update(ctx, opChan) for { select { case <-ctx.Done(): h.logger.Printf("%q context done: %v", loaderType, ctx.Err()) return case <-ticker.C: h.update(ctx, opChan) } } }() return opChan } func (h *httpLoader) RunOnce(ctx context.Context) (map[string]*types.TargetConfig, error) { readTargets, err := h.getTargets() if err != nil { return nil, err } if h.cfg.Debug { h.logger.Printf("http loader discovered %d target(s)", len(readTargets)) } return readTargets, nil } func (h *httpLoader) update(ctx context.Context, opChan chan *loaders.TargetOperation) { readTargets, err := h.getTargets() if err != nil { h.logger.Printf("failed to read targets from HTTP server: %v", err) return } select { case <-ctx.Done(): return default: h.updateTargets(ctx, readTargets, opChan) } } func (h *httpLoader) setDefaults() error { if h.cfg.URL == "" { return errors.New("missing URL") } if h.cfg.Interval <= 0 { h.cfg.Interval = defaultInterval } if h.cfg.Timeout <= 0 { h.cfg.Timeout = defaultTimeout } return nil } func (h *httpLoader) getTargets() (map[string]*types.TargetConfig, error) { c := resty.New() if h.cfg.TLS != nil { tlsCfg, err := utils.NewTLSConfig(h.cfg.TLS.CaFile, h.cfg.TLS.CertFile, h.cfg.TLS.KeyFile, "", h.cfg.TLS.SkipVerify, false) if err != nil { httpLoaderFailedGetRequests.WithLabelValues(loaderType, fmt.Sprintf("%v", err)).Add(1) return nil, err } if tlsCfg != nil { c = c.SetTLSClientConfig(tlsCfg) } } c.SetTimeout(h.cfg.Timeout) if h.cfg.Username != "" && h.cfg.Password != "" { c.SetBasicAuth(h.cfg.Username, h.cfg.Password) } if h.cfg.Token != "" { c.SetAuthToken(h.cfg.Token) } if h.cfg.AuthScheme != "" { c.SetAuthScheme(h.cfg.AuthScheme) } start := time.Now() httpLoaderGetRequestsTotal.WithLabelValues(loaderType).Add(1) rsp, err := c.R().SetHeader("Accept", "application/json").Get(h.cfg.URL) if err != nil { httpLoaderFailedGetRequests.WithLabelValues(loaderType, fmt.Sprintf("%v", err)).Add(1) return nil, err } httpLoaderGetRequestDuration.WithLabelValues(loaderType).Set(float64(time.Since(start).Nanoseconds())) if rsp.StatusCode() != 200 { httpLoaderFailedGetRequests.WithLabelValues(loaderType, rsp.Status()) return nil, fmt.Errorf("failed request, code=%d", rsp.StatusCode()) } b := rsp.Body() if h.tpl != nil { var input interface{} err = json.Unmarshal(b, &input) if err != nil { httpLoaderFailedGetRequests.WithLabelValues(loaderType, fmt.Sprintf("%v", err)).Add(1) return nil, err } buf := new(bytes.Buffer) err = h.tpl.Execute(buf, input) if err != nil { httpLoaderFailedGetRequests.WithLabelValues(loaderType, fmt.Sprintf("%v", err)).Add(1) return nil, err } b = buf.Bytes() } result := make(map[string]*types.TargetConfig) // unmarshal the bytes into a map of targetConfigs err = json.Unmarshal(b, &result) if err != nil { httpLoaderFailedGetRequests.WithLabelValues(loaderType, fmt.Sprintf("%v", err)).Add(1) return nil, err } // properly initialize address and name if not set for n, t := range result { if t == nil && n != "" { result[n] = &types.TargetConfig{ Name: n, Address: n, } continue } if t.Name == "" { t.Name = n } if t.Address == "" { t.Address = n } } if h.cfg.Debug { h.logger.Printf("result: %+v", result) } return result, nil } func (h *httpLoader) updateTargets(ctx context.Context, tcs map[string]*types.TargetConfig, opChan chan *loaders.TargetOperation) { var err error for _, tc := range tcs { err = h.targetConfigFn(tc) if err != nil { h.logger.Printf("failed running target config fn on target %q", tc.Name) } } targetOp, err := h.runActions(ctx, tcs, loaders.Diff(h.lastTargets, tcs)) if err != nil { h.logger.Printf("failed to run actions: %v", err) return } numAdds := len(targetOp.Add) numDels := len(targetOp.Del) defer func() { httpLoaderLoadedTargets.WithLabelValues(loaderType).Set(float64(numAdds)) httpLoaderDeletedTargets.WithLabelValues(loaderType).Set(float64(numDels)) }() if numAdds+numDels == 0 { return } h.m.Lock() // do delete first, since target change // consists of delete and add for _, n := range targetOp.Del { delete(h.lastTargets, n) } for n, t := range targetOp.Add { if _, ok := h.lastTargets[n]; !ok { h.lastTargets[n] = t } } h.m.Unlock() opChan <- targetOp } func (h *httpLoader) readVars(ctx context.Context) error { if h.cfg.VarsFile == "" { h.vars = h.cfg.Vars return nil } ctx, cancel := context.WithTimeout(ctx, h.cfg.Interval) defer cancel() b, err := gfile.ReadFile(ctx, h.cfg.VarsFile) if err != nil { return err } v := make(map[string]interface{}) err = yaml.Unmarshal(b, &v) if err != nil { return err } h.vars = utils.MergeMaps(v, h.cfg.Vars) return nil } func (h *httpLoader) initializeAction(cfg map[string]interface{}) (actions.Action, error) { if len(cfg) == 0 { return nil, errors.New("missing action definition") } if actType, ok := cfg["type"]; ok { switch actType := actType.(type) { case string: if in, ok := actions.Actions[actType]; ok { act := in() err := act.Init(cfg, actions.WithLogger(h.logger), actions.WithTargets(nil)) if err != nil { return nil, err } return act, nil } return nil, fmt.Errorf("unknown action type %q", actType) default: return nil, fmt.Errorf("unexpected action field type %T", actType) } } return nil, errors.New("missing type field under action") } func (f *httpLoader) runActions(ctx context.Context, tcs map[string]*types.TargetConfig, targetOp *loaders.TargetOperation) (*loaders.TargetOperation, error) { if f.numActions == 0 { return targetOp, nil } result := &loaders.TargetOperation{ Add: make(map[string]*types.TargetConfig, len(targetOp.Add)), Del: make([]string, 0, len(targetOp.Del)), } var resultMu sync.Mutex ctx, cancel := context.WithTimeout(ctx, f.cfg.Interval) defer cancel() // create waitGroup and add the number of target operations to it wgDelete := new(sync.WaitGroup) wgDelete.Add(len(targetOp.Del)) // run OnDelete actions first, since change==delete+add for _, tDel := range targetOp.Del { go func(name string) { defer wgDelete.Done() err := f.runOnDeleteActions(ctx, name, tcs) if err != nil { f.logger.Printf("failed running OnDelete actions: %v", err) return } resultMu.Lock() result.Del = append(result.Del, name) resultMu.Unlock() }(tDel) } wgDelete.Wait() wgAdd := new(sync.WaitGroup) wgAdd.Add(len(targetOp.Add)) // run OnAdd actions for n, tAdd := range targetOp.Add { go func(n string, tc *types.TargetConfig) { defer wgAdd.Done() err := f.runOnAddActions(ctx, tc.Name, tcs) if err != nil { f.logger.Printf("failed running OnAdd actions: %v", err) return } resultMu.Lock() result.Add[n] = tc resultMu.Unlock() }(n, tAdd) } wgAdd.Wait() return result, nil } func (d *httpLoader) runOnAddActions(ctx context.Context, tName string, tcs map[string]*types.TargetConfig) error { aCtx := &actions.Context{ Input: tName, Env: make(map[string]interface{}), Vars: d.vars, Targets: tcs, } for _, act := range d.addActions { d.logger.Printf("running action %q for target %q", act.NName(), tName) res, err := act.Run(ctx, aCtx) if err != nil { // delete target from known targets map d.m.Lock() delete(d.lastTargets, tName) d.m.Unlock() return fmt.Errorf("action %q for target %q failed: %v", act.NName(), tName, err) } aCtx.Env[act.NName()] = utils.Convert(res) if d.cfg.Debug { d.logger.Printf("action %q, target %q result: %+v", act.NName(), tName, res) b, _ := json.MarshalIndent(aCtx, "", " ") d.logger.Printf("action %q context:\n%s", act.NName(), string(b)) } } return nil } func (d *httpLoader) runOnDeleteActions(ctx context.Context, tName string, _ map[string]*types.TargetConfig) error { env := make(map[string]any) for _, act := range d.delActions { res, err := act.Run(ctx, &actions.Context{Input: tName, Env: env, Vars: d.vars}) if err != nil { return fmt.Errorf("action %q for target %q failed: %v", act.NName(), tName, err) } env[act.NName()] = res } return nil } ================================================ FILE: pkg/loaders/http_loader/http_loader_metrics.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package http_loader import "github.com/prometheus/client_golang/prometheus" var httpLoaderLoadedTargets = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "http_loader", Name: "number_of_loaded_targets", Help: "Number of new targets successfully loaded", }, []string{"loader_type"}) var httpLoaderDeletedTargets = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "http_loader", Name: "number_of_deleted_targets", Help: "Number of targets successfully deleted", }, []string{"loader_type"}) var httpLoaderFailedGetRequests = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "http_loader", Name: "number_of_failed_http_requests", Help: "Number of times the http Get request failed", }, []string{"loader_type", "error"}) var httpLoaderGetRequestsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "http_loader", Name: "number_of_http_requests_total", Help: "Number of times the loader sent an HTTP request", }, []string{"loader_type"}) var httpLoaderGetRequestDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "http_loader", Name: "http_request_duration_ns", Help: "Duration of http request in ns", }, []string{"loader_type"}) func initMetrics() { httpLoaderLoadedTargets.WithLabelValues(loaderType).Set(0) httpLoaderDeletedTargets.WithLabelValues(loaderType).Set(0) httpLoaderFailedGetRequests.WithLabelValues(loaderType, "").Add(0) httpLoaderGetRequestsTotal.WithLabelValues(loaderType).Add(0) httpLoaderGetRequestDuration.WithLabelValues(loaderType).Set(0) } func registerMetrics(reg *prometheus.Registry) error { initMetrics() var err error if err = reg.Register(httpLoaderLoadedTargets); err != nil { return err } if err = reg.Register(httpLoaderDeletedTargets); err != nil { return err } if err = reg.Register(httpLoaderFailedGetRequests); err != nil { return err } if err = reg.Register(httpLoaderGetRequestsTotal); err != nil { return err } if err = reg.Register(httpLoaderGetRequestDuration); err != nil { return err } return nil } ================================================ FILE: pkg/loaders/http_loader/http_loader_test.go ================================================ // © 2025 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package http_loader import ( "context" "errors" "fmt" "io" "log" "net/http" "net/http/httptest" "os" "path/filepath" "slices" "sync" "testing" "time" "gopkg.in/yaml.v2" "github.com/openconfig/gnmic/pkg/actions" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/loaders" ) // fakeAction is a minimal implementation of actions.Action for testing. type fakeAction struct { name string delay time.Duration fail bool } func (f *fakeAction) Init(cfg map[string]interface{}, opts ...actions.Option) error { if v, ok := cfg["name"].(string); ok { f.name = v } return nil } func (f *fakeAction) Run(ctx context.Context, aCtx *actions.Context) (interface{}, error) { if f.delay > 0 { select { case <-time.After(f.delay): case <-ctx.Done(): } } if f.fail { return nil, errors.New("forced failure") } return "ok", nil } func (f *fakeAction) NName() string { return f.name } func (f *fakeAction) WithTargets(map[string]*types.TargetConfig) {} func (f *fakeAction) WithLogger(*log.Logger) {} func newTestLoader(t *testing.T) *httpLoader { t.Helper() return &httpLoader{ cfg: &cfg{Interval: 500 * time.Millisecond}, m: new(sync.RWMutex), lastTargets: make(map[string]*types.TargetConfig), targetConfigFn: func(tc *types.TargetConfig) error { return nil }, logger: log.New(io.Discard, "", 0), } } func TestRunActions_AddAndDelete_NoDeadlock(t *testing.T) { hl := newTestLoader(t) // ensure actions are present to exercise the concurrent paths hl.addActions = []actions.Action{&fakeAction{name: "add1", delay: 10 * time.Millisecond}} hl.delActions = []actions.Action{&fakeAction{name: "del1", delay: 10 * time.Millisecond}} hl.numActions = len(hl.addActions) + len(hl.delActions) tcs := map[string]*types.TargetConfig{ "t-add": {Name: "t-add", Address: "10.0.0.1"}, "t-del": {Name: "t-del", Address: "10.0.0.2"}, } op := &loaders.TargetOperation{ Add: map[string]*types.TargetConfig{ "t-add": tcs["t-add"], }, Del: []string{"t-del"}, } ctx := context.Background() done := make(chan struct{}) var res *loaders.TargetOperation var err error go func() { res, err = hl.runActions(ctx, tcs, op) close(done) }() select { case <-done: if err != nil { t.Fatalf("runActions returned error: %v", err) } if _, ok := res.Add["t-add"]; !ok { t.Fatalf("expected add for 't-add', got: %+v", res.Add) } if !slices.Contains(res.Del, "t-del") { t.Fatalf("expected delete for 't-del', got: %+v", res.Del) } case <-time.After(2 * time.Second): t.Fatal("runActions timed out (possible deadlock)") } } func TestRunActions_ReplaceSameName_NoDeadlock(t *testing.T) { hl := newTestLoader(t) hl.addActions = []actions.Action{&fakeAction{name: "add1", delay: 10 * time.Millisecond}} hl.delActions = []actions.Action{&fakeAction{name: "del1", delay: 10 * time.Millisecond}} hl.numActions = len(hl.addActions) + len(hl.delActions) oldTC := &types.TargetConfig{Name: "t1", Address: "10.0.0.1"} newTC := &types.TargetConfig{Name: "t1", Address: "10.0.0.1"} tcs := map[string]*types.TargetConfig{ "t1": newTC, } op := &loaders.TargetOperation{ Add: map[string]*types.TargetConfig{ "t1": newTC, }, Del: []string{"t1"}, } // seed lastTargets to emulate prior state (not directly used by runActions but mirrors scenario) hl.lastTargets["t1"] = oldTC ctx := context.Background() done := make(chan struct{}) var res *loaders.TargetOperation var err error go func() { res, err = hl.runActions(ctx, tcs, op) close(done) }() select { case <-done: if err != nil { t.Fatalf("runActions returned error: %v", err) } if _, ok := res.Add["t1"]; !ok { t.Fatalf("expected add for 't1', got: %+v", res.Add) } if !slices.Contains(res.Del, "t1") { t.Fatalf("expected delete for 't1', got: %+v", res.Del) } case <-time.After(2 * time.Second): t.Fatal("runActions timed out (possible deadlock)") } } func TestSetDefaults(t *testing.T) { // missing URL hl := newTestLoader(t) hl.cfg.URL = "" if err := hl.setDefaults(); err == nil { t.Fatal("expected error for missing URL") } // valid URL sets default interval/timeout hl = newTestLoader(t) hl.cfg.URL = "http://localhost" hl.cfg.Interval = 0 hl.cfg.Timeout = 0 if err := hl.setDefaults(); err != nil { t.Fatalf("unexpected error: %v", err) } if hl.cfg.Interval <= 0 || hl.cfg.Timeout <= 0 { t.Fatal("expected defaults for interval and timeout to be set") } } func TestReadVars_FromFileAndMerge(t *testing.T) { // create temp vars yaml dir := t.TempDir() varsPath := filepath.Join(dir, "vars.yaml") orig := map[string]interface{}{"a": 1, "b": map[string]interface{}{"x": "y"}} b, _ := yaml.Marshal(orig) if err := os.WriteFile(varsPath, b, 0600); err != nil { t.Fatalf("write vars file: %v", err) } hl := newTestLoader(t) hl.cfg.VarsFile = varsPath hl.cfg.Vars = map[string]interface{}{"b": map[string]interface{}{"x": "z", "k": "v"}, "c": 3} if err := hl.readVars(context.Background()); err != nil { t.Fatalf("readVars error: %v", err) } // merged expectations: b.x overridden to z, b.k added, c added, a kept if fmt.Sprint(hl.vars["a"]) != "1" { t.Fatalf("expected a=1, got %v", hl.vars["a"]) } if m, ok := hl.vars["b"].(map[string]interface{}); !ok || m["x"] != "z" || m["k"] != "v" { t.Fatalf("unexpected b: %#v", hl.vars["b"]) } if fmt.Sprint(hl.vars["c"]) != "3" { t.Fatalf("expected c=3, got %v", hl.vars["c"]) } } func TestGetTargets_JSONAndTemplateAndNilEntries(t *testing.T) { plain := `{"t1": {"name":"t1","address":"1.1.1.1"}, "t2": null}` ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") _, _ = w.Write([]byte(plain)) })) defer ts.Close() // no template hl := newTestLoader(t) hl.cfg.URL = ts.URL res, err := hl.getTargets() if err != nil { t.Fatalf("getTargets error: %v", err) } if res["t1"].Name != "t1" || res["t1"].Address != "1.1.1.1" { t.Fatalf("unexpected t1: %#v", res["t1"]) } // t2 is nil in input, should be auto-filled name/address if res["t2"].Name != "t2" || res["t2"].Address != "t2" { t.Fatalf("unexpected t2: %#v", res["t2"]) } // template path: server returns array of objects -> map arr := `[{"n":"a","a":"10.0.0.1"},{"n":"b"}]` ts2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") _, _ = w.Write([]byte(arr)) })) defer ts2.Close() hl2 := newTestLoader(t) hl2.cfg.URL = ts2.URL // static template hl2.cfg.Template = `{"a":{"name":"a","address":"10.0.0.1"},"b":{"name":"b"}}` // initialize template via Init if err := hl2.Init(context.Background(), map[string]interface{}{"url": hl2.cfg.URL, "template": hl2.cfg.Template}, hl2.logger); err != nil { t.Fatalf("init with template: %v", err) } res2, err := hl2.getTargets() if err != nil { t.Fatalf("getTargets with template error: %v", err) } if res2["a"].Name != "a" || res2["a"].Address != "10.0.0.1" { t.Fatalf("unexpected a: %#v", res2["a"]) } // address missing -> should default to name if res2["b"].Name != "b" || res2["b"].Address != "b" { t.Fatalf("unexpected b: %#v", res2["b"]) } } func TestInitializeAction(t *testing.T) { hl := newTestLoader(t) // register a temporary action type orig := actions.Actions["fake"] actions.Actions["fake"] = func() actions.Action { return &fakeAction{} } defer func() { actions.Actions["fake"] = orig }() // success a, err := hl.initializeAction(map[string]interface{}{"type": "fake", "name": "x"}) if err != nil || a == nil { t.Fatalf("expected success, got err=%v action=%v", err, a) } // unknown type a, err = hl.initializeAction(map[string]interface{}{"type": "does-not-exist"}) if err == nil || a != nil { t.Fatalf("expected error for unknown type") } // missing type a, err = hl.initializeAction(map[string]interface{}{}) if err == nil || a != nil { t.Fatalf("expected error for missing type") } } func TestRunOnAddActions_ErrorRemovesTarget(t *testing.T) { hl := newTestLoader(t) hl.addActions = []actions.Action{&fakeAction{name: "bad", fail: true}} hl.numActions = len(hl.addActions) hl.lastTargets["t1"] = &types.TargetConfig{Name: "t1"} ctx := context.Background() // should return error and remove t1 from lastTargets if err := hl.runOnAddActions(ctx, "t1", map[string]*types.TargetConfig{"t1": {Name: "t1"}}); err == nil { t.Fatal("expected error from failing action") } hl.m.RLock() _, exists := hl.lastTargets["t1"] hl.m.RUnlock() if exists { t.Fatal("expected t1 to be removed from lastTargets") } } func TestUpdateTargets_NoChange_NoOp(t *testing.T) { hl := newTestLoader(t) hl.numActions = 0 // two identical targets in lastTargets and tcs t1 := &types.TargetConfig{Name: "t1", Address: "1.1.1.1"} t2 := &types.TargetConfig{Name: "t2", Address: "2.2.2.2"} hl.lastTargets["t1"] = &types.TargetConfig{Name: "t1", Address: "1.1.1.1"} hl.lastTargets["t2"] = &types.TargetConfig{Name: "t2", Address: "2.2.2.2"} called := 0 hl.targetConfigFn = func(tc *types.TargetConfig) error { called++; return nil } ch := make(chan *loaders.TargetOperation, 1) hl.updateTargets(context.Background(), map[string]*types.TargetConfig{"t1": t1, "t2": t2}, ch) select { case op := <-ch: t.Fatalf("unexpected op received: %+v", op) default: // ok, no op expected } if called != 2 { t.Fatalf("expected targetConfigFn to be called twice, got %d", called) } } func TestUpdateTargets_Add(t *testing.T) { hl := newTestLoader(t) hl.numActions = 0 t1 := &types.TargetConfig{Name: "t1", Address: "1.1.1.1"} ch := make(chan *loaders.TargetOperation, 1) hl.updateTargets(context.Background(), map[string]*types.TargetConfig{"t1": t1}, ch) select { case op := <-ch: if len(op.Add) != 1 || len(op.Del) != 0 { t.Fatalf("unexpected op: %+v", op) } if _, ok := hl.lastTargets["t1"]; !ok { t.Fatal("expected t1 to be added to lastTargets") } case <-time.After(time.Second): t.Fatal("timed out waiting for op") } } func TestUpdateTargets_Delete(t *testing.T) { hl := newTestLoader(t) hl.numActions = 0 hl.lastTargets["t1"] = &types.TargetConfig{Name: "t1", Address: "1.1.1.1"} ch := make(chan *loaders.TargetOperation, 1) hl.updateTargets(context.Background(), map[string]*types.TargetConfig{}, ch) select { case op := <-ch: if len(op.Add) != 0 || len(op.Del) != 1 || op.Del[0] != "t1" { t.Fatalf("unexpected op: %+v", op) } if _, ok := hl.lastTargets["t1"]; ok { t.Fatal("expected t1 to be deleted from lastTargets") } case <-time.After(time.Second): t.Fatal("timed out waiting for op") } } func TestUpdateTargets_Change_TriggersDelAndAdd(t *testing.T) { hl := newTestLoader(t) hl.numActions = 0 hl.lastTargets["t1"] = &types.TargetConfig{Name: "t1", Address: "1.1.1.1"} newT1 := &types.TargetConfig{Name: "t1", Address: "1.1.1.2"} ch := make(chan *loaders.TargetOperation, 1) hl.updateTargets(context.Background(), map[string]*types.TargetConfig{"t1": newT1}, ch) select { case op := <-ch: if len(op.Add) != 1 || len(op.Del) != 1 || op.Del[0] != "t1" { t.Fatalf("unexpected op: %+v", op) } if lt, ok := hl.lastTargets["t1"]; !ok || lt.Address != "1.1.1.2" { t.Fatalf("expected lastTargets to have updated address, got: %+v", lt) } case <-time.After(time.Second): t.Fatal("timed out waiting for op") } } func TestUpdateTargets_Change_TriggersRename(t *testing.T) { hl := newTestLoader(t) hl.numActions = 0 hl.lastTargets["t1"] = &types.TargetConfig{Name: "t1", Address: "1.1.1.1"} newT2 := &types.TargetConfig{Name: "t2", Address: "1.1.1.1"} ch := make(chan *loaders.TargetOperation, 1) hl.updateTargets(context.Background(), map[string]*types.TargetConfig{"t2": newT2}, ch) select { case op := <-ch: if len(op.Add) != 1 || len(op.Del) != 1 || op.Del[0] != "t1" { t.Fatalf("unexpected op: %+v", op) } if _, ok := hl.lastTargets["t2"]; !ok { t.Fatal("expected t2 to be added to lastTargets") } case <-time.After(time.Second): t.Fatal("timed out waiting for op") } } ================================================ FILE: pkg/loaders/http_loader/options.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package http_loader import ( "github.com/openconfig/gnmic/pkg/api/types" "github.com/prometheus/client_golang/prometheus" ) func (h *httpLoader) RegisterMetrics(reg *prometheus.Registry) { if !h.cfg.EnableMetrics { return } if reg == nil { h.logger.Printf("ERR: metrics enabled but main registry is not initialized, enable main metrics under `api-server`") return } if err := registerMetrics(reg); err != nil { h.logger.Printf("failed to register metrics: %v", err) } } func (h *httpLoader) WithActions(acts map[string]map[string]interface{}) { h.actionsConfig = acts } func (h *httpLoader) WithTargetsDefaults(fn func(tc *types.TargetConfig) error) { h.targetConfigFn = fn } ================================================ FILE: pkg/loaders/loaders.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package loaders import ( "context" "log" "maps" "reflect" "github.com/mitchellh/mapstructure" "github.com/openconfig/gnmic/pkg/api/types" "github.com/prometheus/client_golang/prometheus" ) // TargetLoader discovers a set of target configurations for gNMIc to run RPCs against. // RunOnce should return a map of target configs and is meant to be used with Unary RPCs. // Start runs a goroutine in the background that updates added/removed target configs on the // returned channel. type TargetLoader interface { // Init initializes the target loader given the config, logger and options Init(ctx context.Context, cfg map[string]interface{}, l *log.Logger, opts ...Option) error // RunOnce runs the loader only once, returning a map of target configs RunOnce(ctx context.Context) (map[string]*types.TargetConfig, error) // Start starts the target loader, running periodic polls or a long watch. // It returns a channel of TargetOperation from which the function caller can // receive the added/removed target configs Start(context.Context) chan *TargetOperation // RegsiterMetrics registers the loader metrics with the provided registry RegisterMetrics(*prometheus.Registry) // WithActions passes the actions configuration to the target loader WithActions(map[string]map[string]interface{}) // WithTargetsDefaults passes a callback function that sets the target config defaults WithTargetsDefaults(func(tc *types.TargetConfig) error) } type Initializer func() TargetLoader var Loaders = map[string]Initializer{} var LoadersTypes = []string{ "file", "consul", "docker", "http", } func Register(name string, initFn Initializer) { Loaders[name] = initFn } type TargetOperation struct { Add map[string]*types.TargetConfig Del []string } func DecodeConfig(src, dst interface{}) error { decoder, err := mapstructure.NewDecoder( &mapstructure.DecoderConfig{ DecodeHook: mapstructure.StringToTimeDurationHookFunc(), Result: dst, }, ) if err != nil { return err } return decoder.Decode(src) } func Diff(currentMap, newMap map[string]*types.TargetConfig) *TargetOperation { result := &TargetOperation{ Add: make(map[string]*types.TargetConfig, 0), Del: make([]string, 0), } // handle removed and added targets if len(currentMap) == 0 { maps.Copy(result.Add, newMap) return result } if len(newMap) == 0 { for name := range currentMap { result.Del = append(result.Del, name) } return result } for n, t := range newMap { if _, ok := currentMap[n]; !ok { result.Add[n] = t } } for n := range currentMap { if _, ok := newMap[n]; !ok { result.Del = append(result.Del, n) } } // handle changes for n, currentVal := range currentMap { newVal, ok := newMap[n] // we don't have the target in the new config, // already handled above if !ok { continue } // if any target parameter changes, we need to remove // and re-add // the only case I see where we wouldn't necessarily need to restart the actual GRPC connection // is if Tags and EventTags changed, we could just apply the new tags internally (but right now it's done in the StartCollector phase) if !reflect.DeepEqual(currentVal, newVal) { result.Add[n] = newVal result.Del = append(result.Del, n) } } return result } ================================================ FILE: pkg/loaders/loaders_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package loaders import ( "testing" "github.com/google/go-cmp/cmp" "github.com/openconfig/gnmic/pkg/api/types" ) var testSet = map[string]struct { m1, m2 map[string]*types.TargetConfig output *TargetOperation }{ "t1": { m1: nil, m2: nil, output: &TargetOperation{ Add: make(map[string]*types.TargetConfig, 0), Del: make([]string, 0), }, }, "t2": { m1: nil, m2: map[string]*types.TargetConfig{ "target1": {Name: "target1"}, }, output: &TargetOperation{ Add: map[string]*types.TargetConfig{ "target1": { Name: "target1", }, }, Del: make([]string, 0), }, }, "t3": { m1: map[string]*types.TargetConfig{ "target1": {Name: "target1"}, }, m2: map[string]*types.TargetConfig{ "target1": {Name: "target1"}, }, output: &TargetOperation{ Add: make(map[string]*types.TargetConfig, 0), Del: make([]string, 0), }, }, "t4": { m1: map[string]*types.TargetConfig{ "target1": {Name: "target1"}, "target2": {Name: "target2"}, }, m2: map[string]*types.TargetConfig{ "target1": {Name: "target1"}, "target2": {Name: "target2"}, }, output: &TargetOperation{ Add: make(map[string]*types.TargetConfig, 0), Del: make([]string, 0), }, }, "t5": { m1: map[string]*types.TargetConfig{ "target1": {Name: "target1"}, }, m2: nil, output: &TargetOperation{ Add: make(map[string]*types.TargetConfig, 0), Del: []string{"target1"}, }, }, "t6": { m1: map[string]*types.TargetConfig{ "target1": {Name: "target1"}, }, m2: map[string]*types.TargetConfig{ "target1": {Name: "target1"}, "target2": {Name: "target2"}, }, output: &TargetOperation{ Add: map[string]*types.TargetConfig{ "target2": { Name: "target2", }, }, Del: make([]string, 0), }, }, "t7": { m1: map[string]*types.TargetConfig{ "target1": {Name: "target1"}, }, m2: map[string]*types.TargetConfig{ "target2": {Name: "target2"}, }, output: &TargetOperation{ Add: map[string]*types.TargetConfig{ "target2": { Name: "target2", }, }, Del: []string{"target1"}, }, }, "t8": { m1: map[string]*types.TargetConfig{ "target1": {Name: "target1"}, }, m2: map[string]*types.TargetConfig{ "target2": {Name: "target2"}, "target3": {Name: "target3"}, }, output: &TargetOperation{ Add: map[string]*types.TargetConfig{ "target2": { Name: "target2", }, "target3": { Name: "target3", }, }, Del: []string{"target1"}, }, }, "t9": { m1: map[string]*types.TargetConfig{ "target1": {Name: "target1"}, "target2": {Name: "target2"}, }, m2: map[string]*types.TargetConfig{ "target2": {Name: "target2"}, "target3": {Name: "target3"}, }, output: &TargetOperation{ Add: map[string]*types.TargetConfig{ "target3": { Name: "target3", }, }, Del: []string{"target1"}, }, }, "t10-target-change": { m1: map[string]*types.TargetConfig{ "target1": {Address: "ip1"}, "target2": {Address: "ip2"}, }, m2: map[string]*types.TargetConfig{ "target1": {Address: "ip1"}, "target2": {Address: "ip2new"}, }, output: &TargetOperation{ Add: map[string]*types.TargetConfig{ "target2": { Address: "ip2new", }, }, Del: []string{"target2"}, }, }, "t11-tags-change": { m1: map[string]*types.TargetConfig{ "target1": {Name: "target1", Tags: []string{"a"}}, }, m2: map[string]*types.TargetConfig{ "target1": {Name: "target1", Tags: []string{"a", "b"}}, }, output: &TargetOperation{ Add: map[string]*types.TargetConfig{ "target1": {Name: "target1", Tags: []string{"a", "b"}}, }, Del: []string{"target1"}, }, }, "t12-both-empty": { m1: map[string]*types.TargetConfig{}, m2: map[string]*types.TargetConfig{}, output: &TargetOperation{ Add: make(map[string]*types.TargetConfig, 0), Del: make([]string, 0), }, }, "t13-slice-order-change": { m1: map[string]*types.TargetConfig{ "target1": {Name: "target1", Tags: []string{"a", "b"}}, }, m2: map[string]*types.TargetConfig{ "target1": {Name: "target1", Tags: []string{"b", "a"}}, }, output: &TargetOperation{ Add: map[string]*types.TargetConfig{ "target1": {Name: "target1", Tags: []string{"b", "a"}}, }, Del: []string{"target1"}, }, }, } func TestGetInstancesTagsMatches(t *testing.T) { for name, item := range testSet { t.Run(name, func(t *testing.T) { res := Diff(item.m1, item.m2) t.Logf("exp value: %+v", item.output) t.Logf("got value: %+v", res) if len(item.output.Add) != len(res.Add) { t.Fail() } if len(item.output.Del) != len(res.Del) { t.Fail() } for k, v1 := range item.output.Add { if v2, ok := res.Add[k]; ok { if v1.String() != v2.String() { t.Fail() } } else { t.Fail() } } if !cmp.Equal(item.output.Del, res.Del) { t.Fail() } }) } } ================================================ FILE: pkg/loaders/option.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package loaders import ( "github.com/openconfig/gnmic/pkg/api/types" "github.com/prometheus/client_golang/prometheus" ) type Option func(TargetLoader) func WithRegistry(reg *prometheus.Registry) Option { return func(l TargetLoader) { if reg == nil { return } l.RegisterMetrics(reg) } } func WithActions(acts map[string]map[string]interface{}) Option { return func(l TargetLoader) { if len(acts) == 0 { return } l.WithActions(acts) } } func WithTargetsDefaults(fn func(tc *types.TargetConfig) error) Option { return func(l TargetLoader) { l.WithTargetsDefaults(fn) } } ================================================ FILE: pkg/lockers/all/all.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package all import ( _ "github.com/openconfig/gnmic/pkg/lockers/consul_locker" _ "github.com/openconfig/gnmic/pkg/lockers/k8s_locker" _ "github.com/openconfig/gnmic/pkg/lockers/redis_locker" ) ================================================ FILE: pkg/lockers/consul_locker/consul_locker.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package consul_locker import ( "context" "encoding/json" "fmt" "io" "log" "sync" "time" "github.com/hashicorp/consul/api" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/lockers" ) const ( defaultSessionTTL = 10 * time.Second defaultRetryTimer = 2 * time.Second defaultDelay = 5 * time.Second loggingPrefix = "[consul_locker] " ) func init() { lockers.Register("consul", func() lockers.Locker { return &ConsulLocker{ Cfg: &config{}, m: new(sync.Mutex), acquiredlocks: make(map[string]*locks), attemptinglocks: make(map[string]*locks), logger: log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags), services: make(map[string]context.CancelFunc), } }) } type ConsulLocker struct { Cfg *config client *api.Client logger *log.Logger m *sync.Mutex acquiredlocks map[string]*locks attemptinglocks map[string]*locks services map[string]context.CancelFunc } type config struct { Address string `mapstructure:"address,omitempty" json:"address,omitempty"` Datacenter string `mapstructure:"datacenter,omitempty" json:"datacenter,omitempty"` Username string `mapstructure:"username,omitempty" json:"username,omitempty"` Password string `mapstructure:"password,omitempty" json:"password,omitempty"` Token string `mapstructure:"token,omitempty" json:"token,omitempty"` SessionTTL time.Duration `mapstructure:"session-ttl,omitempty" json:"session-ttl,omitempty"` Delay time.Duration `mapstructure:"delay,omitempty" json:"delay,omitempty"` RetryTimer time.Duration `mapstructure:"retry-timer,omitempty" json:"retry-timer,omitempty"` RenewPeriod time.Duration `mapstructure:"renew-period,omitempty" json:"renew-period,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` } type locks struct { sessionID string doneChan chan struct{} } func (c *ConsulLocker) Init(ctx context.Context, cfg map[string]interface{}, opts ...lockers.Option) error { err := lockers.DecodeConfig(cfg, c.Cfg) if err != nil { return err } for _, opt := range opts { opt(c) } err = c.setDefaults() if err != nil { return err } clientConfig := &api.Config{ Address: c.Cfg.Address, Scheme: "http", Datacenter: c.Cfg.Datacenter, Token: c.Cfg.Token, } if c.Cfg.Username != "" && c.Cfg.Password != "" { clientConfig.HttpAuth = &api.HttpBasicAuth{ Username: c.Cfg.Username, Password: c.Cfg.Password, } } c.client, err = api.NewClient(clientConfig) if err != nil { return err } b, _ := json.Marshal(c.Cfg) c.logger.Printf("initialized consul locker with cfg=%s", string(b)) return nil } func (c *ConsulLocker) Lock(ctx context.Context, key string, val []byte) (bool, error) { var err error var acquired bool writeOpts := new(api.WriteOptions) writeOpts = writeOpts.WithContext(ctx) kvPair := &api.KVPair{Key: key, Value: val} doneChan := make(chan struct{}) defer func() { c.m.Lock() defer c.m.Unlock() delete(c.attemptinglocks, key) }() for { select { case <-ctx.Done(): return false, ctx.Err() case <-doneChan: return false, lockers.ErrCanceled default: acquired = false kvPair.Session, _, err = c.client.Session().Create( &api.SessionEntry{ Behavior: "delete", TTL: c.Cfg.SessionTTL.String(), LockDelay: c.Cfg.Delay, }, writeOpts, ) if err != nil { c.logger.Printf("failed creating session: %v", err) time.Sleep(c.Cfg.RetryTimer) continue } c.m.Lock() c.attemptinglocks[key] = &locks{sessionID: kvPair.Session, doneChan: doneChan} c.m.Unlock() acquired, _, err = c.client.KV().Acquire(kvPair, writeOpts) if err != nil { c.logger.Printf("failed acquiring lock to %q: %v", kvPair.Key, err) time.Sleep(c.Cfg.RetryTimer) continue } if acquired { c.m.Lock() c.acquiredlocks[key] = &locks{sessionID: kvPair.Session, doneChan: doneChan} c.m.Unlock() return true, nil } if c.Cfg.Debug { c.logger.Printf("failed acquiring lock to %q: already locked", kvPair.Key) } time.Sleep(c.Cfg.RetryTimer) } } } func (c *ConsulLocker) KeepLock(ctx context.Context, key string) (chan struct{}, chan error) { writeOpts := new(api.WriteOptions) writeOpts = writeOpts.WithContext(ctx) c.m.Lock() sessionID := "" doneChan := make(chan struct{}) if l, ok := c.acquiredlocks[key]; ok { sessionID = l.sessionID doneChan = l.doneChan } c.m.Unlock() errChan := make(chan error) go func() { if sessionID == "" { errChan <- fmt.Errorf("unknown key") close(doneChan) return } err := c.client.Session().RenewPeriodic(c.Cfg.RenewPeriod.String(), sessionID, writeOpts, doneChan) if err != nil { errChan <- err } }() return doneChan, errChan } func (c *ConsulLocker) Unlock(ctx context.Context, key string) error { c.m.Lock() defer c.m.Unlock() if lock, ok := c.acquiredlocks[key]; ok { close(lock.doneChan) wrOpts := new(api.WriteOptions) _, err := c.client.KV().Delete(key, wrOpts.WithContext(ctx)) if err != nil { return err } _, err = c.client.Session().Destroy(lock.sessionID, nil) if err != nil { return err } delete(c.acquiredlocks, key) return nil } if lock, ok := c.attemptinglocks[key]; ok { close(lock.doneChan) _, err := c.client.Session().Destroy(lock.sessionID, nil) if err != nil { return err } delete(c.acquiredlocks, key) return nil } return fmt.Errorf("unlock failed: unknown key %q", key) } func (c *ConsulLocker) Stop() error { c.m.Lock() defer c.m.Unlock() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() for k := range c.acquiredlocks { c.Unlock(ctx, k) } return nil } func (c *ConsulLocker) SetLogger(logger *log.Logger) { if logger != nil && c.logger != nil { c.logger.SetOutput(logger.Writer()) c.logger.SetFlags(logger.Flags()) } } // helpers func (c *ConsulLocker) setDefaults() error { if c.Cfg.SessionTTL <= 0 { c.Cfg.SessionTTL = defaultSessionTTL } if c.Cfg.RetryTimer <= 0 { c.Cfg.RetryTimer = defaultRetryTimer } if c.Cfg.RenewPeriod <= 0 || c.Cfg.RenewPeriod >= c.Cfg.SessionTTL { c.Cfg.RenewPeriod = c.Cfg.SessionTTL / 2 } if c.Cfg.Delay < 0 { c.Cfg.Delay = defaultDelay } if c.Cfg.Delay > 60*time.Second { c.Cfg.Delay = 60 * time.Second } return nil } func (c *ConsulLocker) String() string { b, err := json.Marshal(c.Cfg) if err != nil { return "" } return string(b) } ================================================ FILE: pkg/lockers/consul_locker/consul_registration.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package consul_locker import ( "context" "net" "strconv" "time" "github.com/hashicorp/consul/api" "github.com/openconfig/gnmic/pkg/lockers" ) const defaultWatchTimeout = 1 * time.Minute func (c *ConsulLocker) Register(ctx context.Context, s *lockers.ServiceRegistration) error { service := &api.AgentServiceRegistration{ ID: s.ID, Name: s.Name, Address: s.Address, Port: s.Port, Tags: s.Tags, Checks: api.AgentServiceChecks{ { TTL: s.TTL.String(), DeregisterCriticalServiceAfter: "5s", }, }, } sctx, cancel := context.WithCancel(ctx) c.m.Lock() c.services[s.ID] = cancel c.m.Unlock() ttlCheckID := "service:" + s.ID err := c.client.Agent().ServiceRegister(service) if err != nil { return err } // keep service with ttl err = c.client.Agent().UpdateTTL(ttlCheckID, "", api.HealthPassing) if err != nil { return err } ticker := time.NewTicker(s.TTL / 2) defer ticker.Stop() for { select { case <-ticker.C: err = c.client.Agent().UpdateTTL(ttlCheckID, "", api.HealthPassing) if err != nil { return err } case <-sctx.Done(): err = c.client.Agent().UpdateTTL(ttlCheckID, sctx.Err().Error(), api.HealthCritical) if err != nil { return err } return nil } } } func (c *ConsulLocker) Deregister(s string) error { c.m.Lock() if cfn, ok := c.services[s]; ok { cfn() } c.m.Unlock() return c.client.Agent().ServiceDeregister(s) } func (c *ConsulLocker) WatchServices(ctx context.Context, serviceName string, tags []string, sChan chan<- []*lockers.Service, watchTimeout time.Duration) error { if watchTimeout <= 0 { watchTimeout = defaultWatchTimeout } var index uint64 qOpts := &api.QueryOptions{ WaitIndex: index, WaitTime: watchTimeout, } var err error // long blocking watch for { select { case <-ctx.Done(): return ctx.Err() default: if c.Cfg.Debug { c.logger.Printf("(re)starting watch service=%q, index=%d", serviceName, qOpts.WaitIndex) } index, err = c.watch(ctx, qOpts, serviceName, tags, sChan) if err != nil { c.logger.Printf("service %q watch failed: %v", serviceName, err) } if index == 1 { qOpts.WaitIndex = index time.Sleep(2 * time.Second) continue } if index > qOpts.WaitIndex { qOpts.WaitIndex = index } // reset WaitIndex if the returned index decreases // https://www.consul.io/api-docs/features/blocking#implementation-details if index < qOpts.WaitIndex { qOpts.WaitIndex = 0 } } } } func (c *ConsulLocker) watch(ctx context.Context, qOpts *api.QueryOptions, serviceName string, tags []string, sChan chan<- []*lockers.Service) (uint64, error) { qOpts = qOpts.WithContext(ctx) se, meta, err := c.client.Health().ServiceMultipleTags(serviceName, tags, true, qOpts) if err != nil { return 0, err } if meta == nil { meta = new(api.QueryMeta) } if meta.LastIndex == qOpts.WaitIndex { c.logger.Printf("service=%q did not change, lastIndex=%d", serviceName, meta.LastIndex) return meta.LastIndex, nil } if len(se) == 0 { return 1, nil } newSrvs := make([]*lockers.Service, 0) for _, srv := range se { addr := srv.Service.Address if addr == "" { addr = srv.Node.Address } newSrvs = append(newSrvs, &lockers.Service{ ID: srv.Service.ID, Address: net.JoinHostPort(addr, strconv.Itoa(srv.Service.Port)), Tags: srv.Service.Tags, }) } select { case <-ctx.Done(): return 0, ctx.Err() case sChan <- newSrvs: } return meta.LastIndex, nil } func (c *ConsulLocker) GetServices(ctx context.Context, serviceName string, tags []string) ([]*lockers.Service, error) { se, _, err := c.client.Health().ServiceMultipleTags(serviceName, tags, true, &api.QueryOptions{}) if err != nil { return nil, err } newSrvs := make([]*lockers.Service, 0) for _, srv := range se { addr := srv.Service.Address if addr == "" { addr = srv.Node.Address } newSrvs = append(newSrvs, &lockers.Service{ ID: srv.Service.ID, Address: net.JoinHostPort(addr, strconv.Itoa(srv.Service.Port)), Tags: srv.Service.Tags, }) } return newSrvs, nil } func (c *ConsulLocker) IsLocked(ctx context.Context, k string) (bool, error) { qOpts := &api.QueryOptions{} kv, _, err := c.client.KV().Get(k, qOpts.WithContext(ctx)) if err != nil { return false, err } if kv == nil { return false, nil } return kv.LockIndex > 0, nil } func (c *ConsulLocker) List(ctx context.Context, prefix string) (map[string]string, error) { qOpts := &api.QueryOptions{} kvs, _, err := c.client.KV().List(prefix, qOpts.WithContext(ctx)) if err != nil { return nil, err } if kvs == nil { return nil, err } rs := make(map[string]string) for _, kv := range kvs { rs[kv.Key] = string(kv.Value) } return rs, nil } ================================================ FILE: pkg/lockers/k8s_locker/k8s_locker.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package k8s_locker import ( "context" "encoding/json" "fmt" "io" "log" "os" "strings" "sync" "time" coordinationv1 "k8s.io/api/coordination/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/utils/ptr" "github.com/google/uuid" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/lockers" ) const ( defaultLeaseDuration = 10 * time.Second defaultRetryTimer = 2 * time.Second loggingPrefix = "[k8s_locker] " defaultNamespace = "default" origKeyName = "original-key" ) func init() { lockers.Register("k8s", func() lockers.Locker { return &k8sLocker{ Cfg: &config{}, m: new(sync.RWMutex), acquiredlocks: make(map[string]*lock), attemptinglocks: make(map[string]*lock), logger: log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags), } }) } type k8sLocker struct { Cfg *config clientset *kubernetes.Clientset logger *log.Logger m *sync.RWMutex acquiredlocks map[string]*lock attemptinglocks map[string]*lock identity string // hostname } type config struct { Namespace string `mapstructure:"namespace,omitempty" json:"namespace,omitempty"` LeaseDuration time.Duration `mapstructure:"lease-duration,omitempty" json:"lease-duration,omitempty"` RenewPeriod time.Duration `mapstructure:"renew-period,omitempty" json:"renew-period,omitempty"` RetryTimer time.Duration `mapstructure:"retry-timer,omitempty" json:"retry-timer,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` } type lock struct { lease *coordinationv1.Lease doneChan chan struct{} } func (k *k8sLocker) Init(ctx context.Context, cfg map[string]interface{}, opts ...lockers.Option) error { err := lockers.DecodeConfig(cfg, k.Cfg) if err != nil { return err } for _, opt := range opts { opt(k) } err = k.setDefaults() if err != nil { return err } inClusterConfig, err := rest.InClusterConfig() if err != nil { return err } k.clientset, err = kubernetes.NewForConfig(inClusterConfig) if err != nil { return err } k.identity = k.getIdentity() return nil } func (k *k8sLocker) Lock(ctx context.Context, key string, val []byte) (bool, error) { nkey := strings.ReplaceAll(key, "/", "-") doneChan := make(chan struct{}) l := &coordinationv1.Lease{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ origKeyName: key, }, Name: nkey, Namespace: k.Cfg.Namespace, Labels: map[string]string{ "app": "gnmic", nkey: string(val), }, }, Spec: coordinationv1.LeaseSpec{ HolderIdentity: ptr.To(k.identity), LeaseDurationSeconds: ptr.To(int32(k.Cfg.LeaseDuration / time.Second)), }, } k.m.Lock() k.attemptinglocks[nkey] = &lock{ lease: l, doneChan: doneChan, } k.m.Unlock() // cleanup when done defer func() { k.m.Lock() defer k.m.Unlock() delete(k.attemptinglocks, nkey) }() for { select { case <-ctx.Done(): return false, ctx.Err() case <-doneChan: return false, lockers.ErrCanceled default: now := metav1.NowMicro() var ol *coordinationv1.Lease var err error // get or create ol, err = k.clientset.CoordinationV1().Leases(k.Cfg.Namespace).Get(ctx, nkey, metav1.GetOptions{}) if err != nil { if !errors.IsNotFound(err) { return false, err } // create lease k.logger.Printf("lease %q not found, creating it: %+v", nkey, l.String()) l.Spec.AcquireTime = &now l.Spec.RenewTime = &now ol, err = k.clientset.CoordinationV1().Leases(k.Cfg.Namespace).Create(ctx, l, metav1.CreateOptions{}) if err != nil { return false, err } k.m.Lock() k.acquiredlocks[nkey] = &lock{ lease: ol, doneChan: doneChan, } k.m.Unlock() return true, nil } // obtained, compare if ol != nil && ol.Spec.HolderIdentity != nil && *ol.Spec.HolderIdentity != "" { if k.Cfg.Debug { k.logger.Printf("%q held by other instance: %v", ol.Name, *ol.Spec.HolderIdentity != k.identity) k.logger.Printf("%q lease has renewTime: %v", ol.Name, ol.Spec.RenewTime != nil) } if *ol.Spec.HolderIdentity != k.identity && ol.Spec.RenewTime != nil { expectedRenewTime := ol.Spec.RenewTime.Add(time.Duration(*ol.Spec.LeaseDurationSeconds) * time.Second) if k.Cfg.Debug { k.logger.Printf("%q existing lease renew time %v", ol.Name, ol.Spec.RenewTime) k.logger.Printf("%q expected lease renew time %v", ol.Name, expectedRenewTime) k.logger.Printf("%q renew time passed: %v", ol.Name, expectedRenewTime.Before(now.Time)) } if !expectedRenewTime.Before(now.Time) { if k.Cfg.Debug { k.logger.Printf("%q is currently held by %s", ol.Name, *ol.Spec.HolderIdentity) } time.Sleep(k.Cfg.RenewPeriod) continue } } } k.logger.Printf("taking over lease %q", nkey) // update the lease now = metav1.NowMicro() l.Spec.AcquireTime = &now l.Spec.RenewTime = &now // set resource version to the latest value known l.SetResourceVersion(ol.GetResourceVersion()) k.logger.Printf("%q updating with %+v", l.Name, l) ol, err = k.clientset.CoordinationV1().Leases(k.Cfg.Namespace).Update(ctx, l, metav1.UpdateOptions{}) if err != nil { return false, err } k.m.Lock() if lc, ok := k.acquiredlocks[nkey]; ok { lc.lease = ol } else { k.acquiredlocks[nkey] = &lock{lease: ol, doneChan: doneChan} } k.m.Unlock() return true, nil } } } func (k *k8sLocker) KeepLock(ctx context.Context, key string) (chan struct{}, chan error) { doneChan := make(chan struct{}) errChan := make(chan error) nkey := strings.ReplaceAll(key, "/", "-") go func() { defer close(doneChan) ticker := time.NewTicker(k.Cfg.RenewPeriod) for { select { case <-ctx.Done(): errChan <- ctx.Err() return case <-doneChan: return case <-ticker.C: k.m.RLock() lock, ok := k.acquiredlocks[nkey] k.m.RUnlock() if !ok { errChan <- fmt.Errorf("unable to maintain lock %q: not found in acquiredlocks", nkey) return } ol, err := k.clientset.CoordinationV1().Leases(k.Cfg.Namespace).Get(ctx, nkey, metav1.GetOptions{}) if err != nil { errChan <- fmt.Errorf("unable to maintain lock %q: %v", nkey, err) return } lock.lease.SetResourceVersion(ol.GetResourceVersion()) switch k.compareLeases(lock.lease, ol) { case 0, 1: now := metav1.NowMicro() lock.lease.Spec.AcquireTime = &now lock.lease.Spec.RenewTime = &now ol, err = k.clientset.CoordinationV1().Leases(k.Cfg.Namespace).Update(ctx, lock.lease, metav1.UpdateOptions{}) if err != nil { errChan <- fmt.Errorf("unable to update lock %q: %v", nkey, err) return } k.m.Lock() if lock, ok := k.acquiredlocks[nkey]; ok { lock.lease = ol } k.m.Unlock() case -1: errChan <- fmt.Errorf("%q failed to keep lease", nkey) return } } } }() return doneChan, errChan } func (k *k8sLocker) Unlock(ctx context.Context, key string) error { nkey := strings.ReplaceAll(key, "/", "-") k.m.Lock() defer k.m.Unlock() k.unlock(ctx, nkey) return nil } // assumes the mutex is locked func (k *k8sLocker) unlock(ctx context.Context, key string) error { if lock, ok := k.acquiredlocks[key]; ok { delete(k.acquiredlocks, key) return k.clientset.CoordinationV1().Leases(k.Cfg.Namespace).Delete(ctx, lock.lease.Name, metav1.DeleteOptions{}) } if lock, ok := k.attemptinglocks[key]; ok { delete(k.attemptinglocks, key) close(lock.doneChan) return k.clientset.CoordinationV1().Leases(k.Cfg.Namespace).Delete(ctx, lock.lease.Name, metav1.DeleteOptions{}) } return nil } func (k *k8sLocker) Stop() error { k.m.Lock() defer k.m.Unlock() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() for key := range k.acquiredlocks { k.unlock(ctx, key) } return nil } func (k *k8sLocker) SetLogger(logger *log.Logger) { if logger != nil && k.logger != nil { k.logger.SetOutput(logger.Writer()) k.logger.SetFlags(logger.Flags()) } } // helpers func (k *k8sLocker) setDefaults() error { if k.Cfg.Namespace == "" { k.Cfg.Namespace = defaultNamespace } if k.Cfg.LeaseDuration <= 0 { k.Cfg.LeaseDuration = defaultLeaseDuration } if k.Cfg.RenewPeriod <= 0 || k.Cfg.RenewPeriod >= k.Cfg.LeaseDuration { k.Cfg.RenewPeriod = k.Cfg.LeaseDuration / 2 } if k.Cfg.RetryTimer <= 0 { k.Cfg.RetryTimer = defaultRetryTimer } return nil } func (k *k8sLocker) String() string { b, err := json.Marshal(k.Cfg) if err != nil { return "" } return string(b) } // compares 2 Leases, assume l1 is not nil and has a valid holderIdentity value. // returns 0 if l1 and l2 have the same holder identity // return 1 if l2 is nil, has no holder or has an expired renewTime // returns -1 if l2 has another holder identity and has a valid renewTime func (l *k8sLocker) compareLeases(l1, l2 *coordinationv1.Lease) int { if l2 == nil { return 1 } if l2.Spec.HolderIdentity == nil { return 1 } now := time.Now() if *l2.Spec.HolderIdentity == "" { return 1 } if *l1.Spec.HolderIdentity != *l2.Spec.HolderIdentity { if l2.Spec.RenewTime == nil { return 1 } expectedRenewTime := l2.Spec.RenewTime.Add(time.Duration(*l2.Spec.LeaseDurationSeconds) * time.Second) if expectedRenewTime.Before(now) { return 1 } else { return -1 } } return 0 } func (l *k8sLocker) getIdentity() string { name, err := os.Hostname() if err != nil { return uuid.NewString() } return name } ================================================ FILE: pkg/lockers/k8s_locker/k8s_registration.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package k8s_locker import ( "context" "fmt" "strings" "time" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/watch" "github.com/openconfig/gnmic/pkg/lockers" ) const defaultWatchTimeout = 10 * time.Second func (k *k8sLocker) Register(ctx context.Context, s *lockers.ServiceRegistration) error { return nil } func (k *k8sLocker) Deregister(s string) error { return nil } func (k *k8sLocker) WatchServices(ctx context.Context, serviceName string, tags []string, sChan chan<- []*lockers.Service, watchTimeout time.Duration) error { if watchTimeout <= 0 { watchTimeout = defaultWatchTimeout } resourceVersion := "" var err error for { select { case <-ctx.Done(): return ctx.Err() default: resourceVersion, err = k.watch(ctx, serviceName, tags, sChan, watchTimeout, resourceVersion) if err != nil { k.logger.Printf("watch ended with error: %s", err) time.Sleep(k.Cfg.RetryTimer) } else if k.Cfg.Debug { k.logger.Print("watch timed out") } } } } func (k *k8sLocker) watch(ctx context.Context, serviceName string, _ []string, sChan chan<- []*lockers.Service, watchTimeout time.Duration, resourceVersion string) (string, error) { timeoutSeconds := int64(watchTimeout.Seconds()) listopts := metav1.ListOptions{ FieldSelector: fields.OneTermEqualSelector(metav1.ObjectNameField, serviceName).String(), ResourceVersion: resourceVersion, TimeoutSeconds: &timeoutSeconds, } if k.Cfg.Debug { if resourceVersion == "" { k.logger.Print("starting watch beginning with unspecified resource version") } else { k.logger.Printf("starting watch beginning with resource version %s", resourceVersion) } } watched, err := k.clientset.CoreV1().Endpoints(k.Cfg.Namespace).Watch(ctx, listopts) if err != nil { return "", err } defer watched.Stop() watchChan := watched.ResultChan() for { select { case <-ctx.Done(): return "", ctx.Err() case event := <-watchChan: switch event.Type { case watch.Modified, watch.Added: endpoints, ok := event.Object.(*corev1.Endpoints) if !ok { // this ought not to happen, but we should probably // start from scratch next time in case it does return "", fmt.Errorf("error converting watch result to an endpoint") } resourceVersion = endpoints.ResourceVersion if k.Cfg.Debug { k.logger.Printf("received watch event %s for resource version %s", event.Type, resourceVersion) } svcs, err := parseEndpoint(endpoints) if err != nil { return "", err } sChan <- svcs case "": // reached the timeout. return the version we last saw so // we can resume watching return resourceVersion, nil default: // something else happened, including maybe the object we // were watching being deleted. we'll need to start the // next watch from scratch, so don't return the resource // version return "", fmt.Errorf("unexpected watch event: %s", event.Type) } } } } func parseEndpoint(endpoint *corev1.Endpoints) ([]*lockers.Service, error) { // the service should only have a single port number assigned, so // all subsets should have the port number we're looking for if len(endpoint.Subsets) <= 0 { return nil, fmt.Errorf("no subsets found in endpoint for service %s", endpoint.Name) } if len(endpoint.Subsets[0].Ports) <= 0 { return nil, fmt.Errorf("no ports found for service %s", endpoint.Name) } port := endpoint.Subsets[0].Ports[0].Port services := make([]*lockers.Service, 0, len(endpoint.Subsets[0].Addresses)) for _, subset := range endpoint.Subsets { for _, addr := range subset.Addresses { targetName := addr.IP if addr.TargetRef != nil { targetName = addr.TargetRef.Name } ls := &lockers.Service{ ID: fmt.Sprintf("%s-api", targetName), Address: fmt.Sprintf("%s:%d", addr.IP, port), Tags: []string{ fmt.Sprintf("instance-name=%s", targetName), }, } services = append(services, ls) } } return services, nil } func (k *k8sLocker) GetServices(ctx context.Context, serviceName string, tags []string) ([]*lockers.Service, error) { ep, err := k.clientset.CoreV1().Endpoints(k.Cfg.Namespace).Get(ctx, serviceName, metav1.GetOptions{}) if err != nil { return nil, err } return parseEndpoint(ep) } func (k *k8sLocker) IsLocked(ctx context.Context, key string) (bool, error) { key = strings.ReplaceAll(key, "/", "-") ol, err := k.clientset.CoordinationV1().Leases(k.Cfg.Namespace).Get(ctx, key, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { return false, nil } return false, err } if ol == nil { return false, nil } if ol.Spec.RenewTime == nil { return false, nil } now := metav1.NowMicro() expectedRenewTime := ol.Spec.RenewTime.Add(time.Duration(*ol.Spec.LeaseDurationSeconds) * time.Second) return expectedRenewTime.After(now.Time), nil } func (k *k8sLocker) List(ctx context.Context, prefix string) (map[string]string, error) { ll, err := k.clientset.CoordinationV1().Leases(k.Cfg.Namespace).List(ctx, metav1.ListOptions{ LabelSelector: "app=gnmic", }) if err != nil { return nil, err } prefix = strings.ReplaceAll(prefix, "/", "-") rs := make(map[string]string, len(ll.Items)) for _, l := range ll.Items { for key, v := range l.Labels { if key == "app" { continue } if strings.HasPrefix(key, prefix) { okey, ok := l.Annotations[origKeyName] if ok { rs[okey] = v continue } } } } return rs, nil } ================================================ FILE: pkg/lockers/locker.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package lockers import ( "context" "errors" "log" "time" "github.com/mitchellh/mapstructure" ) var ErrCanceled = errors.New("canceled") type Locker interface { // Init initialises the locker data, with the given configuration read from flags/files. Init(context.Context, map[string]any, ...Option) error // Stop is called when the locker instance is called. It should unlock all acquired locks. Stop() error SetLogger(*log.Logger) // This is the Target locking logic. // Lock acquires a lock on given key. Lock(context.Context, string, []byte) (bool, error) // KeepLock maintains the lock on the target. KeepLock(context.Context, string) (chan struct{}, chan error) // IsLocked replys if the target given as string is currently locked or not. IsLocked(context.Context, string) (bool, error) // Unlock unlocks the target log. Unlock(context.Context, string) error // This is the instance registration logic. // Register registers this instance in the registry. It must also maintain the registration (called in a goroutine from the main). ServiceRegistration.ID contains the ID of the service to register. Register(context.Context, *ServiceRegistration) error // Deregister removes this instance from the registry. This looks like it's not called. Deregister(string) error // GetServices must return the gnmic instances. GetServices(ctx context.Context, serviceName string, tags []string) ([]*Service, error) // WatchServices must push all existing discovered gnmic instances // into the provided channel. WatchServices(ctx context.Context, serviceName string, tags []string, ch chan<- []*Service, dur time.Duration) error // Mixed registration/target lock functions // List returns all locks that start with prefix string, // indexed by the lock name. Could be target locks or leader lock. It must return a map of matching keys to instance name. List(ctx context.Context, prefix string) (map[string]string, error) } type Initializer func() Locker var Lockers = map[string]Initializer{} type Option func(Locker) func WithLogger(logger *log.Logger) Option { return func(i Locker) { i.SetLogger(logger) } } var LockerTypes = []string{ "consul", "k8s", "redis", } func Register(name string, initFn Initializer) { Lockers[name] = initFn } func DecodeConfig(src, dst interface{}) error { decoder, err := mapstructure.NewDecoder( &mapstructure.DecoderConfig{ DecodeHook: mapstructure.StringToTimeDurationHookFunc(), Result: dst, }, ) if err != nil { return err } return decoder.Decode(src) } type ServiceRegistration struct { ID string Name string Address string Port int Tags []string TTL time.Duration } type Service struct { ID string Address string Tags []string } ================================================ FILE: pkg/lockers/redis_locker/redis_locker.go ================================================ package redis_locker import ( "context" "crypto/rand" "encoding/base64" "encoding/json" "fmt" "io" "log" "sync" "time" "github.com/go-redsync/redsync/v4" "github.com/go-redsync/redsync/v4/redis/goredis/v9" goredislib "github.com/redis/go-redis/v9" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/lockers" ) const ( defaultLeaseDuration = 10 * time.Second defaultRetryTimer = 2 * time.Second defaultPollTimer = 10 * time.Second loggingPrefix = "[redis_locker] " ) func init() { lockers.Register("redis", func() lockers.Locker { return &redisLocker{ Cfg: &config{}, m: new(sync.RWMutex), acquiredLocks: make(map[string]*redsync.Mutex), attemptingLocks: make(map[string]*redsync.Mutex), registerLock: make(map[string]context.CancelFunc), logger: log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags), } }) } type redisLocker struct { Cfg *config logger *log.Logger m *sync.RWMutex acquiredLocks map[string]*redsync.Mutex attemptingLocks map[string]*redsync.Mutex registerLock map[string]context.CancelFunc client goredislib.UniversalClient redisLocker *redsync.Redsync } type config struct { Servers []string `mapstructure:"servers,omitempty" json:"servers,omitempty"` MasterName string `mapstructure:"master-name,omitempty" json:"master-name,omitempty"` Password string `mapstructure:"password,omitempty" json:"password,omitempty"` LeaseDuration time.Duration `mapstructure:"lease-duration,omitempty" json:"lease-duration,omitempty"` RenewPeriod time.Duration `mapstructure:"renew-period,omitempty" json:"renew-period,omitempty"` RetryTimer time.Duration `mapstructure:"retry-timer,omitempty" json:"retry-timer,omitempty"` PollTimer time.Duration `mapstructure:"poll-timer,omitempty" json:"poll-timer,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` } func (k *redisLocker) Init(ctx context.Context, cfg map[string]interface{}, opts ...lockers.Option) error { err := lockers.DecodeConfig(cfg, k.Cfg) if err != nil { return err } for _, opt := range opts { opt(k) } err = k.setDefaults() if err != nil { return err } k.client = goredislib.NewUniversalClient(&goredislib.UniversalOptions{ Addrs: k.Cfg.Servers, MasterName: k.Cfg.MasterName, Password: k.Cfg.Password, }) if err := k.client.Ping(ctx).Err(); err != nil { return fmt.Errorf("cannot contact redis server: %w", err) } k.redisLocker = redsync.New(goredis.NewPool(k.client)) return nil } func (k *redisLocker) Lock(ctx context.Context, key string, val []byte) (bool, error) { if k.Cfg.Debug { k.logger.Printf("attempting to lock=%s", key) } mu := k.redisLocker.NewMutex( key, redsync.WithGenValueFunc(func() (string, error) { rand, err := k.genRandValue() if err != nil { return "", err } return fmt.Sprintf("%s-%s", val, rand), nil }), redsync.WithExpiry(k.Cfg.LeaseDuration), ) k.m.Lock() k.attemptingLocks[key] = mu k.m.Unlock() defer func() { k.m.Lock() defer k.m.Unlock() delete(k.attemptingLocks, key) }() for { select { case <-ctx.Done(): return false, ctx.Err() default: err := mu.LockContext(ctx) if err != nil { switch err.(type) { case *redsync.ErrTaken: if k.Cfg.Debug { k.logger.Printf("lock already taken lock=%s: %v", key, err) } return false, nil default: return false, fmt.Errorf("failed to acquire lock=%s: %w", key, err) } } k.m.Lock() k.acquiredLocks[key] = mu k.m.Unlock() return true, nil } } } func (k *redisLocker) KeepLock(ctx context.Context, key string) (chan struct{}, chan error) { doneChan := make(chan struct{}) errChan := make(chan error) go func() { defer close(doneChan) ticker := time.NewTicker(k.Cfg.RenewPeriod) k.m.RLock() lock, ok := k.acquiredLocks[key] k.m.RUnlock() for { select { case <-ctx.Done(): errChan <- ctx.Err() return case <-doneChan: return case <-ticker.C: if !ok { errChan <- fmt.Errorf("unable to maintain lock %q: not found in acquiredlocks", key) return } ok, err := lock.ExtendContext(ctx) if err != nil { errChan <- err return } if !ok { errChan <- fmt.Errorf("could not keep lock") return } } } }() return doneChan, errChan } func (k *redisLocker) Unlock(ctx context.Context, key string) error { k.m.Lock() defer k.m.Unlock() if lock, ok := k.acquiredLocks[key]; ok { delete(k.acquiredLocks, key) ok, err := lock.Unlock() if err != nil { return err } if !ok { return fmt.Errorf("failed to unlock lock %s", key) } } if lock, ok := k.attemptingLocks[key]; ok { delete(k.attemptingLocks, key) _, err := lock.Unlock() if err != nil { return err } } return nil } func (k *redisLocker) Stop() error { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() keys := []string{} k.m.RLock() for key := range k.acquiredLocks { keys = append(keys, key) } k.m.RUnlock() for _, key := range keys { k.Unlock(ctx, key) } return k.Deregister("") } func (k *redisLocker) SetLogger(logger *log.Logger) { if logger != nil && k.logger != nil { k.logger.SetOutput(logger.Writer()) k.logger.SetFlags(logger.Flags()) } } // helpers func (k *redisLocker) setDefaults() error { if k.Cfg.LeaseDuration <= 0 { k.Cfg.LeaseDuration = defaultLeaseDuration } if k.Cfg.RenewPeriod <= 0 || k.Cfg.RenewPeriod >= k.Cfg.LeaseDuration { k.Cfg.RenewPeriod = k.Cfg.LeaseDuration / 2 } if k.Cfg.RetryTimer <= 0 { k.Cfg.RetryTimer = defaultRetryTimer } if k.Cfg.PollTimer <= 0 { k.Cfg.PollTimer = defaultPollTimer } return nil } func (k *redisLocker) String() string { b, err := json.Marshal(k.Cfg) if err != nil { return "" } return string(b) } // genRandValue is required to generate a random value // so that the redislock algorithm works properly // especially in multi-server setups. func (k *redisLocker) genRandValue() (string, error) { b := make([]byte, 16) _, err := rand.Read(b) if err != nil { return "", err } return base64.StdEncoding.EncodeToString(b), nil } ================================================ FILE: pkg/lockers/redis_locker/redis_registration.go ================================================ package redis_locker import ( "bytes" "context" "encoding/json" "fmt" "time" "github.com/go-redsync/redsync/v4" goredis "github.com/redis/go-redis/v9" "github.com/openconfig/gnmic/pkg/lockers" ) // defaultWatchTimeout const defaultWatchTimeout = 10 * time.Second // redisRegistration represents a gnmic endpoint in redis. // It's serialised in the redis value to allow recovering // it during service discovery. type redisRegistration struct { ID string Address string Port int Tags []string Rand string } func (k *redisLocker) Register(ctx context.Context, s *lockers.ServiceRegistration) error { ctx, cancel := context.WithCancel(ctx) k.m.Lock() k.registerLock[s.ID] = cancel k.m.Unlock() if k.Cfg.Debug { k.logger.Printf("locking service=%s", s.ID) } mutex := k.redisLocker.NewMutex( fmt.Sprintf("%s-%s", s.Name, s.ID), redsync.WithGenValueFunc(func() (string, error) { rand, err := k.genRandValue() if err != nil { return "", err } reg := &redisRegistration{ ID: s.ID, Address: s.Address, Port: s.Port, Tags: s.Tags, Rand: rand, } val, err := json.Marshal(reg) if err != nil { return "", err } return string(val), nil }), redsync.WithExpiry(s.TTL), ) err := mutex.LockContext(ctx) if err != nil { return fmt.Errorf("failed to lock service=%s, %w", s.ID, err) } ticker := time.NewTicker(s.TTL / 2) defer ticker.Stop() for { select { case <-ticker.C: ok, err := mutex.ExtendContext(ctx) if err != nil { return fmt.Errorf("failed to extend lock for service=%s: %w", s.ID, err) } if !ok { return fmt.Errorf("could not extend lock for service=%s", s.ID) } case <-ctx.Done(): mutex.Unlock() return nil } } } func (k *redisLocker) Deregister(s string) error { k.m.Lock() defer k.m.Unlock() for sid, lockCancel := range k.registerLock { if k.Cfg.Debug { k.logger.Printf("unlocking service=%s", sid) } lockCancel() delete(k.registerLock, sid) } return nil } func (k *redisLocker) WatchServices(ctx context.Context, serviceName string, tags []string, sChan chan<- []*lockers.Service, watchTimeout time.Duration) error { if watchTimeout <= 0 { watchTimeout = defaultWatchTimeout } var err error for { select { case <-ctx.Done(): return ctx.Err() default: if k.Cfg.Debug { k.logger.Printf("(re)starting watch service=%q", serviceName) } err = k.watch(ctx, serviceName, tags, sChan, watchTimeout) if err != nil { k.logger.Printf("watch ended with error: %s", err) time.Sleep(k.Cfg.RetryTimer) continue } time.Sleep(k.Cfg.PollTimer) } } } func (k *redisLocker) watch(ctx context.Context, serviceName string, tags []string, sChan chan<- []*lockers.Service, watchTimeout time.Duration) error { // timeoutSeconds := int64(watchTimeout.Seconds()) // TODO: implement watch services, err := k.GetServices(ctx, serviceName, tags) if err != nil { return err } sChan <- services return nil } func (k *redisLocker) getBatchOfKeys(ctx context.Context, key string, batchSize int64, cursor uint64) (uint64, map[string]*goredis.StringCmd, error) { keys, cursor, err := k.client.Scan( ctx, cursor, key, batchSize, ).Result() if err != nil { return 0, nil, fmt.Errorf("failed to scan keys: %w", err) } results := map[string]*goredis.StringCmd{} _, err = k.client.Pipelined(ctx, func(p goredis.Pipeliner) error { for _, k := range keys { results[k] = p.Get(ctx, k) } return nil }) if err != nil { return cursor, nil, fmt.Errorf("error getting contents of keys") } return cursor, results, nil } func (k *redisLocker) GetServices(ctx context.Context, serviceName string, tags []string) ([]*lockers.Service, error) { var pageSize int64 = 50 var cursor uint64 var err error var cmds map[string]*goredis.StringCmd discoveredServiceRegistrations := []*redisRegistration{} for { select { case <-ctx.Done(): return nil, ctx.Err() default: // to select all gnmic instances, matching the given prefix cursor, cmds, err = k.getBatchOfKeys( ctx, fmt.Sprintf("%s-*", serviceName), pageSize, cursor, ) if err != nil { return nil, err } for _, cmd := range cmds { bytesVal, err := cmd.Bytes() if err != nil { // key removed from redis // could be that it has expired // doesn't make a difference, we skip it continue } serviceRegistration := &redisRegistration{} if err := json.Unmarshal(bytesVal, serviceRegistration); err != nil { // we don't have the data we expect // skip it continue } discoveredServiceRegistrations = append( discoveredServiceRegistrations, serviceRegistration, ) } // termination condition for redis scan if cursor == 0 { if k.Cfg.Debug { k.logger.Printf("got %d services from redis", len(discoveredServiceRegistrations)) } // convert discovered servicesRegistrations to services discoveredServices := make([]*lockers.Service, len(discoveredServiceRegistrations)) for i, registration := range discoveredServiceRegistrations { // match the required tags if !matchTags(registration.Tags, tags) { continue } discoveredServices[i] = &lockers.Service{ ID: registration.ID, Tags: registration.Tags, Address: fmt.Sprintf( "%s:%d", registration.Address, registration.Port, ), } } return discoveredServices, nil } } } } func (k *redisLocker) IsLocked(ctx context.Context, key string) (bool, error) { count, err := k.client.Exists(ctx, key).Result() if err != nil { return false, fmt.Errorf("error during redis query: %w", err) } if count > 0 { return true, nil } return false, nil } func (k *redisLocker) List(ctx context.Context, prefix string) (map[string]string, error) { var cursor uint64 var err error var cmds map[string]*goredis.StringCmd data := map[string]string{} for { select { case <-ctx.Done(): return nil, ctx.Err() default: } cursor, cmds, err = k.getBatchOfKeys( ctx, fmt.Sprintf("%s*", prefix), 100, cursor, ) if err != nil { return nil, fmt.Errorf("failed to fetch from redis: %w", err) } if k.Cfg.Debug { k.logger.Printf( "got %d keys from redis for prefix=%s", len(cmds), prefix, ) } for key, cmd := range cmds { bytesVal, err := cmd.Bytes() if err != nil { // key removed from redis // could be that it has expired // doesn't make a difference, we skip it continue } // we add a random string at the end of the value for redis // redlock algorithm, so we need to remove it here lastIndex := bytes.LastIndex(bytesVal, []byte("-")) // if it's not there, we skip the key if lastIndex < 0 { continue } data[key] = string(bytesVal[:lastIndex]) } if cursor == 0 { return data, nil } } } func matchTags(tags, wantedTags []string) bool { if wantedTags == nil { return true } tagsMap := map[string]struct{}{} for _, t := range tags { tagsMap[t] = struct{}{} } for _, wt := range wantedTags { if _, ok := tagsMap[wt]; !ok { return false } } return true } ================================================ FILE: pkg/logging/logging.go ================================================ // © 2025 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package logging import ( "fmt" "log/slog" "os" "reflect" "github.com/openconfig/gnmic/pkg/config" "github.com/zestor-dev/zestor/store" "gopkg.in/natefinch/lumberjack.v2" ) func GetLogger(level slog.Level, args ...any) *slog.Logger { handlerOptions := &slog.HandlerOptions{Level: level} return slog.New(slog.NewTextHandler(os.Stderr, handlerOptions)).With(args...) } func NewLogger(store store.Store[any], args ...any) *slog.Logger { cfg, ok, err := store.Get("global-flags", "global-flags") if err != nil { fmt.Fprintf(os.Stderr, "error getting global flags: %v. Building a default logger.\n", err) return GetLogger(slog.LevelInfo, args...) } if !ok { fmt.Fprintf(os.Stderr, "globalFlags is of an unexpected type: %T. Building a default logger.\n", reflect.TypeOf(cfg)) return GetLogger(slog.LevelInfo, args...) } flags := config.GlobalFlags{} switch i := cfg.(type) { case config.GlobalFlags: flags = i default: fmt.Fprintf(os.Stderr, "globalFlags is of an unexpected type: %T. Building a default logger.\n", reflect.TypeOf(cfg)) return GetLogger(slog.LevelInfo, args...) } if !flags.Log { return slog.New(slog.DiscardHandler) } var level slog.Level if flags.Debug { level = slog.LevelDebug } else { level = slog.LevelInfo } handlerOptions := &slog.HandlerOptions{Level: level} if flags.LogFile != "" { if flags.LogMaxSize > 0 { lj := &lumberjack.Logger{ Filename: flags.LogFile, MaxSize: flags.LogMaxSize, MaxBackups: flags.LogMaxBackups, Compress: flags.LogCompress, } return slog.New(slog.NewTextHandler(lj, handlerOptions)).With(args...) } f, err := os.OpenFile(flags.LogFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) if err != nil { fmt.Fprintf(os.Stderr, "error opening log file: %v\n", err) return GetLogger(slog.LevelInfo, args...) } return slog.New(slog.NewTextHandler(f, handlerOptions)).With(args...) } return slog.New(slog.NewTextHandler(os.Stderr, handlerOptions)).With(args...) } ================================================ FILE: pkg/outputs/all/all.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project ("Work") made under the Google Software Grant and Corporate Contributor License Agreement ("CLA") and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose. // This code is provided on an "as is" basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 // © 2025 NVIDIA Corporation // // This code is a Contribution to the gNMIc project ("Work") made under the Google Software Grant and Corporate Contributor License Agreement ("CLA") and governed by the Apache License 2.0. // No other rights or licenses in or to any of NVIDIA's intellectual property are granted for any other purpose. // This code is provided on an "as is" basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package all import ( _ "github.com/openconfig/gnmic/pkg/outputs/asciigraph_output" _ "github.com/openconfig/gnmic/pkg/outputs/file" _ "github.com/openconfig/gnmic/pkg/outputs/gnmi_output" _ "github.com/openconfig/gnmic/pkg/outputs/influxdb_output" _ "github.com/openconfig/gnmic/pkg/outputs/kafka_output" _ "github.com/openconfig/gnmic/pkg/outputs/nats_outputs/jetstream" _ "github.com/openconfig/gnmic/pkg/outputs/nats_outputs/nats" _ "github.com/openconfig/gnmic/pkg/outputs/otlp_output" _ "github.com/openconfig/gnmic/pkg/outputs/prometheus_output/prometheus_output" _ "github.com/openconfig/gnmic/pkg/outputs/prometheus_output/prometheus_write_output" _ "github.com/openconfig/gnmic/pkg/outputs/snmp_output" _ "github.com/openconfig/gnmic/pkg/outputs/tcp_output" _ "github.com/openconfig/gnmic/pkg/outputs/udp_output" ) ================================================ FILE: pkg/outputs/asciigraph_output/asciigraph.go ================================================ // © 2023 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package asciigraph_output import ( "context" "encoding/json" "errors" "fmt" "io" "log" "math" "os" "sort" "strconv" "strings" "sync" "text/template" "time" "github.com/guptarohit/asciigraph" "github.com/nsf/termbox-go" "github.com/openconfig/gnmi/proto/gnmi" "github.com/prometheus/client_golang/prometheus" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/gtemplate" "github.com/openconfig/gnmic/pkg/outputs" gutils "github.com/openconfig/gnmic/pkg/utils" "github.com/zestor-dev/zestor/store" ) const ( loggingPrefix = "[asciigraph_output:%s] " defaultRefreshTimer = time.Second defaultPrecision = 2 defaultTimeout = 10 * time.Second ) var ( defaultLabelColor = asciigraph.Blue defaultCaptionColor = asciigraph.Default defaultAxisColor = asciigraph.Default ) func init() { outputs.Register("asciigraph", func() outputs.Output { return &asciigraphOutput{ cfg: &cfg{}, logger: log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags), eventCh: make(chan *formatters.EventMsg, 100), m: new(sync.RWMutex), data: make(map[string]*series), colors: make(map[asciigraph.AnsiColor]struct{}), } }) } // asciigraphOutput // type asciigraphOutput struct { outputs.BaseOutput cfg *cfg logger *log.Logger eventCh chan *formatters.EventMsg m *sync.RWMutex data map[string]*series colors map[asciigraph.AnsiColor]struct{} caption string captionColor asciigraph.AnsiColor axisColor asciigraph.AnsiColor labelColor asciigraph.AnsiColor evps []formatters.EventProcessor targetTpl *template.Template store store.Store[any] } type series struct { name string data []float64 color asciigraph.AnsiColor } // cfg // type cfg struct { // The caption to be displayed under the graph Caption string `mapstructure:"caption,omitempty" json:"caption,omitempty"` // The graph height Height int `mapstructure:"height,omitempty" json:"height,omitempty"` // The graph width Width int `mapstructure:"width,omitempty" json:"width,omitempty"` // The graph minimum value for the vertical axis LowerBound *float64 `mapstructure:"lower-bound,omitempty" json:"lower-bound,omitempty"` // the graph maximum value for the vertical axis UpperBound *float64 `mapstructure:"upper-bound,omitempty" json:"upper-bound,omitempty"` // The graph offset Offset int `mapstructure:"offset,omitempty" json:"offset,omitempty"` // The decimal point precision of the label values Precision uint `mapstructure:"precision,omitempty" json:"precision,omitempty"` // The caption color CaptionColor string `mapstructure:"caption-color,omitempty" json:"caption-color,omitempty"` // The axis color AxisColor string `mapstructure:"axis-color,omitempty" json:"axis-color,omitempty"` // The label color LabelColor string `mapstructure:"label-color,omitempty" json:"label-color,omitempty"` // The graph refresh timer RefreshTimer time.Duration `mapstructure:"refresh-timer,omitempty" json:"refresh-timer,omitempty"` // Add target the received subscribe responses AddTarget string `mapstructure:"add-target,omitempty" json:"add-target,omitempty"` // TargetTemplate string `mapstructure:"target-template,omitempty" json:"target-template,omitempty"` // list of event processors EventProcessors []string `mapstructure:"event-processors,omitempty" json:"event-processors,omitempty"` // enable extra logging Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` } func (a *asciigraphOutput) String() string { b, err := json.Marshal(a.cfg) if err != nil { return "" } return string(b) } func (a *asciigraphOutput) setEventProcessors(logger *log.Logger) error { tcs, ps, acts, err := gutils.GetConfigMaps(a.store) if err != nil { return err } a.evps, err = formatters.MakeEventProcessors( logger, a.cfg.EventProcessors, ps, tcs, acts, ) if err != nil { return err } return nil } func (a *asciigraphOutput) setLogger(logger *log.Logger) { if logger != nil && a.logger != nil { a.logger.SetOutput(logger.Writer()) a.logger.SetFlags(logger.Flags()) } } // Init // func (a *asciigraphOutput) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error { err := outputs.DecodeConfig(cfg, a.cfg) if err != nil { return err } a.logger.SetPrefix(fmt.Sprintf(loggingPrefix, name)) options := &outputs.OutputOptions{} for _, opt := range opts { if err := opt(options); err != nil { return err } } a.store = options.Store a.setLogger(options.Logger) err = a.setEventProcessors(options.Logger) if err != nil { return err } if a.cfg.TargetTemplate == "" { a.targetTpl = outputs.DefaultTargetTemplate } else if a.cfg.AddTarget != "" { a.targetTpl, err = gtemplate.CreateTemplate("target-template", a.cfg.TargetTemplate) if err != nil { return err } a.targetTpl = a.targetTpl.Funcs(outputs.TemplateFuncs) } // set defaults err = a.setDefaults() if err != nil { return err } // go a.graph(ctx) a.logger.Printf("initialized asciigraph output: %s", a.String()) return nil } func (a *asciigraphOutput) Update(ctx context.Context, cfg map[string]any) error { return errors.New("not implemented for this output type") } func (a *asciigraphOutput) setDefaults() error { a.labelColor = defaultLabelColor if a.cfg.LabelColor != "" { if lc, ok := asciigraph.ColorNames[a.cfg.LabelColor]; ok { a.labelColor = lc } else { return fmt.Errorf("unknown label color %s", a.cfg.LabelColor) } } a.captionColor = defaultCaptionColor if a.cfg.CaptionColor != "" { if lc, ok := asciigraph.ColorNames[a.cfg.CaptionColor]; ok { a.captionColor = lc } else { return fmt.Errorf("unknown caption color %s", a.cfg.CaptionColor) } } a.axisColor = defaultAxisColor if a.cfg.AxisColor != "" { if lc, ok := asciigraph.ColorNames[a.cfg.AxisColor]; ok { a.axisColor = lc } else { return fmt.Errorf("unknown axis color %s", a.cfg.AxisColor) } } if a.cfg.RefreshTimer <= 0 { a.cfg.RefreshTimer = defaultRefreshTimer } if a.cfg.Precision <= 0 { a.cfg.Precision = defaultPrecision } return a.getTermSize() } // Write // func (a *asciigraphOutput) Write(ctx context.Context, rsp proto.Message, meta outputs.Meta) { if rsp == nil { return } subRsp, err := outputs.AddSubscriptionTarget(rsp, meta, a.cfg.AddTarget, a.targetTpl) if err != nil { a.logger.Printf("failed to add target to the response: %v", err) return } evs, err := formatters.ResponseToEventMsgs(meta["subscription-name"], subRsp, meta, a.evps...) if err != nil { a.logger.Printf("failed to convert messages to events: %v", err) return } for _, ev := range evs { a.WriteEvent(ctx, ev) } } func (a *asciigraphOutput) WriteEvent(ctx context.Context, ev *formatters.EventMsg) { ctx, cancel := context.WithTimeout(ctx, defaultTimeout) defer cancel() select { case <-ctx.Done(): a.logger.Printf("write timeout: %v", ctx.Err()) case a.eventCh <- ev: } } // Close // func (a *asciigraphOutput) Close() error { return nil } // Metrics // func (a *asciigraphOutput) RegisterMetrics(reg *prometheus.Registry) { } func (a *asciigraphOutput) SetName(name string) {} func (a *asciigraphOutput) SetClusterName(name string) {} func (a *asciigraphOutput) SetTargetsConfig(map[string]*types.TargetConfig) {} func (a *asciigraphOutput) graph(ctx context.Context) { for { select { case <-ctx.Done(): return case ev, ok := <-a.eventCh: if !ok { return } a.plot(ev) case <-time.After(a.cfg.RefreshTimer): a.plot(nil) } } } func (a *asciigraphOutput) plot(e *formatters.EventMsg) { a.m.Lock() defer a.m.Unlock() a.getTermSize() if e != nil && len(e.Values) > 0 { a.updateData(e) } data, colors := a.buildData() if len(data) == 0 { return } opts := []asciigraph.Option{ asciigraph.Height(a.cfg.Height), asciigraph.Width(a.cfg.Width), asciigraph.Offset(a.cfg.Offset), asciigraph.Precision(a.cfg.Precision), asciigraph.Caption(a.caption), asciigraph.CaptionColor(a.captionColor), asciigraph.SeriesColors(colors...), asciigraph.AxisColor(a.axisColor), asciigraph.LabelColor(a.labelColor), } if a.cfg.LowerBound != nil { opts = append(opts, asciigraph.LowerBound(*a.cfg.LowerBound)) } if a.cfg.UpperBound != nil { opts = append(opts, asciigraph.UpperBound(*a.cfg.UpperBound)) } plot := asciigraph.PlotMany(data, opts...) asciigraph.Clear() fmt.Fprintln(os.Stdout, plot) } func (a *asciigraphOutput) updateData(e *formatters.EventMsg) { if e == nil { return } evs := splitEvent(e) for _, ev := range evs { sn := a.buildSeriesName(e) serie := a.getOrCreateSerie(sn) for _, v := range ev.Values { i, err := toFloat(v) if err != nil { continue } serie.data = append(serie.data, i) break } } } func (a *asciigraphOutput) getOrCreateSerie(name string) *series { serie, ok := a.data[name] if ok { return serie } color := a.pickColor() serie = &series{ name: name, data: make([]float64, 0, a.cfg.Width-a.cfg.Offset), color: color, } a.data[name] = serie a.colors[serie.color] = struct{}{} a.setCaption() return serie } func (a *asciigraphOutput) setCaption() { seriesNames := make([]string, 0, len(a.data)) for seriesName := range a.data { seriesNames = append(seriesNames, seriesName) } sort.Strings(seriesNames) a.caption = "" if a.cfg.Debug { a.caption = fmt.Sprintf("(h=%d,w=%d)\n", a.cfg.Height, a.cfg.Width) } a.caption = fmt.Sprintf("%s\n", a.cfg.Caption) for _, sn := range seriesNames { color := a.data[sn].color a.caption += color.String() + "-+- " + sn + asciigraph.Default.String() + "\n" } } func (a *asciigraphOutput) buildData() ([][]float64, []asciigraph.AnsiColor) { numgraphs := len(a.data) series := make([]*series, 0, numgraphs) // sort series by name for _, serie := range a.data { size := len(serie.data) if size == 0 { continue } if size > a.cfg.Width { serie.data = serie.data[size-a.cfg.Width:] } series = append(series, serie) } sort.Slice(series, func(i, j int) bool { return series[i].name < series[j].name }) data := make([][]float64, 0, numgraphs) colors := make([]asciigraph.AnsiColor, 0, numgraphs) // get float slices and colors for _, serie := range series { data = append(data, serie.data) colors = append(colors, serie.color) } return data, colors } func splitEvent(e *formatters.EventMsg) []*formatters.EventMsg { numVals := len(e.Values) switch numVals { case 0: return nil case 1: return []*formatters.EventMsg{e} } evs := make([]*formatters.EventMsg, 0, numVals) for k, v := range e.Values { ev := &formatters.EventMsg{ Name: e.Name, Timestamp: e.Timestamp, Tags: e.Tags, Values: map[string]interface{}{k: v}, } evs = append(evs, ev) } return evs } var stringBuilderPool = sync.Pool{ New: func() any { return new(strings.Builder) }, } func (a *asciigraphOutput) buildSeriesName(e *formatters.EventMsg) string { sb := stringBuilderPool.Get().(*strings.Builder) defer func() { sb.Reset() stringBuilderPool.Put(sb) }() sb.WriteString(e.Name) sb.WriteString(":") for k := range e.Values { sb.WriteString(k) } numTags := len(e.Tags) if numTags == 0 { return sb.String() } sb.WriteString("{") tagNames := make([]string, 0, numTags) for k := range e.Tags { tagNames = append(tagNames, k) } sort.Strings(tagNames) for i, tn := range tagNames { fmt.Fprintf(sb, "%s=%s", tn, e.Tags[tn]) if numTags != i+1 { sb.WriteString(", ") } } sb.WriteString("}") return sb.String() } func toFloat(v interface{}) (float64, error) { switch i := v.(type) { case float64: return float64(i), nil case float32: return float64(i), nil case int64: return float64(i), nil case int32: return float64(i), nil case int16: return float64(i), nil case int8: return float64(i), nil case uint64: return float64(i), nil case uint32: return float64(i), nil case uint16: return float64(i), nil case uint8: return float64(i), nil case int: return float64(i), nil case uint: return float64(i), nil case string: f, err := strconv.ParseFloat(i, 64) if err != nil { return math.NaN(), err } return f, err //lint:ignore SA1019 still need DecimalVal for backward compatibility case *gnmi.Decimal64: return float64(i.Digits) / math.Pow10(int(i.Precision)), nil default: return math.NaN(), errors.New("getFloat: unknown value is of incompatible type") } } func (a *asciigraphOutput) pickColor() asciigraph.AnsiColor { for _, c := range asciigraph.ColorNames { if _, ok := a.colors[c]; !ok { return c } } return 0 } func (a *asciigraphOutput) getTermSize() error { err := termbox.Init() if err != nil { return fmt.Errorf("could not initialize a terminal box: %v", err) } w, h := termbox.Size() termbox.Close() if a.cfg.Width <= 0 || a.cfg.Width > w-10 { a.cfg.Width = w - 10 } numSeries := len(a.data) if a.cfg.Height <= 0 || a.cfg.Height > h-(numSeries+1)-5 { a.cfg.Height = h - (numSeries + 1) - 5 } return nil } ================================================ FILE: pkg/outputs/file/file_metrics.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package file import ( "sync" "github.com/prometheus/client_golang/prometheus" ) var registerMetricsOnce sync.Once var numberOfWrittenBytes = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "file_output", Name: "number_bytes_written_total", Help: "Number of bytes written to file output", }, []string{"name", "file_name"}) var numberOfReceivedMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "file_output", Name: "number_messages_received_total", Help: "Number of messages received by file output", }, []string{"name", "file_name"}) var numberOfWrittenMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "file_output", Name: "number_messages_writes_total", Help: "Number of messages written to file output", }, []string{"name", "file_name"}) var numberOfFailWriteMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "file_output", Name: "number_messages_writes_fail_total", Help: "Number of failed message writes to file output", }, []string{"name", "file_name", "reason"}) func (f *File) initMetrics(name string) { numberOfWrittenBytes.WithLabelValues(name, "").Add(0) numberOfReceivedMsgs.WithLabelValues(name, "").Add(0) numberOfWrittenMsgs.WithLabelValues(name, "").Add(0) numberOfFailWriteMsgs.WithLabelValues(name, "", "").Add(0) } func (f *File) registerMetrics() error { cfg := f.cfg.Load() if cfg == nil { return nil } if !cfg.EnableMetrics { return nil } if f.reg == nil { f.logger.Printf("ERR: metrics enabled but main registry is not initialized, enable main metrics under `api-server`") return nil } var err error registerMetricsOnce.Do(func() { if err = f.reg.Register(numberOfWrittenBytes); err != nil { return } if err = f.reg.Register(numberOfReceivedMsgs); err != nil { return } if err = f.reg.Register(numberOfWrittenMsgs); err != nil { return } if err = f.reg.Register(numberOfFailWriteMsgs); err != nil { return } }) f.initMetrics(cfg.Name) return err } ================================================ FILE: pkg/outputs/file/file_output.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project ("Work") made under the Google Software Grant and Corporate Contributor License Agreement ("CLA") and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose. // This code is provided on an "as is" basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package file import ( "context" "encoding/json" "errors" "fmt" "io" "log" "os" "slices" "sync/atomic" "text/template" "time" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sync/semaphore" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/gtemplate" "github.com/openconfig/gnmic/pkg/outputs" gutils "github.com/openconfig/gnmic/pkg/utils" "github.com/zestor-dev/zestor/store" ) const ( defaultFormat = "json" defaultWriteConcurrency = 1000 defaultSeparator = "\n" loggingPrefix = "[file_output:%s] " ) const ( outputType = "file" fileType_STDOUT = "stdout" fileType_STDERR = "stderr" ) func init() { outputs.Register(outputType, func() outputs.Output { return &File{} }) } func (f *File) init() { f.cfg = new(atomic.Pointer[config]) f.dynCfg = new(atomic.Pointer[dynConfig]) f.file = new(atomic.Pointer[file]) f.logger = log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags) } // File // type File struct { outputs.BaseOutput cfg *atomic.Pointer[config] dynCfg *atomic.Pointer[dynConfig] file *atomic.Pointer[file] logger *log.Logger sem *semaphore.Weighted reg *prometheus.Registry store store.Store[any] } type dynConfig struct { targetTpl *template.Template msgTpl *template.Template evps []formatters.EventProcessor mo *formatters.MarshalOptions } // config // type config struct { Name string `mapstructure:"name,omitempty"` FileName string `mapstructure:"filename,omitempty"` FileType string `mapstructure:"file-type,omitempty"` Format string `mapstructure:"format,omitempty"` Multiline bool `mapstructure:"multiline,omitempty"` Indent string `mapstructure:"indent,omitempty"` Separator string `mapstructure:"separator,omitempty"` SplitEvents bool `mapstructure:"split-events,omitempty"` OverrideTimestamps bool `mapstructure:"override-timestamps,omitempty"` AddTarget string `mapstructure:"add-target,omitempty"` TargetTemplate string `mapstructure:"target-template,omitempty"` EventProcessors []string `mapstructure:"event-processors,omitempty"` MsgTemplate string `mapstructure:"msg-template,omitempty"` ConcurrencyLimit int `mapstructure:"concurrency-limit,omitempty"` EnableMetrics bool `mapstructure:"enable-metrics,omitempty"` Debug bool `mapstructure:"debug,omitempty"` CalculateLatency bool `mapstructure:"calculate-latency,omitempty"` Rotation *rotationConfig `mapstructure:"rotation,omitempty"` } type file interface { Close() error Name() string Write([]byte) (int, error) } func (f *File) String() string { cfg := f.cfg.Load() if cfg == nil { return "" } b, err := json.Marshal(cfg) if err != nil { return "" } return string(b) } func (f *File) setDefaults(cfg *config) error { if cfg.Format == "proto" { return fmt.Errorf("proto format not supported in output type 'file'") } if cfg.Separator == "" { cfg.Separator = defaultSeparator } if cfg.FileName == "" && cfg.FileType == "" { cfg.FileType = fileType_STDOUT } if cfg.Format == "" { cfg.Format = defaultFormat } if cfg.FileType == fileType_STDOUT || cfg.FileType == fileType_STDERR { cfg.Indent = " " cfg.Multiline = true } if cfg.Multiline && cfg.Indent == "" { cfg.Indent = " " } if cfg.ConcurrencyLimit < 1 { switch cfg.FileType { case fileType_STDOUT, fileType_STDERR: cfg.ConcurrencyLimit = 1 default: cfg.ConcurrencyLimit = defaultWriteConcurrency } } return nil } // Init // func (f *File) Init(ctx context.Context, name string, cfg map[string]any, opts ...outputs.Option) error { f.init() newCfg := new(config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } if newCfg.Name == "" { newCfg.Name = name } f.logger.SetPrefix(fmt.Sprintf(loggingPrefix, name)) options := &outputs.OutputOptions{} for _, opt := range opts { if err := opt(options); err != nil { return err } } f.store = options.Store // apply logger f.setLogger(options.Logger) err = f.setDefaults(newCfg) if err != nil { return err } // store config f.cfg.Store(newCfg) // initialize registry f.reg = options.Registry err = f.registerMetrics() if err != nil { return err } // initialize semaphore f.sem = semaphore.NewWeighted(int64(newCfg.ConcurrencyLimit)) // build dynamic config dc := new(dynConfig) // initialize event processors dc.evps, err = f.buildEventProcessors(options.Logger, newCfg.EventProcessors) if err != nil { return err } dc.mo = &formatters.MarshalOptions{ Multiline: newCfg.Multiline, Indent: newCfg.Indent, Format: newCfg.Format, OverrideTS: newCfg.OverrideTimestamps, CalculateLatency: newCfg.CalculateLatency, } // create templates if newCfg.TargetTemplate == "" { dc.targetTpl = outputs.DefaultTargetTemplate } else if newCfg.AddTarget != "" { dc.targetTpl, err = gtemplate.CreateTemplate("target-template", newCfg.TargetTemplate) if err != nil { return err } dc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs) } if newCfg.MsgTemplate != "" { dc.msgTpl, err = gtemplate.CreateTemplate(fmt.Sprintf("%s-msg-template", name), newCfg.MsgTemplate) if err != nil { return err } dc.msgTpl = dc.msgTpl.Funcs(outputs.TemplateFuncs) } f.dynCfg.Store(dc) // initialize file newFile, err := f.openFile(newCfg) if err != nil { return err } f.file.Store(&newFile) f.logger.Printf("initialized file output: %s", f.String()) return nil } func (f *File) Validate(cfg map[string]any) error { newCfg := new(config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } if newCfg.Format == "proto" { return fmt.Errorf("proto format not supported in output type 'file'") } return nil } func (f *File) openFile(cfg *config) (file, error) { var fileHandle file var err error switch cfg.FileType { case fileType_STDOUT: return os.Stdout, nil case fileType_STDERR: return os.Stderr, nil default: CRFILE: if cfg.Rotation != nil { fileHandle = newRotatingFile(cfg) } else { fileHandle, err = os.OpenFile(cfg.FileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) if err != nil { f.logger.Printf("failed to create file: %v", err) time.Sleep(10 * time.Second) goto CRFILE } } } return fileHandle, nil } func (f *File) Update(ctx context.Context, cfgMap map[string]any) error { newCfg := new(config) err := outputs.DecodeConfig(cfgMap, newCfg) if err != nil { return err } currCfg := f.cfg.Load() if newCfg.Name == "" && currCfg != nil { newCfg.Name = currCfg.Name } err = f.setDefaults(newCfg) if err != nil { return err } // check if we need to rebuild processors rebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0 // build new dynamic config dc := new(dynConfig) // rebuild processors if needed prevDC := f.dynCfg.Load() if rebuildProcessors { dc.evps, err = f.buildEventProcessors(f.logger, newCfg.EventProcessors) if err != nil { return err } } else if prevDC != nil { dc.evps = prevDC.evps } dc.mo = &formatters.MarshalOptions{ Multiline: newCfg.Multiline, Indent: newCfg.Indent, Format: newCfg.Format, OverrideTS: newCfg.OverrideTimestamps, CalculateLatency: newCfg.CalculateLatency, } // rebuild templates var targetTpl *template.Template if newCfg.TargetTemplate == "" { targetTpl = outputs.DefaultTargetTemplate } else if newCfg.AddTarget != "" { targetTpl, err = gtemplate.CreateTemplate("target-template", newCfg.TargetTemplate) if err != nil { return err } targetTpl = targetTpl.Funcs(outputs.TemplateFuncs) } else { targetTpl = outputs.DefaultTargetTemplate } dc.targetTpl = targetTpl if newCfg.MsgTemplate != "" { dc.msgTpl, err = gtemplate.CreateTemplate(fmt.Sprintf("%s-msg-template", newCfg.Name), newCfg.MsgTemplate) if err != nil { return err } dc.msgTpl = dc.msgTpl.Funcs(outputs.TemplateFuncs) } // store new dynamic config f.dynCfg.Store(dc) // check if file needs to be reopened needsFileReopen := fileNeedsReopen(currCfg, newCfg) if needsFileReopen { // open new file newFile, err := f.openFile(newCfg) if err != nil { return err } // swap file handle oldFile := f.file.Swap(&newFile) // close old file (but not stdout/stderr) if oldFile != nil && *oldFile != os.Stdout && *oldFile != os.Stderr { (*oldFile).Close() } } // update semaphore if concurrency limit changed if currCfg == nil || currCfg.ConcurrencyLimit != newCfg.ConcurrencyLimit { f.sem = semaphore.NewWeighted(int64(newCfg.ConcurrencyLimit)) } // store new config f.cfg.Store(newCfg) f.logger.Printf("updated file output: %s", f.String()) return nil } func (f *File) UpdateProcessor(name string, pcfg map[string]any) error { cfg := f.cfg.Load() dc := f.dynCfg.Load() newEvps, changed, err := outputs.UpdateProcessorInSlice( f.logger, f.store, cfg.EventProcessors, dc.evps, name, pcfg, ) if err != nil { return err } if changed { newDC := *dc newDC.evps = newEvps f.dynCfg.Store(&newDC) f.logger.Printf("updated event processor %s", name) } return nil } // Write // func (f *File) Write(ctx context.Context, rsp proto.Message, meta outputs.Meta) { if rsp == nil { return } // load current config and file cfg := f.cfg.Load() dc := f.dynCfg.Load() fileHandle := f.file.Load() if cfg == nil || dc == nil || fileHandle == nil { return } err := f.sem.Acquire(ctx, 1) if errors.Is(err, context.Canceled) { return } if err != nil { f.logger.Printf("failed acquiring semaphore: %v", err) return } defer f.sem.Release(1) numberOfReceivedMsgs.WithLabelValues(cfg.Name, (*fileHandle).Name()).Inc() rsp, err = outputs.AddSubscriptionTarget(rsp, meta, cfg.AddTarget, dc.targetTpl) if err != nil { f.logger.Printf("failed to add target to the response: %v", err) } bb, err := outputs.Marshal(rsp, meta, dc.mo, cfg.SplitEvents, dc.evps...) if err != nil { if cfg.Debug { f.logger.Printf("failed marshaling proto msg: %v", err) } numberOfFailWriteMsgs.WithLabelValues(cfg.Name, (*fileHandle).Name(), "marshal_error").Inc() return } if len(bb) == 0 { return } for _, b := range bb { if dc.msgTpl != nil { b, err = outputs.ExecTemplate(b, dc.msgTpl) if err != nil { if cfg.Debug { log.Printf("failed to execute template: %v", err) } numberOfFailWriteMsgs.WithLabelValues(cfg.Name, (*fileHandle).Name(), "template_error").Inc() continue } } n, err := (*fileHandle).Write(append(b, []byte(cfg.Separator)...)) if err != nil { if cfg.Debug { f.logger.Printf("failed to write to file '%s': %v", (*fileHandle).Name(), err) } numberOfFailWriteMsgs.WithLabelValues(cfg.Name, (*fileHandle).Name(), "write_error").Inc() return } numberOfWrittenBytes.WithLabelValues(cfg.Name, (*fileHandle).Name()).Add(float64(n)) numberOfWrittenMsgs.WithLabelValues(cfg.Name, (*fileHandle).Name()).Inc() } } func (f *File) WriteEvent(ctx context.Context, ev *formatters.EventMsg) { select { case <-ctx.Done(): return default: } // load current config and file cfg := f.cfg.Load() dc := f.dynCfg.Load() fileHandle := f.file.Load() if cfg == nil || dc == nil || fileHandle == nil { return } var evs = []*formatters.EventMsg{ev} for _, proc := range dc.evps { evs = proc.Apply(evs...) } toWrite := []byte{} if cfg.SplitEvents { for _, pev := range evs { var err error var b []byte if cfg.Multiline { b, err = json.MarshalIndent(pev, "", cfg.Indent) } else { b, err = json.Marshal(pev) } if err != nil { fmt.Printf("failed to WriteEvent: %v", err) numberOfFailWriteMsgs.WithLabelValues(cfg.Name, (*fileHandle).Name(), "marshal_error").Inc() return } toWrite = append(toWrite, b...) toWrite = append(toWrite, []byte(cfg.Separator)...) } } else { var err error var b []byte if cfg.Multiline { b, err = json.MarshalIndent(evs, "", cfg.Indent) } else { b, err = json.Marshal(evs) } if err != nil { fmt.Printf("failed to WriteEvent: %v", err) numberOfFailWriteMsgs.WithLabelValues(cfg.Name, (*fileHandle).Name(), "marshal_error").Inc() return } toWrite = append(toWrite, b...) toWrite = append(toWrite, []byte(cfg.Separator)...) } n, err := (*fileHandle).Write(toWrite) if err != nil { fmt.Printf("failed to WriteEvent: %v", err) numberOfFailWriteMsgs.WithLabelValues(cfg.Name, (*fileHandle).Name(), "write_error").Inc() return } numberOfWrittenBytes.WithLabelValues(cfg.Name, (*fileHandle).Name()).Add(float64(n)) numberOfWrittenMsgs.WithLabelValues(cfg.Name, (*fileHandle).Name()).Inc() } // Close // func (f *File) Close() error { fileHandle := f.file.Load() if fileHandle == nil { return nil } f.logger.Printf("closing file '%s' output", (*fileHandle).Name()) return (*fileHandle).Close() } func (f *File) buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) { tcs, ps, acts, err := gutils.GetConfigMaps(f.store) if err != nil { return nil, err } evps, err := formatters.MakeEventProcessors( logger, eventProcessors, ps, tcs, acts, ) if err != nil { return nil, err } return evps, nil } func (f *File) setLogger(logger *log.Logger) { if logger != nil && f.logger != nil { f.logger.SetOutput(logger.Writer()) f.logger.SetFlags(logger.Flags()) } } func fileNeedsReopen(old, new *config) bool { if old == nil || new == nil { return true } // file needs to be reopened if file-related settings changed return old.FileName != new.FileName || old.FileType != new.FileType || rotationChanged(old.Rotation, new.Rotation) } func rotationChanged(old, new *rotationConfig) bool { if old == nil && new == nil { return false } if old == nil || new == nil { return true } // compare rotation config fields return old.MaxSize != new.MaxSize || old.MaxAge != new.MaxAge || old.MaxBackups != new.MaxBackups } ================================================ FILE: pkg/outputs/file/rotating_file.go ================================================ package file import ( "gopkg.in/natefinch/lumberjack.v2" ) // RotationConfig manages configuration around file rotation type rotationConfig struct { MaxSize int `mapstructure:"max-size,omitempty"` MaxBackups int `mapstructure:"max-backups,omitempty"` MaxAge int `mapstructure:"max-age,omitempty"` Compress bool `mapstructure:"compress,omitempty"` } func (r *rotationConfig) SetDefaults() { if r.MaxSize == 0 { r.MaxSize = 100 } if r.MaxBackups == 0 { r.MaxBackups = 3 } if r.MaxAge == 0 { r.MaxAge = 30 } } type rotatingFile struct { l *lumberjack.Logger } // newRotatingFile initialize the lumberjack instance func newRotatingFile(cfg *config) *rotatingFile { cfg.Rotation.SetDefaults() lj := lumberjack.Logger{ Filename: cfg.FileName, MaxSize: cfg.Rotation.MaxSize, MaxBackups: cfg.Rotation.MaxBackups, MaxAge: cfg.Rotation.MaxAge, Compress: cfg.Rotation.Compress, } return &rotatingFile{l: &lj} } // Close closes the file func (r *rotatingFile) Close() error { return r.l.Close() } // Name returns the name of the file func (r *rotatingFile) Name() string { return r.l.Filename } // Write implements io.Writer func (r *rotatingFile) Write(b []byte) (int, error) { return r.l.Write(b) } ================================================ FILE: pkg/outputs/gnmi_output/gnmi_output.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package gnmi_output import ( "context" "encoding/json" "errors" "fmt" "io" "log" "net" "strings" "text/template" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/openconfig/gnmi/cache" "github.com/openconfig/gnmi/proto/gnmi" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sync/semaphore" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/gtemplate" "github.com/openconfig/gnmic/pkg/outputs" ) const ( loggingPrefix = "[gnmi_output:%s] " defaultMaxSubscriptions = 64 defaultMaxGetRPC = 64 defaultAddress = ":57400" ) func init() { outputs.Register("gnmi", func() outputs.Output { return &gNMIOutput{ cfg: new(config), logger: log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags), } }) } // gNMIOutput // type gNMIOutput struct { outputs.BaseOutput cfg *config logger *log.Logger targetTpl *template.Template // srv *server grpcSrv *grpc.Server c *cache.Cache reg *prometheus.Registry } type config struct { //Name string `mapstructure:"name,omitempty"` Address string `mapstructure:"address,omitempty"` TargetTemplate string `mapstructure:"target-template,omitempty"` MaxSubscriptions int64 `mapstructure:"max-subscriptions,omitempty"` MaxUnaryRPC int64 `mapstructure:"max-unary-rpc,omitempty"` TLS *types.TLSConfig `mapstructure:"tls,omitempty"` EnableMetrics bool `mapstructure:"enable-metrics,omitempty"` Debug bool `mapstructure:"debug,omitempty"` } func (g *gNMIOutput) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error { err := outputs.DecodeConfig(cfg, g.cfg) if err != nil { return err } g.c = cache.New(nil) g.srv = g.newServer() options := &outputs.OutputOptions{} for _, opt := range opts { if err := opt(options); err != nil { return err } } if options.Logger != nil && g.logger != nil { g.logger.SetOutput(options.Logger.Writer()) g.logger.SetFlags(options.Logger.Flags()) } g.reg = options.Registry g.registerMetrics() err = g.setDefaults() if err != nil { return err } g.logger.SetPrefix(fmt.Sprintf(loggingPrefix, name)) if g.targetTpl == nil { g.targetTpl, err = gtemplate.CreateTemplate(fmt.Sprintf("%s-target-template", name), g.cfg.TargetTemplate) if err != nil { return err } } err = g.startGRPCServer() if err != nil { return err } g.logger.Printf("started gnmi output: %v", g) return nil } func (g *gNMIOutput) Update(ctx context.Context, cfg map[string]any) error { return errors.New("not implemented for this output type") } func (g *gNMIOutput) Write(ctx context.Context, rsp proto.Message, meta outputs.Meta) { var err error rsp, err = outputs.AddSubscriptionTarget(rsp, meta, "if-not-present", g.targetTpl) if err != nil { g.logger.Printf("failed to add target to the response: %v", err) } switch rsp := rsp.(type) { case *gnmi.SubscribeResponse: switch rsp := rsp.Response.(type) { case *gnmi.SubscribeResponse_Update: target := rsp.Update.GetPrefix().GetTarget() if target == "" { g.logger.Printf("response missing target") return } if !g.c.HasTarget(target) { g.c.Add(target) g.logger.Printf("target %q added to the local cache", target) } if g.cfg.Debug { g.logger.Printf("updating target %q local cache", target) } err = g.c.GnmiUpdate(rsp.Update) if err != nil { g.logger.Printf("failed to update gNMI cache: %v", err) return } case *gnmi.SubscribeResponse_SyncResponse: } } } func (g *gNMIOutput) WriteEvent(context.Context, *formatters.EventMsg) {} func (g *gNMIOutput) Close() error { //g.teardown() g.grpcSrv.Stop() return nil } func (g *gNMIOutput) registerMetrics() { if !g.cfg.EnableMetrics { return } if g.reg == nil { g.logger.Printf("ERR: output metrics enabled but main registry is not initialized, enable main metrics under `api-server`") return } srvMetrics := grpc_prometheus.NewServerMetrics() srvMetrics.InitializeMetrics(g.grpcSrv) if err := g.reg.Register(srvMetrics); err != nil { g.logger.Printf("failed to register prometheus metrics: %v", err) } } func (g *gNMIOutput) String() string { b, err := json.Marshal(g.cfg) if err != nil { return "" } return string(b) } func (g *gNMIOutput) setDefaults() error { if g.cfg.Address == "" { g.cfg.Address = defaultAddress } if g.cfg.TargetTemplate == "" { g.targetTpl = outputs.DefaultTargetTemplate } if g.cfg.MaxSubscriptions <= 0 { g.cfg.MaxSubscriptions = defaultMaxSubscriptions } if g.cfg.MaxUnaryRPC <= 0 { g.cfg.MaxUnaryRPC = defaultMaxGetRPC } return nil } func (g *gNMIOutput) startGRPCServer() error { g.srv.subscribeRPCsem = semaphore.NewWeighted(g.cfg.MaxSubscriptions) g.srv.unaryRPCsem = semaphore.NewWeighted(g.cfg.MaxUnaryRPC) g.c.SetClient(g.srv.Update) var l net.Listener var err error network := "tcp" addr := g.cfg.Address if strings.HasPrefix(g.cfg.Address, "unix://") { network = "unix" addr = strings.TrimPrefix(addr, "unix://") } l, err = net.Listen(network, addr) if err != nil { return err } opts, err := g.serverOpts() if err != nil { return err } g.grpcSrv = grpc.NewServer(opts...) gnmi.RegisterGNMIServer(g.grpcSrv, g.srv) go g.grpcSrv.Serve(l) return nil } func (g *gNMIOutput) serverOpts() ([]grpc.ServerOption, error) { opts := make([]grpc.ServerOption, 0) if g.cfg.EnableMetrics { opts = append(opts, grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor)) } if g.cfg.TLS == nil { return opts, nil } tlscfg, err := utils.NewTLSConfig( g.cfg.TLS.CaFile, g.cfg.TLS.CertFile, g.cfg.TLS.KeyFile, g.cfg.TLS.ClientAuth, false, true, ) if err != nil { return nil, err } if tlscfg != nil { opts = append(opts, grpc.Creds(credentials.NewTLS(tlscfg))) } return opts, nil } ================================================ FILE: pkg/outputs/gnmi_output/gnmi_server.go ================================================ /* Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This gNMI server implementation is based on the one found here: // https://github.com/openconfig/gnmi/blob/c69a5df04b5329d70e3e76afa773669527cfad9b/subscribe/subscribe.go package gnmi_output import ( "errors" "io" "log" "sync" "golang.org/x/sync/semaphore" "google.golang.org/grpc/codes" "google.golang.org/grpc/peer" "google.golang.org/grpc/status" "github.com/openconfig/gnmi/cache" "github.com/openconfig/gnmi/coalesce" "github.com/openconfig/gnmi/ctree" "github.com/openconfig/gnmi/match" "github.com/openconfig/gnmi/path" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmi/subscribe" "github.com/openconfig/gnmic/pkg/api/types" ) type streamClient struct { target string req *gnmi.SubscribeRequest queue *coalesce.Queue stream gnmi.GNMI_SubscribeServer errChan chan<- error } type server struct { gnmi.UnimplementedGNMIServer // l *log.Logger c *cache.Cache m *match.Match subscribeRPCsem *semaphore.Weighted unaryRPCsem *semaphore.Weighted // mu *sync.RWMutex targets map[string]*types.TargetConfig } type matchClient struct { queue *coalesce.Queue err error } type syncMarker struct{} type resp struct { stream gnmi.GNMI_SubscribeServer n *ctree.Leaf dup uint32 } func (m *matchClient) Update(n interface{}) { if m.err != nil { return } _, m.err = m.queue.Insert(n) } func (g *gNMIOutput) newServer() *server { return &server{ l: g.logger, c: g.c, m: match.New(), mu: new(sync.RWMutex), targets: make(map[string]*types.TargetConfig), } } func (s *server) Update(n *ctree.Leaf) { switch v := n.Value().(type) { case *gnmi.Notification: subscribe.UpdateNotification(s.m, n, v, path.ToStrings(v.Prefix, true)) default: s.l.Printf("unexpected update type: %T", v) } } func addSubscription(m *match.Match, s *gnmi.SubscriptionList, c *matchClient) func() { removes := make([]func(), 0, len(s.GetSubscription())) prefix := path.ToStrings(s.GetPrefix(), true) for _, p := range s.GetSubscription() { if p.GetPath() == nil { continue } path := append(prefix, path.ToStrings(p.GetPath(), false)...) removes = append(removes, m.AddQuery(path, c)) } return func() { for _, remove := range removes { remove() } } } func (s *server) handleSubscriptionRequest(sc *streamClient) { var err error s.l.Printf("processing subscription to target %q", sc.target) defer func() { if err != nil { s.l.Printf("error processing subscription to target %q: %v", sc.target, err) sc.queue.Close() sc.errChan <- err return } s.l.Printf("subscription request to target %q processed", sc.target) }() if !sc.req.GetSubscribe().GetUpdatesOnly() { for _, sub := range sc.req.GetSubscribe().GetSubscription() { var fp []string fp, err = path.CompletePath(sc.req.GetSubscribe().GetPrefix(), sub.GetPath()) if err != nil { return } err = s.c.Query(sc.target, fp, func(_ []string, l *ctree.Leaf, _ interface{}) error { if err != nil { return err } _, err = sc.queue.Insert(l) return nil }) if err != nil { s.l.Printf("target %q failed internal cache query: %v", sc.target, err) return } } } _, err = sc.queue.Insert(syncMarker{}) } func (s *server) sendStreamingResults(sc *streamClient) { ctx := sc.stream.Context() peer, _ := peer.FromContext(ctx) s.l.Printf("sending streaming results from target %q to peer %q", sc.target, peer.Addr) defer s.subscribeRPCsem.Release(1) for { item, dup, err := sc.queue.Next(ctx) if coalesce.IsClosedQueue(err) { sc.errChan <- nil return } if err != nil { sc.errChan <- err return } if _, ok := item.(syncMarker); ok { err = sc.stream.Send(&gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_SyncResponse{ SyncResponse: true, }}) if err != nil { sc.errChan <- err return } continue } node, ok := item.(*ctree.Leaf) if !ok || node == nil { sc.errChan <- status.Errorf(codes.Internal, "invalid cache node: %+v", item) return } err = s.sendSubscribeResponse(&resp{ stream: sc.stream, n: node, dup: dup, }, sc) if err != nil { s.l.Printf("target %q: failed sending subscribeResponse: %v", sc.target, err) sc.errChan <- err return } // TODO: check if target was deleted ? necessary ? } } func (s *server) handlePolledSubscription(sc *streamClient) { s.handleSubscriptionRequest(sc) var err error for { if sc.queue.IsClosed() { return } _, err = sc.stream.Recv() if errors.Is(err, io.EOF) { return } if err != nil { s.l.Printf("target %q: failed poll subscription rcv: %v", sc.target, err) sc.errChan <- err return } s.l.Printf("target %q: repoll", sc.target) s.handleSubscriptionRequest(sc) s.l.Printf("target %q: repoll done", sc.target) } } func (s *server) sendSubscribeResponse(r *resp, _ *streamClient) error { notif, err := makeSubscribeResponse(r.n.Value(), r.dup) if err != nil { return status.Errorf(codes.Unknown, "unknown error: %v", err) } // No acls return r.stream.Send(notif) } func makeSubscribeResponse(n interface{}, _ uint32) (*gnmi.SubscribeResponse, error) { var notification *gnmi.Notification var ok bool notification, ok = n.(*gnmi.Notification) if !ok { return nil, status.Errorf(codes.Internal, "invalid notification type: %#v", n) } return &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: notification, }, }, nil } ================================================ FILE: pkg/outputs/gnmi_output/gnmi_server_get.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package gnmi_output import ( "context" "fmt" "strings" "sync" "time" "google.golang.org/grpc/codes" "google.golang.org/grpc/peer" "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/api/target" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" ) func (s *server) Get(ctx context.Context, req *gnmi.GetRequest) (*gnmi.GetResponse, error) { ok := s.unaryRPCsem.TryAcquire(1) if !ok { return nil, status.Errorf(codes.ResourceExhausted, "max number of Unary RPC reached") } defer s.unaryRPCsem.Release(1) numPaths := len(req.GetPath()) if numPaths == 0 && req.GetPrefix() == nil { return nil, status.Errorf(codes.InvalidArgument, "missing path") } s.mu.RLock() defer s.mu.RUnlock() origins := make(map[string]struct{}) for _, p := range req.GetPath() { origins[p.GetOrigin()] = struct{}{} if p.GetOrigin() != "gnmic" { if _, ok := origins["gnmic"]; ok { return nil, status.Errorf(codes.InvalidArgument, "combining `gnmic` origin with other origin values is not supported") } } } if _, ok := origins["gnmic"]; ok { return s.handlegNMIcInternalGet(ctx, req) } targetName := req.GetPrefix().GetTarget() peer, _ := peer.FromContext(ctx) s.l.Printf("received Get request from %q to target %q", peer.Addr, targetName) targets, err := s.selectTargets(targetName) if err != nil { return nil, err } numTargets := len(targets) if numTargets == 0 { return nil, status.Errorf(codes.NotFound, "unknown target %q", targetName) } results := make(chan *gnmi.Notification) errChan := make(chan error, numTargets) response := &gnmi.GetResponse{ // assume one notification per path per target Notification: make([]*gnmi.Notification, 0, numTargets*numPaths), } done := make(chan struct{}) ctx, cancel := context.WithCancel(ctx) defer cancel() go func() { for { select { case notif, ok := <-results: if !ok { close(done) return } response.Notification = append(response.Notification, notif) case <-ctx.Done(): return } } }() wg := new(sync.WaitGroup) wg.Add(numTargets) for name, tc := range targets { go func(name string, tc *types.TargetConfig) { // name = outputs.GetHost(name) defer wg.Done() t := target.NewTarget(tc) ctx, cancel := context.WithTimeout(ctx, tc.Timeout) defer cancel() err := t.CreateGNMIClient(ctx) if err != nil { s.l.Printf("target %q err: %v", name, err) errChan <- fmt.Errorf("target %q err: %v", name, err) return } creq := proto.Clone(req).(*gnmi.GetRequest) if creq.GetPrefix() == nil { creq.Prefix = new(gnmi.Path) } if creq.GetPrefix().GetTarget() == "" || creq.GetPrefix().GetTarget() == "*" { creq.Prefix.Target = name } res, err := t.Get(ctx, creq) if err != nil { s.l.Printf("target %q err: %v", name, err) errChan <- fmt.Errorf("target %q err: %v", name, err) return } for _, n := range res.GetNotification() { if n.GetPrefix() == nil { n.Prefix = new(gnmi.Path) } if n.GetPrefix().GetTarget() == "" { n.Prefix.Target = name } results <- n } }(name, tc) } wg.Wait() close(results) close(errChan) for err := range errChan { if err != nil { return nil, status.Errorf(codes.Internal, "%v", err) } } <-done s.l.Printf("sending GetResponse to %q: %+v", peer.Addr, response) return response, nil } func targetConfigToNotification(tc *types.TargetConfig) *gnmi.Notification { n := &gnmi.Notification{ Timestamp: time.Now().UnixNano(), Prefix: &gnmi.Path{ Origin: "gnmic", Elem: []*gnmi.PathElem{ { Name: "target", Key: map[string]string{"name": tc.Name}, }, }, }, Update: []*gnmi.Update{ { Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "address"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: tc.Address}, }, }, }, } if tc.Username != nil { n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "username"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: *tc.Username}, }, }) } if tc.Insecure != nil { n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "insecure"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: fmt.Sprint(*tc.Insecure)}, }, }) } if tc.SkipVerify != nil { n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "skip-verify"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: fmt.Sprint(*tc.SkipVerify)}, }, }) } n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "timeout"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: tc.Timeout.String()}, }, }) if tc.TLSCA != nil { n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "tls-ca"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: tc.TLSCAString()}, }, }) } if tc.TLSCert != nil { n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "tls-cert"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: tc.TLSCertString()}, }, }) } if tc.TLSKey != nil && tc.TLSKeyString() != "NA" { n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "tls-key"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: tc.TLSKeyString()}, }, }) } if len(tc.Outputs) > 0 { typedVals := make([]*gnmi.TypedValue, 0, len(tc.Subscriptions)) for _, out := range tc.Outputs { typedVals = append(typedVals, &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: out}, }) } n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "outputs"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_LeaflistVal{ LeaflistVal: &gnmi.ScalarArray{ Element: typedVals, }, }, }, }) } if len(tc.Subscriptions) > 0 { typedVals := make([]*gnmi.TypedValue, 0, len(tc.Subscriptions)) for _, sub := range tc.Subscriptions { typedVals = append(typedVals, &gnmi.TypedValue{ Value: &gnmi.TypedValue_AsciiVal{AsciiVal: sub}, }) } n.Update = append(n.Update, &gnmi.Update{ Path: &gnmi.Path{ Elem: []*gnmi.PathElem{ {Name: "subscriptions"}, }, }, Val: &gnmi.TypedValue{ Value: &gnmi.TypedValue_LeaflistVal{ LeaflistVal: &gnmi.ScalarArray{ Element: typedVals, }, }, }, }) } return n } func (s *server) selectTargets(target string) (map[string]*types.TargetConfig, error) { if target == "" || target == "*" { return s.targets, nil } targetsNames := strings.Split(target, ",") targets := make(map[string]*types.TargetConfig) s.mu.RLock() defer s.mu.RUnlock() OUTER: for i := range targetsNames { for n, tc := range s.targets { if utils.GetHost(n) == targetsNames[i] { targets[n] = tc continue OUTER } } return nil, status.Errorf(codes.NotFound, "target %q is not known", targetsNames[i]) } return targets, nil } func (s *server) handlegNMIcInternalGet(_ context.Context, req *gnmi.GetRequest) (*gnmi.GetResponse, error) { if len(req.GetPath()) > 1 { return nil, status.Errorf(codes.InvalidArgument, "only one path at a time is supported") } if req.GetPath()[0].Elem[0].Name == "targets" { notifs := make([]*gnmi.Notification, 0, len(s.targets)) for _, tc := range s.targets { notifs = append(notifs, targetConfigToNotification(tc)) } return &gnmi.GetResponse{Notification: notifs}, nil } return nil, status.Errorf(codes.InvalidArgument, "unknown path") } ================================================ FILE: pkg/outputs/gnmi_output/gnmi_server_set.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package gnmi_output import ( "context" "fmt" "sync" "time" "google.golang.org/grpc/codes" "google.golang.org/grpc/peer" "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/api/target" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" ) func (s *server) Set(ctx context.Context, req *gnmi.SetRequest) (*gnmi.SetResponse, error) { ok := s.unaryRPCsem.TryAcquire(1) if !ok { return nil, status.Errorf(codes.ResourceExhausted, "max number of Unary RPC reached") } defer s.unaryRPCsem.Release(1) numUpdates := len(req.GetUpdate()) numReplaces := len(req.GetReplace()) numDeletes := len(req.GetDelete()) if numUpdates+numReplaces+numDeletes == 0 { return nil, status.Errorf(codes.InvalidArgument, "missing update/replace/delete path(s)") } s.mu.RLock() defer s.mu.RUnlock() targetName := req.GetPrefix().GetTarget() peer, _ := peer.FromContext(ctx) s.l.Printf("received Set request from %q to target %q", peer.Addr, targetName) targets, err := s.selectTargets(targetName) if err != nil { return nil, err } numTargets := len(targets) if numTargets == 0 { return nil, status.Errorf(codes.NotFound, "unknown target(s) %q", targetName) } results := make(chan *gnmi.UpdateResult) errChan := make(chan error, numTargets) response := &gnmi.SetResponse{ // assume one update per target, per update/replace/delete Response: make([]*gnmi.UpdateResult, 0, numTargets*(numUpdates+numReplaces+numDeletes)), } done := make(chan struct{}) ctx, cancel := context.WithCancel(ctx) defer cancel() go func() { for { select { case upd, ok := <-results: if !ok { response.Timestamp = time.Now().UnixNano() close(done) return } response.Response = append(response.Response, upd) case <-ctx.Done(): return } } }() wg := new(sync.WaitGroup) wg.Add(numTargets) for name, tc := range targets { go func(name string, tc *types.TargetConfig) { name = utils.GetHost(name) defer wg.Done() t := target.NewTarget(tc) err := t.CreateGNMIClient(ctx) if err != nil { s.l.Printf("target %q err: %v", name, err) errChan <- fmt.Errorf("target %q err: %v", name, err) return } creq := proto.Clone(req).(*gnmi.SetRequest) if creq.GetPrefix() == nil { creq.Prefix = new(gnmi.Path) } if creq.GetPrefix().GetTarget() == "" || creq.GetPrefix().GetTarget() == "*" { creq.Prefix.Target = name } res, err := t.Set(ctx, creq) if err != nil { s.l.Printf("target %q err: %v", name, err) errChan <- fmt.Errorf("target %q err: %v", name, err) return } for _, upd := range res.GetResponse() { upd.Path.Target = name results <- upd } }(name, tc) } wg.Wait() close(results) close(errChan) for err := range errChan { if err != nil { return nil, status.Errorf(codes.Internal, "%v", err) } } <-done s.l.Printf("sending SetResponse to %q: %+v", peer.Addr, response) return response, nil } ================================================ FILE: pkg/outputs/gnmi_output/gnmi_server_subscribe.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package gnmi_output import ( "fmt" "io" "strings" "google.golang.org/grpc/codes" "google.golang.org/grpc/peer" "google.golang.org/grpc/status" "github.com/openconfig/gnmi/coalesce" "github.com/openconfig/gnmi/proto/gnmi" ) func (s *server) Subscribe(stream gnmi.GNMI_SubscribeServer) error { sc := &streamClient{ stream: stream, } var err error sc.req, err = stream.Recv() switch { case err == io.EOF: return nil case err != nil: return err case sc.req.GetSubscribe() == nil: return status.Errorf(codes.InvalidArgument, "the subscribe request must contain a subscription definition") } sc.target = sc.req.GetSubscribe().GetPrefix().GetTarget() if sc.target == "" { sc.target = "*" sub := sc.req.GetSubscribe() if sub.GetPrefix() == nil { sub.Prefix = &gnmi.Path{Target: "*"} } else { sub.Prefix.Target = "*" } } if !s.c.HasTarget(sc.target) { return status.Errorf(codes.NotFound, "target %q not found", sc.target) } peer, _ := peer.FromContext(stream.Context()) s.l.Printf("received a subscribe request mode=%v from %q for target %q", sc.req.GetSubscribe().GetMode(), peer.Addr, sc.target) defer s.l.Printf("subscription from peer %q terminated", peer.Addr) sc.queue = coalesce.NewQueue() errChan := make(chan error, 3) sc.errChan = errChan s.l.Printf("acquiring subscription spot for target %q", sc.target) ok := s.subscribeRPCsem.TryAcquire(1) if !ok { return status.Errorf(codes.ResourceExhausted, "could not acquire a subscription spot") } s.l.Printf("acquired subscription spot for target %q", sc.target) switch sc.req.GetSubscribe().GetMode() { case gnmi.SubscriptionList_ONCE: go func() { s.handleSubscriptionRequest(sc) sc.queue.Close() }() case gnmi.SubscriptionList_POLL: go s.handlePolledSubscription(sc) case gnmi.SubscriptionList_STREAM: if sc.req.GetSubscribe().GetUpdatesOnly() { sc.queue.Insert(syncMarker{}) } remove := addSubscription(s.m, sc.req.GetSubscribe(), &matchClient{queue: sc.queue}) defer remove() if !sc.req.GetSubscribe().GetUpdatesOnly() { go s.handleSubscriptionRequest(sc) } default: return status.Errorf(codes.InvalidArgument, "unrecognized subscription mode: %v", sc.req.GetSubscribe().GetMode()) } // send all nodes added to queue go s.sendStreamingResults(sc) var errs = make([]error, 0) for err := range errChan { errs = append(errs, err) } if len(errs) > 0 { sb := strings.Builder{} sb.WriteString("multiple errors occurred:\n") for _, err := range errs { sb.WriteString(fmt.Sprintf("- %v\n", err)) } return fmt.Errorf("%v", sb) } return nil } ================================================ FILE: pkg/outputs/influxdb_output/influxdb_cache.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package influxdb_output import ( "context" "fmt" "time" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/cache" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/outputs" ) func (i *influxDBOutput) initCache(ctx context.Context, name string) error { var err error cfg := i.cfg.Load() if cfg == nil { return fmt.Errorf("config is nil") } i.gnmiCache, err = cache.New(cfg.CacheConfig, cache.WithLogger(i.logger)) if err != nil { return err } i.cacheTicker = time.NewTicker(cfg.CacheFlushTimer) i.done = make(chan struct{}) go i.runCache(ctx, name) return nil } func (i *influxDBOutput) stopCache() { i.cacheTicker.Stop() close(i.done) i.gnmiCache.Stop() } func (i *influxDBOutput) runCache(ctx context.Context, name string) { for { select { case <-ctx.Done(): return case <-i.done: return case <-i.cacheTicker.C: cfg := i.cfg.Load() if cfg == nil { continue } if cfg.Debug { i.logger.Printf("cache timer tick") } i.readCache(ctx) } } } func (i *influxDBOutput) readCache(ctx context.Context) { notifications, err := i.gnmiCache.ReadAll() if err != nil { i.logger.Printf("failed to read from cache: %v", err) return } cfg := i.cfg.Load() dc := i.dynCfg.Load() if cfg == nil || dc == nil { return } if cfg.Debug { i.logger.Printf("read notifications: %+v", notifications) } events := make([]*formatters.EventMsg, 0, len(notifications)) for subName, notifs := range notifications { // build events without processors for _, notif := range notifs { ievents, err := formatters.ResponseToEventMsgs(subName, &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{Update: notif}, }, outputs.Meta{"subscription-name": subName}) if err != nil { i.logger.Printf("failed to convert gNMI notifications to events: %v", err) return } events = append(events, ievents...) } } for _, proc := range dc.evps { events = proc.Apply(events...) } resetChan := i.reset.Load() if resetChan == nil { return } for _, ev := range events { select { case <-ctx.Done(): return case <-*resetChan: return case i.eventChan <- ev: } } } func cacheCfgEqual(a, b *cache.Config) bool { if a == nil && b == nil { return true } if a == nil || b == nil { return false } // Compare the fields you actually use; example: return a.Type == b.Type && a.Expiration == b.Expiration && a.Debug == b.Debug && a.Address == b.Address && a.Timeout == b.Timeout && a.Username == b.Username && a.Password == b.Password && a.MaxBytes == b.MaxBytes && a.MaxMsgsPerSubscription == b.MaxMsgsPerSubscription && a.FetchBatchSize == b.FetchBatchSize && a.FetchWaitTime == b.FetchWaitTime } ================================================ FILE: pkg/outputs/influxdb_output/influxdb_output.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project ("Work") made under the Google Software Grant and Corporate Contributor License Agreement ("CLA") and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose. // This code is provided on an "as is" basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package influxdb_output import ( "context" "crypto/tls" "encoding/json" "fmt" "log" "maps" "math" "net/url" "os" "slices" "strings" "sync/atomic" "text/template" "time" "google.golang.org/protobuf/proto" influxdb2 "github.com/influxdata/influxdb-client-go/v2" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/cache" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/gtemplate" "github.com/openconfig/gnmic/pkg/outputs" gutils "github.com/openconfig/gnmic/pkg/utils" "github.com/zestor-dev/zestor/store" ) const ( defaultURL = "http://localhost:8086" defaultBatchSize = 1000 defaultFlushTimer = 10 * time.Second minHealthCheckPeriod = 30 * time.Second defaultCacheFlushTimer = 5 * time.Second numWorkers = 1 loggingPrefix = "[influxdb_output:%s] " deleteTagValue = "true" ) func init() { outputs.Register("influxdb", func() outputs.Output { return &influxDBOutput{} }) } type influxDBOutput struct { outputs.BaseOutput cfg *atomic.Pointer[Config] dynCfg *atomic.Pointer[dynConfig] client *atomic.Pointer[influxdb2.Client] logger *log.Logger cancelFn context.CancelFunc eventChan chan *formatters.EventMsg reset *atomic.Pointer[chan struct{}] startSig *atomic.Pointer[chan struct{}] wasUP atomic.Bool dbVersion atomic.Value // stores string gnmiCache cache.Cache cacheTicker *time.Ticker done chan struct{} store store.Store[any] healthCancel context.CancelFunc } func (i *influxDBOutput) init() { i.cfg = new(atomic.Pointer[Config]) i.dynCfg = new(atomic.Pointer[dynConfig]) i.client = new(atomic.Pointer[influxdb2.Client]) i.eventChan = make(chan *formatters.EventMsg) i.reset = new(atomic.Pointer[chan struct{}]) i.startSig = new(atomic.Pointer[chan struct{}]) i.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) } type dynConfig struct { targetTpl *template.Template evps []formatters.EventProcessor } type Config struct { Name string `mapstructure:"name,omitempty"` URL string `mapstructure:"url,omitempty"` Org string `mapstructure:"org,omitempty"` Bucket string `mapstructure:"bucket,omitempty"` Token string `mapstructure:"token,omitempty"` BatchSize uint `mapstructure:"batch-size,omitempty"` FlushTimer time.Duration `mapstructure:"flush-timer,omitempty"` UseGzip bool `mapstructure:"use-gzip,omitempty"` EnableTLS bool `mapstructure:"enable-tls,omitempty"` TLS *types.TLSConfig `mapstructure:"tls,omitempty" json:"tls,omitempty"` HealthCheckPeriod time.Duration `mapstructure:"health-check-period,omitempty"` Debug bool `mapstructure:"debug,omitempty"` AddTarget string `mapstructure:"add-target,omitempty"` TargetTemplate string `mapstructure:"target-template,omitempty"` EventProcessors []string `mapstructure:"event-processors,omitempty"` EnableMetrics bool `mapstructure:"enable-metrics,omitempty"` OverrideTimestamps bool `mapstructure:"override-timestamps,omitempty"` TimestampPrecision string `mapstructure:"timestamp-precision,omitempty"` CacheConfig *cache.Config `mapstructure:"cache,omitempty"` CacheFlushTimer time.Duration `mapstructure:"cache-flush-timer,omitempty"` DeleteTag string `mapstructure:"delete-tag,omitempty"` } func (k *influxDBOutput) String() string { cfg := k.cfg.Load() if cfg == nil { return "" } b, err := json.Marshal(cfg) if err != nil { return "" } return string(b) } func (i *influxDBOutput) buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) { tcs, ps, acts, err := gutils.GetConfigMaps(i.store) if err != nil { return nil, err } evps, err := formatters.MakeEventProcessors( logger, eventProcessors, ps, tcs, acts, ) if err != nil { return nil, err } return evps, nil } func (i *influxDBOutput) setLogger(logger *log.Logger) { if logger != nil && i.logger != nil { i.logger.SetOutput(logger.Writer()) i.logger.SetFlags(logger.Flags()) } } func (i *influxDBOutput) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error { i.init() // init struct fields newCfg := new(Config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } i.logger.SetPrefix(fmt.Sprintf(loggingPrefix, name)) options := &outputs.OutputOptions{} for _, opt := range opts { if err := opt(options); err != nil { return err } } i.store = options.Store if newCfg.Name == "" { newCfg.Name = name } // apply logger i.setLogger(options.Logger) // set defaults i.setDefaultsFor(newCfg) if _, err := url.Parse(newCfg.URL); err != nil { return fmt.Errorf("invalid url: %w", err) } // store config i.cfg.Store(newCfg) // build dynamic config dc := new(dynConfig) // initialize event processors dc.evps, err = i.buildEventProcessors(options.Logger, newCfg.EventProcessors) if err != nil { return err } // initialize template if newCfg.TargetTemplate == "" { dc.targetTpl = outputs.DefaultTargetTemplate } else if newCfg.AddTarget != "" { dc.targetTpl, err = gtemplate.CreateTemplate("target-template", newCfg.TargetTemplate) if err != nil { return err } dc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs) } i.dynCfg.Store(dc) // initialize cache if newCfg.CacheConfig != nil { err = i.initCache(ctx, name) if err != nil { return err } } // initialize reset and startSig channels resetChan := make(chan struct{}) i.reset.Store(&resetChan) startSigChan := make(chan struct{}) i.startSig.Store(&startSigChan) ctx, i.cancelFn = context.WithCancel(ctx) influxOpts, err := clientOptsFor(newCfg) if err != nil { return err } // initialize influxdb client CRCLIENT: if ctx.Err() != nil { return ctx.Err() } newClient := influxdb2.NewClientWithOptions(newCfg.URL, newCfg.Token, influxOpts) i.client.Store(&newClient) // start influx health check if newCfg.HealthCheckPeriod > 0 { err = i.health(ctx) if err != nil { i.logger.Printf("failed to check influxdb health: %v", err) time.Sleep(2 * time.Second) goto CRCLIENT } hcCtx, hcCancel := context.WithCancel(ctx) i.healthCancel = hcCancel go i.healthCheck(hcCtx) } i.wasUP.Store(true) i.logger.Printf("initialized influxdb client: %s", i.String()) for k := 0; k < numWorkers; k++ { go i.worker(ctx, k) } go func() { <-ctx.Done() i.Close() }() return nil } func (i *influxDBOutput) setDefaultsFor(c *Config) { if c.URL == "" { c.URL = defaultURL } if c.BatchSize == 0 { c.BatchSize = defaultBatchSize } if c.FlushTimer == 0 { c.FlushTimer = defaultFlushTimer } if c.HealthCheckPeriod != 0 && c.HealthCheckPeriod < minHealthCheckPeriod { c.HealthCheckPeriod = minHealthCheckPeriod } if c.CacheConfig != nil { if c.CacheFlushTimer == 0 { c.CacheFlushTimer = defaultCacheFlushTimer } } } // Build influx options from an arbitrary config (no side effects on i.cfg) func clientOptsFor(c *Config) (*influxdb2.Options, error) { iopts := influxdb2.DefaultOptions(). SetUseGZip(c.UseGzip). SetBatchSize(c.BatchSize). SetFlushInterval(uint(c.FlushTimer.Milliseconds())) // TLS from explicit TLS config if c.TLS != nil { tlsConfig, err := utils.NewTLSConfig( c.TLS.CaFile, c.TLS.CertFile, c.TLS.KeyFile, "", c.TLS.SkipVerify, false, ) if err != nil { return nil, err } iopts.SetTLSConfig(tlsConfig) } // Legacy "EnableTLS" flag (insecure) if c.EnableTLS { iopts.SetTLSConfig(&tls.Config{InsecureSkipVerify: true}) } switch c.TimestampPrecision { case "s": iopts.SetPrecision(time.Second) case "ms": iopts.SetPrecision(time.Millisecond) case "us": iopts.SetPrecision(time.Microsecond) } if c.Debug { iopts.SetLogLevel(3) } return iopts, nil } func (i *influxDBOutput) Validate(cfg map[string]any) error { ncfg := new(Config) err := outputs.DecodeConfig(cfg, ncfg) if err != nil { return err } if _, err := url.Parse(ncfg.URL); err != nil { return fmt.Errorf("invalid url: %w", err) } _, err = gtemplate.CreateTemplate("target-template", ncfg.TargetTemplate) if err != nil { return err } return nil } func (i *influxDBOutput) Update(ctx context.Context, cfg map[string]any) error { newCfg := new(Config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } currCfg := i.cfg.Load() if newCfg.Name == "" && currCfg != nil { newCfg.Name = currCfg.Name } i.setDefaultsFor(newCfg) // check if event processors changed rebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0 // rebuild dynamic config dc := new(dynConfig) // rebuild templates if newCfg.TargetTemplate == "" { dc.targetTpl = outputs.DefaultTargetTemplate } else if newCfg.AddTarget != "" { t, err := gtemplate.CreateTemplate("target-template", newCfg.TargetTemplate) if err != nil { return err } dc.targetTpl = t.Funcs(outputs.TemplateFuncs) } else { dc.targetTpl = outputs.DefaultTargetTemplate } // rebuild event processors if needed prevDC := i.dynCfg.Load() if rebuildProcessors { dc.evps, err = i.buildEventProcessors(i.logger, newCfg.EventProcessors) if err != nil { return err } } else if prevDC != nil { dc.evps = prevDC.evps } // store new dynamic config i.dynCfg.Store(dc) // store new config i.cfg.Store(newCfg) // check if client needs rebuild needsClientRebuild := clientNeedsRebuild(currCfg, newCfg) if needsClientRebuild { // rebuild influxdb client options iopts, err := clientOptsFor(newCfg) if err != nil { return err } // rebuild influxdb client newClient := influxdb2.NewClientWithOptions(newCfg.URL, newCfg.Token, iopts) // health check if enabled if newCfg.HealthCheckPeriod > 0 { if _, err := newClient.Health(ctx); err != nil { // do not return error, continue i.logger.Printf("update: influx health probe failed (continuing): %v", err) } } // swap client oldClientPtr := i.client.Swap(&newClient) oldClient := *oldClientPtr // close old client if oldClient != nil { oldClient.Close() } // signal workers to rebuild their write APIs oldReset := i.reset.Load() newResetChan := make(chan struct{}) i.reset.Store(&newResetChan) close(*oldReset) } // cache toggle oldHadCache := currCfg != nil && currCfg.CacheConfig != nil newHasCache := newCfg.CacheConfig != nil switch { case oldHadCache && !newHasCache: // stop old cache if present i.stopCache() case !oldHadCache && newHasCache: // init new cache if requested if err := i.initCache(ctx, newCfg.Name); err != nil { return err } case oldHadCache && newHasCache: // check if cache config changed sameCacheConfig := cacheCfgEqual(currCfg.CacheConfig, newCfg.CacheConfig) if sameCacheConfig { if currCfg.CacheFlushTimer != newCfg.CacheFlushTimer { // change flush timer if i.cacheTicker != nil { i.cacheTicker.Stop() } i.cacheTicker = time.NewTicker(newCfg.CacheFlushTimer) } } else { // cache config changed, stop old cache and init new cache i.stopCache() if err := i.initCache(ctx, newCfg.Name); err != nil { return err } } } // handle health check changes oldPeriod := time.Duration(0) if currCfg != nil { oldPeriod = currCfg.HealthCheckPeriod } newPeriod := newCfg.HealthCheckPeriod periodChanged := oldPeriod != newPeriod enabledChanged := (oldPeriod == 0) != (newPeriod == 0) if enabledChanged || periodChanged { if i.healthCancel != nil { i.healthCancel() i.healthCancel = nil } if newPeriod > 0 { _ = i.health(ctx) hcCtx, hcCancel := context.WithCancel(ctx) i.healthCancel = hcCancel go i.healthCheck(hcCtx) } } i.logger.Printf("updated influxdb output: %s", i.String()) return nil } func (i *influxDBOutput) UpdateProcessor(name string, pcfg map[string]any) error { cfg := i.cfg.Load() dc := i.dynCfg.Load() newEvps, changed, err := outputs.UpdateProcessorInSlice( i.logger, i.store, cfg.EventProcessors, dc.evps, name, pcfg, ) if err != nil { return err } if changed { newDC := *dc newDC.evps = newEvps i.dynCfg.Store(&newDC) i.logger.Printf("updated event processor %s", name) } return nil } func (i *influxDBOutput) Write(ctx context.Context, rsp proto.Message, meta outputs.Meta) { if rsp == nil { return } cfg := i.cfg.Load() dc := i.dynCfg.Load() resetChan := i.reset.Load() if cfg == nil || dc == nil || resetChan == nil { return } var err error rsp, err = outputs.AddSubscriptionTarget(rsp, meta, cfg.AddTarget, dc.targetTpl) if err != nil { i.logger.Printf("failed to add target to the response: %v", err) } switch rsp := rsp.(type) { case *gnmi.SubscribeResponse: measName := "default" if subName, ok := meta["subscription-name"]; ok { measName = subName } if i.gnmiCache != nil { i.gnmiCache.Write(ctx, measName, rsp) return } events, err := formatters.ResponseToEventMsgs(measName, rsp, meta, dc.evps...) if err != nil { i.logger.Printf("failed to convert message to event: %v", err) return } for _, ev := range events { select { case <-ctx.Done(): return case <-*resetChan: return case i.eventChan <- ev: } } } } func (i *influxDBOutput) WriteEvent(ctx context.Context, ev *formatters.EventMsg) { dc := i.dynCfg.Load() resetChan := i.reset.Load() if dc == nil || resetChan == nil { return } select { case <-ctx.Done(): return case <-*resetChan: return default: var evs = []*formatters.EventMsg{ev} for _, proc := range dc.evps { evs = proc.Apply(evs...) } for _, pev := range evs { i.eventChan <- pev } } } func (i *influxDBOutput) Close() error { i.logger.Printf("closing client...") cfg := i.cfg.Load() if cfg != nil && cfg.CacheConfig != nil { i.stopCache() } if i.healthCancel != nil { i.healthCancel() i.healthCancel = nil } i.cancelFn() clientPtr := i.client.Load() if *clientPtr != nil { (*clientPtr).Close() } reset := i.reset.Load() if reset != nil { select { case <-*reset: default: close(*reset) // unblock Write() and WriteEvent() } } i.logger.Printf("closed.") return nil } func (i *influxDBOutput) healthCheck(ctx context.Context) { cfg := i.cfg.Load() if cfg == nil { return } ticker := time.NewTicker(cfg.HealthCheckPeriod) defer ticker.Stop() for { select { case <-ctx.Done(): return case <-ticker.C: i.health(ctx) } } } func (i *influxDBOutput) health(ctx context.Context) error { clientPtr := i.client.Load() if clientPtr == nil || *clientPtr == nil { return fmt.Errorf("client not initialized") } res, err := (*clientPtr).Health(ctx) if err != nil { i.logger.Printf("failed health check: %v", err) if i.wasUP.Load() { oldReset := i.reset.Load() newResetChan := make(chan struct{}) i.reset.Store(&newResetChan) close(*oldReset) } return err } if res != nil { if res.Version != nil { i.dbVersion.Store(*res.Version) } b, err := json.Marshal(res) if err != nil { i.logger.Printf("failed to marshal health check result: %v", err) i.logger.Printf("health check result: %+v", res) if i.wasUP.Load() { oldReset := i.reset.Load() newResetChan := make(chan struct{}) i.reset.Store(&newResetChan) close(*oldReset) } return err } i.wasUP.Store(true) oldStartSig := i.startSig.Load() newStartSigChan := make(chan struct{}) i.startSig.Store(&newStartSigChan) close(*oldStartSig) i.logger.Printf("health check result: %s", string(b)) return nil } i.wasUP.Store(true) oldStartSig := i.startSig.Load() newStartSigChan := make(chan struct{}) i.startSig.Store(&newStartSigChan) close(*oldStartSig) i.logger.Print("health check result is nil") return nil } func (i *influxDBOutput) worker(ctx context.Context, idx int) { firstStart := true START: if ctx.Err() != nil { i.logger.Printf("worker-%d err=%v", idx, ctx.Err()) return } cfg := i.cfg.Load() if cfg == nil { i.logger.Printf("worker-%d: config not initialized", idx) return } if !firstStart && cfg.HealthCheckPeriod > 0 { i.logger.Printf("worker-%d waiting for client recovery", idx) startSigChan := i.startSig.Load() if startSigChan != nil { <-*startSigChan } } i.logger.Printf("starting worker-%d", idx) clientPtr := i.client.Load() if clientPtr == nil || *clientPtr == nil { i.logger.Printf("worker-%d: client not initialized", idx) return } client := *clientPtr resetChan := i.reset.Load() for { select { case <-ctx.Done(): if ctx.Err() != nil { i.logger.Printf("worker-%d err=%v", idx, ctx.Err()) } i.logger.Printf("worker-%d terminating...", idx) return case ev := <-i.eventChan: // Reload config for each event to get fresh values cfg := i.cfg.Load() if cfg == nil { continue } if len(ev.Values) == 0 && len(ev.Deletes) == 0 { continue } if len(ev.Values) == 0 && cfg.DeleteTag == "" { continue } for n, v := range ev.Values { switch v := v.(type) { //lint:ignore SA1019 still need DecimalVal for backward compatibility case *gnmi.Decimal64: ev.Values[n] = float64(v.Digits) / math.Pow10(int(v.Precision)) } } if ev.Timestamp == 0 || cfg.OverrideTimestamps { ev.Timestamp = time.Now().UnixNano() } if subscriptionName, ok := ev.Tags["subscription-name"]; ok { ev.Name = subscriptionName delete(ev.Tags, "subscription-name") } if len(ev.Values) > 0 { i.convertUints(ev) client.WriteAPI(cfg.Org, cfg.Bucket). WritePoint(influxdb2.NewPoint(ev.Name, ev.Tags, ev.Values, time.Unix(0, ev.Timestamp))) } if len(ev.Deletes) > 0 && cfg.DeleteTag != "" { tags := make(map[string]string, len(ev.Tags)) maps.Copy(tags, ev.Tags) tags[cfg.DeleteTag] = deleteTagValue values := make(map[string]any, len(ev.Deletes)) for _, del := range ev.Deletes { values[del] = "" } client.WriteAPI(cfg.Org, cfg.Bucket). WritePoint(influxdb2.NewPoint(ev.Name, tags, values, time.Unix(0, ev.Timestamp))) } case <-*resetChan: firstStart = false i.logger.Printf("resetting worker-%d...", idx) goto START case err := <-client.WriteAPI(cfg.Org, cfg.Bucket).Errors(): i.logger.Printf("worker-%d write error: %v", idx, err) } } } func (i *influxDBOutput) convertUints(ev *formatters.EventMsg) { dbVer := i.dbVersion.Load() if dbVer == nil { return } dbVersion, ok := dbVer.(string) if !ok || !strings.HasPrefix(dbVersion, "1.8") { return } for k, v := range ev.Values { switch v := v.(type) { case uint: ev.Values[k] = int(v) case uint8: ev.Values[k] = int(v) case uint16: ev.Values[k] = int(v) case uint32: ev.Values[k] = int(v) case uint64: ev.Values[k] = int(v) } } } func clientNeedsRebuild(old, new *Config) bool { if old == nil || new == nil { return true } return old.URL != new.URL || old.Token != new.Token || old.BatchSize != new.BatchSize || old.FlushTimer != new.FlushTimer || old.UseGzip != new.UseGzip || old.EnableTLS != new.EnableTLS || !old.TLS.Equal(new.TLS) || old.TimestampPrecision != new.TimestampPrecision || old.Debug != new.Debug } ================================================ FILE: pkg/outputs/kafka_output/kafka_metrics.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package kafka_output import ( "sync" "github.com/prometheus/client_golang/prometheus" ) var registerMetricsOnce sync.Once var kafkaNumberOfSentMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "kafka_output", Name: "number_of_kafka_msgs_sent_success_total", Help: "Number of msgs successfully sent by gnmic kafka output", }, []string{"name", "producer_id"}) var kafkaNumberOfSentBytes = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "kafka_output", Name: "number_of_written_kafka_bytes_total", Help: "Number of bytes written by gnmic kafka output", }, []string{"name", "producer_id"}) var kafkaNumberOfFailSendMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "kafka_output", Name: "number_of_kafka_msgs_sent_fail_total", Help: "Number of failed msgs sent by gnmic kafka output", }, []string{"name", "producer_id", "reason"}) var kafkaSendDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "kafka_output", Name: "msg_send_duration_ns", Help: "gnmic kafka output send duration in ns", }, []string{"name", "producer_id"}) func (k *kafkaOutput) initMetrics(name string) { kafkaNumberOfSentMsgs.WithLabelValues(name, "").Add(0) kafkaNumberOfSentBytes.WithLabelValues(name, "").Add(0) kafkaNumberOfFailSendMsgs.WithLabelValues(name, "", "").Add(0) kafkaSendDuration.WithLabelValues(name, "").Set(0) } func (k *kafkaOutput) registerMetrics() error { cfg := k.cfg.Load() if cfg == nil { return nil } if !cfg.EnableMetrics { return nil } if k.reg == nil { k.logger.Printf("ERR: metrics enabled but main registry is not initialized, enable main metrics under `api-server`") return nil } var err error registerMetricsOnce.Do(func() { if err = k.reg.Register(kafkaNumberOfSentMsgs); err != nil { return } if err = k.reg.Register(kafkaNumberOfSentBytes); err != nil { return } if err = k.reg.Register(kafkaNumberOfFailSendMsgs); err != nil { return } if err = k.reg.Register(kafkaSendDuration); err != nil { return } }) k.initMetrics(cfg.Name) return err } ================================================ FILE: pkg/outputs/kafka_output/kafka_output.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package kafka_output import ( "context" "encoding/json" "errors" "fmt" "io" "log" "os" "slices" "strings" "sync" "sync/atomic" "text/template" "time" "github.com/IBM/sarama" "github.com/google/uuid" "github.com/prometheus/client_golang/prometheus" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/gtemplate" "github.com/openconfig/gnmic/pkg/outputs" pkgutils "github.com/openconfig/gnmic/pkg/utils" "github.com/zestor-dev/zestor/store" ) const ( defaultKafkaMaxRetry = 2 defaultKafkaTimeout = 5 * time.Second defaultKafkaTopic = "telemetry" defaultNumWorkers = 1 defaultFormat = "event" defaultRecoveryWaitTime = 10 * time.Second defaultAddress = "localhost:9092" loggingPrefixTpl = "[kafka_output:%s] " defaultCompressionCodec = sarama.CompressionNone requiredAcksNoResponse = "no-response" requiredAcksWaitForLocal = "wait-for-local" requiredAcksWaitForAll = "wait-for-all" ) var stringBuilderPool = sync.Pool{ New: func() any { return new(strings.Builder) }, } func init() { outputs.Register("kafka", func() outputs.Output { return &kafkaOutput{} }) } func (k *kafkaOutput) init() { k.cfg = new(atomic.Pointer[config]) k.dynCfg = new(atomic.Pointer[dynConfig]) k.msgChan = new(atomic.Pointer[chan *outputs.ProtoMsg]) k.wg = new(sync.WaitGroup) k.logger = log.New(io.Discard, loggingPrefixTpl, utils.DefaultLoggingFlags) k.closeOnce = sync.Once{} k.closeSig = make(chan struct{}) } // kafkaOutput // type kafkaOutput struct { outputs.BaseOutput cfg *atomic.Pointer[config] dynCfg *atomic.Pointer[dynConfig] logger sarama.StdLogger srcLogger *log.Logger msgChan *atomic.Pointer[chan *outputs.ProtoMsg] wg *sync.WaitGroup rootCtx context.Context cancelFn context.CancelFunc reg *prometheus.Registry store store.Store[any] closeOnce sync.Once closeSig chan struct{} } type dynConfig struct { targetTpl *template.Template msgTpl *template.Template evps []formatters.EventProcessor mo *formatters.MarshalOptions } // config // type config struct { Address string `mapstructure:"address,omitempty"` Topic string `mapstructure:"topic,omitempty"` TopicPrefix string `mapstructure:"topic-prefix,omitempty"` Name string `mapstructure:"name,omitempty"` SASL *types.SASL `mapstructure:"sasl,omitempty"` TLS *types.TLSConfig `mapstructure:"tls,omitempty"` MaxRetry int `mapstructure:"max-retry,omitempty"` Timeout time.Duration `mapstructure:"timeout,omitempty"` RecoveryWaitTime time.Duration `mapstructure:"recovery-wait-time,omitempty"` FlushFrequency time.Duration `mapstructure:"flush-frequency,omitempty"` SyncProducer bool `mapstructure:"sync-producer,omitempty"` RequiredAcks string `mapstructure:"required-acks,omitempty"` Format string `mapstructure:"format,omitempty"` InsertKey bool `mapstructure:"insert-key,omitempty"` AddTarget string `mapstructure:"add-target,omitempty"` TargetTemplate string `mapstructure:"target-template,omitempty"` MsgTemplate string `mapstructure:"msg-template,omitempty"` SplitEvents bool `mapstructure:"split-events,omitempty"` NumWorkers int `mapstructure:"num-workers,omitempty"` CompressionCodec string `mapstructure:"compression-codec,omitempty"` KafkaVersion string `mapstructure:"kafka-version,omitempty"` Debug bool `mapstructure:"debug,omitempty"` BufferSize int `mapstructure:"buffer-size,omitempty"` OverrideTimestamps bool `mapstructure:"override-timestamps,omitempty"` EnableMetrics bool `mapstructure:"enable-metrics,omitempty"` EventProcessors []string `mapstructure:"event-processors,omitempty"` } func (k *kafkaOutput) String() string { cfg := k.cfg.Load() if cfg == nil { return "" } b, err := json.Marshal(cfg) if err != nil { return "" } return string(b) } func (k *kafkaOutput) buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) { tcs, ps, acts, err := pkgutils.GetConfigMaps(k.store) if err != nil { return nil, err } evps, err := formatters.MakeEventProcessors( logger, eventProcessors, ps, tcs, acts, ) if err != nil { return nil, err } return evps, nil } // Init / func (k *kafkaOutput) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error { k.init() // init struct fields newCfg := new(config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } if newCfg.Name == "" { newCfg.Name = name } loggingPrefix := fmt.Sprintf(loggingPrefixTpl, newCfg.Name) options := &outputs.OutputOptions{} for _, opt := range opts { if err := opt(options); err != nil { return err } } k.store = options.Store if options.Logger != nil { k.srcLogger = options.Logger sarama.Logger = log.New(options.Logger.Writer(), loggingPrefix, options.Logger.Flags()) k.logger = sarama.Logger } err = k.setDefaultsFor(newCfg) if err != nil { return err } // store config k.cfg.Store(newCfg) // initialize registry k.reg = options.Registry err = k.registerMetrics() if err != nil { return err } dc := new(dynConfig) // initialize event processors evps, err := k.buildEventProcessors(options.Logger, newCfg.EventProcessors) if err != nil { return err } dc.evps = evps newMsgChan := make(chan *outputs.ProtoMsg, uint(newCfg.BufferSize)) k.msgChan.Store(&newMsgChan) if newCfg.TargetTemplate == "" { dc.targetTpl = outputs.DefaultTargetTemplate } else if newCfg.AddTarget != "" { dc.targetTpl, err = gtemplate.CreateTemplate("target-template", newCfg.TargetTemplate) if err != nil { return err } dc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs) } if newCfg.MsgTemplate != "" { dc.msgTpl, err = gtemplate.CreateTemplate("msg-template", newCfg.MsgTemplate) if err != nil { return err } dc.msgTpl = dc.msgTpl.Funcs(outputs.TemplateFuncs) } dc.mo = &formatters.MarshalOptions{ Format: newCfg.Format, OverrideTS: newCfg.OverrideTimestamps, } k.dynCfg.Store(dc) config, err := k.createConfigFor(newCfg) if err != nil { return err } k.rootCtx = ctx ctx, k.cancelFn = context.WithCancel(k.rootCtx) k.wg.Add(newCfg.NumWorkers) for i := 0; i < newCfg.NumWorkers; i++ { cfg := *config cfg.ClientID = fmt.Sprintf("%s-%d", config.ClientID, i) go k.worker(ctx, i, &cfg, *k.msgChan.Load()) } return nil } func (k *kafkaOutput) Validate(cfg map[string]any) error { ncfg := new(config) err := outputs.DecodeConfig(cfg, ncfg) if err != nil { return err } err = k.setDefaultsFor(ncfg) if err != nil { return err } _, err = gtemplate.CreateTemplate("target-template", ncfg.TargetTemplate) if err != nil { return err } _, err = gtemplate.CreateTemplate("msg-template", ncfg.MsgTemplate) if err != nil { return err } return nil } func (k *kafkaOutput) Update(ctx context.Context, cfg map[string]any) error { newCfg := new(config) if err := outputs.DecodeConfig(cfg, newCfg); err != nil { return err } currCfg := k.cfg.Load() if newCfg.Name == "" && currCfg != nil { newCfg.Name = currCfg.Name } err := k.setDefaultsFor(newCfg) if err != nil { return err } swapChannel := channelNeedsSwap(currCfg, newCfg) restartWorkers := needsWorkerRestart(currCfg, newCfg) rebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0 var targetTpl *template.Template if newCfg.TargetTemplate == "" { targetTpl = outputs.DefaultTargetTemplate } else if newCfg.AddTarget != "" { t, err := gtemplate.CreateTemplate("target-template", newCfg.TargetTemplate) if err != nil { return err } targetTpl = t.Funcs(outputs.TemplateFuncs) } else { targetTpl = outputs.DefaultTargetTemplate } var msgTpl *template.Template if newCfg.MsgTemplate != "" { t, err := gtemplate.CreateTemplate("msg-template", newCfg.MsgTemplate) if err != nil { return err } msgTpl = t.Funcs(outputs.TemplateFuncs) } dc := &dynConfig{ targetTpl: targetTpl, msgTpl: msgTpl, mo: &formatters.MarshalOptions{ Format: newCfg.Format, OverrideTS: newCfg.OverrideTimestamps, }, } prevDC := k.dynCfg.Load() if rebuildProcessors { dc.evps, err = k.buildEventProcessors(log.New(os.Stderr, fmt.Sprintf(loggingPrefixTpl, newCfg.Name), utils.DefaultLoggingFlags), newCfg.EventProcessors) if err != nil { return err } } else if prevDC != nil { dc.evps = prevDC.evps } k.dynCfg.Store(dc) k.cfg.Store(newCfg) if swapChannel || restartWorkers { var newChan chan *outputs.ProtoMsg if swapChannel { newChan = make(chan *outputs.ProtoMsg, newCfg.BufferSize) } else { newChan = *k.msgChan.Load() } baseCfg, err := k.createConfigFor(newCfg) if err != nil { return err } runCtx, cancel := context.WithCancel(k.rootCtx) newWG := new(sync.WaitGroup) // save old pointers oldCancel := k.cancelFn oldWG := k.wg oldMsgChan := *k.msgChan.Load() // swap k.cancelFn = cancel k.wg = newWG k.msgChan.Store(&newChan) k.wg.Add(currCfg.NumWorkers) for i := 0; i < currCfg.NumWorkers; i++ { cfgCopy := *baseCfg cfgCopy.ClientID = fmt.Sprintf("%s-%d", baseCfg.ClientID, i) go k.worker(runCtx, i, &cfgCopy, newChan) } drainDone := make(chan struct{}) go func() { defer close(drainDone) for { select { case <-ctx.Done(): return case msg, ok := <-oldMsgChan: if !ok { return } select { case newChan <- msg: default: } default: return } } }() // wait for drain to complete <-drainDone // cancel old workers and loops if oldCancel != nil { oldCancel() } if oldWG != nil { oldWG.Wait() } } k.logger.Printf("updated kafka output: %s", k.String()) return nil } func (k *kafkaOutput) setDefaultsFor(cfg *config) error { if cfg.Format == "" { cfg.Format = defaultFormat } if !(cfg.Format == "event" || cfg.Format == "protojson" || cfg.Format == "prototext" || cfg.Format == "proto" || cfg.Format == "json") { return fmt.Errorf("unsupported output format '%s' for output type kafka", cfg.Format) } if cfg.Address == "" { cfg.Address = defaultAddress } if cfg.Topic == "" { cfg.Topic = defaultKafkaTopic } if cfg.MaxRetry == 0 { cfg.MaxRetry = defaultKafkaMaxRetry } if cfg.Timeout <= 0 { cfg.Timeout = defaultKafkaTimeout } if cfg.RecoveryWaitTime <= 0 { cfg.RecoveryWaitTime = defaultRecoveryWaitTime } if cfg.NumWorkers <= 0 { cfg.NumWorkers = defaultNumWorkers } if cfg.Name == "" { cfg.Name = "gnmic-" + uuid.New().String() } if cfg.SASL == nil { return nil } cfg.SASL.Mechanism = strings.ToUpper(cfg.SASL.Mechanism) switch cfg.SASL.Mechanism { case "": cfg.SASL.Mechanism = "PLAIN" case "OAUTHBEARER": if cfg.SASL.TokenURL == "" { return errors.New("missing token-url for kafka SASL mechanism OAUTHBEARER") } } switch cfg.RequiredAcks { case requiredAcksNoResponse: case requiredAcksWaitForLocal: case requiredAcksWaitForAll: case "": cfg.RequiredAcks = requiredAcksWaitForLocal default: return fmt.Errorf("unknown `required-acks` value %s: must be one of %q, %q or %q", cfg.RequiredAcks, requiredAcksNoResponse, requiredAcksWaitForLocal, requiredAcksWaitForAll) } return nil } func (k *kafkaOutput) UpdateProcessor(name string, pcfg map[string]any) error { cfg := k.cfg.Load() dc := k.dynCfg.Load() newEvps, changed, err := outputs.UpdateProcessorInSlice( k.srcLogger, k.store, cfg.EventProcessors, dc.evps, name, pcfg, ) if err != nil { return err } if changed { newDC := *dc newDC.evps = newEvps k.dynCfg.Store(&newDC) k.logger.Printf("updated event processor %s", name) } return nil } // Write // func (k *kafkaOutput) Write(ctx context.Context, rsp proto.Message, meta outputs.Meta) { currentCfg := k.cfg.Load() if rsp == nil { return } msgChan := *k.msgChan.Load() wctx, cancel := context.WithTimeout(ctx, currentCfg.Timeout) defer cancel() select { case <-ctx.Done(): return case msgChan <- outputs.NewProtoMsg(rsp, meta): case <-k.closeSig: return case <-wctx.Done(): if currentCfg.Debug { k.logger.Printf("writing expired after %s, Kafka output might not be initialized", currentCfg.Timeout) } if currentCfg.EnableMetrics { kafkaNumberOfFailSendMsgs.WithLabelValues(currentCfg.Name, currentCfg.Name, "timeout").Inc() } return } } func (k *kafkaOutput) WriteEvent(ctx context.Context, ev *formatters.EventMsg) {} // Close // func (k *kafkaOutput) Close() error { k.cancelFn() k.wg.Wait() k.closeOnce.Do(func() { close(k.closeSig) }) k.logger.Printf("closed kafka output: %s", k.String()) return nil } func (k *kafkaOutput) worker(ctx context.Context, idx int, kafkaCfg *sarama.Config, msgChan <-chan *outputs.ProtoMsg) { currentCfg := k.cfg.Load() if currentCfg.SyncProducer { k.syncProducerWorker(ctx, idx, kafkaCfg, msgChan) return } k.asyncProducerWorker(ctx, idx, kafkaCfg, msgChan) } func (k *kafkaOutput) asyncProducerWorker(ctx context.Context, idx int, kafkaCfg *sarama.Config, msgChan <-chan *outputs.ProtoMsg) { var producer sarama.AsyncProducer var err error defer k.wg.Done() workerLogPrefix := fmt.Sprintf("worker-%d", idx) k.logger.Printf("%s starting", workerLogPrefix) CRPROD: if ctx.Err() != nil { return } cfg := k.cfg.Load() producer, err = sarama.NewAsyncProducer(strings.Split(cfg.Address, ","), kafkaCfg) if err != nil { k.logger.Printf("%s failed to create kafka producer: %v", workerLogPrefix, err) time.Sleep(cfg.RecoveryWaitTime) goto CRPROD } defer producer.Close() k.logger.Printf("%s initialized kafka producer: %s", workerLogPrefix, k.String()) go func() { for { select { case <-ctx.Done(): return case msg, ok := <-producer.Successes(): if !ok { return } cfg := k.cfg.Load() if cfg.EnableMetrics { start, ok := msg.Metadata.(time.Time) if ok { kafkaSendDuration.WithLabelValues(cfg.Name, kafkaCfg.ClientID).Set(float64(time.Since(start).Nanoseconds())) } kafkaNumberOfSentMsgs.WithLabelValues(cfg.Name, kafkaCfg.ClientID).Inc() kafkaNumberOfSentBytes.WithLabelValues(cfg.Name, kafkaCfg.ClientID).Add(float64(msg.Value.Length())) } case err, ok := <-producer.Errors(): if !ok { return } cfg := k.cfg.Load() if cfg.Debug { k.logger.Printf("%s failed to send a kafka msg to topic '%s': %v", workerLogPrefix, err.Msg.Topic, err.Err) } if cfg.EnableMetrics { kafkaNumberOfFailSendMsgs.WithLabelValues(cfg.Name, kafkaCfg.ClientID, "send_error").Inc() } } } }() for { select { case <-ctx.Done(): k.logger.Printf("%s shutting down", workerLogPrefix) return case m, ok := <-msgChan: if !ok { return } pmsg := m.GetMsg() cfg := k.cfg.Load() dc := k.dynCfg.Load() pmsg, err = outputs.AddSubscriptionTarget(pmsg, m.GetMeta(), cfg.AddTarget, dc.targetTpl) if err != nil { k.logger.Printf("failed to add target to the response: %v", err) } bb, err := outputs.Marshal(pmsg, m.GetMeta(), dc.mo, cfg.SplitEvents, dc.evps...) if err != nil { if cfg.Debug { k.logger.Printf("%s failed marshaling proto msg: %v", workerLogPrefix, err) } if cfg.EnableMetrics { kafkaNumberOfFailSendMsgs.WithLabelValues(cfg.Name, kafkaCfg.ClientID, "marshal_error").Inc() } continue } if len(bb) == 0 { continue } for _, b := range bb { if dc.msgTpl != nil { b, err = outputs.ExecTemplate(b, dc.msgTpl) if err != nil { if cfg.Debug { log.Printf("failed to execute template: %v", err) } kafkaNumberOfFailSendMsgs.WithLabelValues(cfg.Name, kafkaCfg.ClientID, "template_error").Inc() continue } } topic := k.selectTopic(m.GetMeta()) msg := &sarama.ProducerMessage{ Topic: topic, Value: sarama.ByteEncoder(b), } if cfg.InsertKey { msg.Key = sarama.ByteEncoder(k.partitionKey(m.GetMeta())) } var start time.Time if cfg.EnableMetrics { start = time.Now() msg.Metadata = start } producer.Input() <- msg } } } } func (k *kafkaOutput) syncProducerWorker(ctx context.Context, idx int, kafkaCfg *sarama.Config, msgChan <-chan *outputs.ProtoMsg) { var producer sarama.SyncProducer var err error defer k.wg.Done() workerLogPrefix := fmt.Sprintf("worker-%d", idx) k.logger.Printf("%s starting", workerLogPrefix) CRPROD: cfg := k.cfg.Load() producer, err = sarama.NewSyncProducer(strings.Split(cfg.Address, ","), kafkaCfg) if err != nil { k.logger.Printf("%s failed to create kafka producer: %v", workerLogPrefix, err) time.Sleep(cfg.RecoveryWaitTime) goto CRPROD } defer producer.Close() k.logger.Printf("%s initialized kafka producer: %s", workerLogPrefix, k.String()) for { select { case <-ctx.Done(): k.logger.Printf("%s shutting down", workerLogPrefix) return case m, ok := <-msgChan: if !ok { return } pmsg := m.GetMsg() cfg := k.cfg.Load() dc := k.dynCfg.Load() pmsg, err = outputs.AddSubscriptionTarget(pmsg, m.GetMeta(), cfg.AddTarget, dc.targetTpl) if err != nil { k.logger.Printf("failed to add target to the response: %v", err) } bb, err := outputs.Marshal(pmsg, m.GetMeta(), dc.mo, cfg.SplitEvents, dc.evps...) if err != nil { if cfg.Debug { k.logger.Printf("%s failed marshaling proto msg: %v", workerLogPrefix, err) } if cfg.EnableMetrics { kafkaNumberOfFailSendMsgs.WithLabelValues(cfg.Name, kafkaCfg.ClientID, "marshal_error").Inc() } continue } if len(bb) == 0 { continue } for _, b := range bb { if dc.msgTpl != nil { b, err = outputs.ExecTemplate(b, dc.msgTpl) if err != nil { if cfg.Debug { log.Printf("failed to execute template: %v", err) } kafkaNumberOfFailSendMsgs.WithLabelValues(cfg.Name, kafkaCfg.ClientID, "template_error").Inc() continue } } topic := k.selectTopic(m.GetMeta()) msg := &sarama.ProducerMessage{ Topic: topic, Value: sarama.ByteEncoder(b), } if cfg.InsertKey { msg.Key = sarama.ByteEncoder(k.partitionKey(m.GetMeta())) } var start time.Time if cfg.EnableMetrics { start = time.Now() } _, _, err = producer.SendMessage(msg) if err != nil { if cfg.Debug { k.logger.Printf("%s failed to send a kafka msg to topic '%s': %v", workerLogPrefix, topic, err) } if cfg.EnableMetrics { kafkaNumberOfFailSendMsgs.WithLabelValues(cfg.Name, kafkaCfg.ClientID, "send_error").Inc() } producer.Close() time.Sleep(cfg.RecoveryWaitTime) goto CRPROD } if cfg.EnableMetrics { kafkaSendDuration.WithLabelValues(cfg.Name, kafkaCfg.ClientID).Set(float64(time.Since(start).Nanoseconds())) kafkaNumberOfSentMsgs.WithLabelValues(cfg.Name, kafkaCfg.ClientID).Inc() kafkaNumberOfSentBytes.WithLabelValues(cfg.Name, kafkaCfg.ClientID).Add(float64(len(b))) } } } } } func (k *kafkaOutput) createConfigFor(c *config) (*sarama.Config, error) { cfg := sarama.NewConfig() cfg.ClientID = c.Name if c.KafkaVersion != "" { var err error cfg.Version, err = sarama.ParseKafkaVersion(c.KafkaVersion) if err != nil { return nil, err } } // SASL_PLAINTEXT or SASL_SSL if c.SASL != nil { cfg.Net.SASL.Enable = true cfg.Net.SASL.User = c.SASL.User cfg.Net.SASL.Password = c.SASL.Password cfg.Net.SASL.Mechanism = sarama.SASLMechanism(c.SASL.Mechanism) switch cfg.Net.SASL.Mechanism { case sarama.SASLTypeSCRAMSHA256: cfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA256} } case sarama.SASLTypeSCRAMSHA512: cfg.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: SHA512} } case sarama.SASLTypeOAuth: cfg.Net.SASL.TokenProvider = pkgutils.NewTokenProvider(cfg.Net.SASL.User, cfg.Net.SASL.Password, c.SASL.TokenURL) } } // SSL or SASL_SSL if c.TLS != nil { var err error cfg.Net.TLS.Enable = true cfg.Net.TLS.Config, err = utils.NewTLSConfig( c.TLS.CaFile, c.TLS.CertFile, c.TLS.KeyFile, "", c.TLS.SkipVerify, false) if err != nil { return nil, err } } cfg.Producer.Retry.Max = c.MaxRetry cfg.Producer.Return.Successes = true cfg.Producer.Timeout = c.Timeout cfg.Producer.Flush.Frequency = c.FlushFrequency switch c.RequiredAcks { case requiredAcksNoResponse: case requiredAcksWaitForLocal: cfg.Producer.RequiredAcks = sarama.WaitForLocal case requiredAcksWaitForAll: cfg.Producer.RequiredAcks = sarama.WaitForAll } cfg.Metadata.Full = false switch c.CompressionCodec { case "gzip": cfg.Producer.Compression = sarama.CompressionGZIP case "snappy": cfg.Producer.Compression = sarama.CompressionSnappy case "zstd": cfg.Producer.Compression = sarama.CompressionZSTD case "lz4": cfg.Producer.Compression = sarama.CompressionLZ4 default: cfg.Producer.Compression = defaultCompressionCodec } return cfg, nil } func (k *kafkaOutput) partitionKey(m outputs.Meta) []byte { b := stringBuilderPool.Get().(*strings.Builder) defer func() { b.Reset() stringBuilderPool.Put(b) }() b.WriteString(m["source"]) b.WriteString(":::") b.WriteString(m["subscription-name"]) return []byte(b.String()) } func (k *kafkaOutput) selectTopic(m outputs.Meta) string { cfg := k.cfg.Load() if cfg.TopicPrefix == "" { return cfg.Topic } sb := stringBuilderPool.Get().(*strings.Builder) defer func() { sb.Reset() stringBuilderPool.Put(sb) }() sb.WriteString(cfg.TopicPrefix) if subname, ok := m["subscription-name"]; ok { sb.WriteString("_") sb.WriteString(subname) } if s, ok := m["source"]; ok { sb.WriteString("_") for _, r := range s { if r == ':' { sb.WriteRune('_') } else { sb.WriteRune(r) } } } return sb.String() } // config swap requirements: // decides if we need to rebuild sarama.Config and producers func needsProducerRestart(old, nw *config) bool { if old == nil || nw == nil { return true } // anything that maps into sarama.Config or producer type if old.Address != nw.Address || !old.TLS.Equal(nw.TLS) || !saslEq(old.SASL, nw.SASL) || old.KafkaVersion != nw.KafkaVersion || old.MaxRetry != nw.MaxRetry || old.Timeout != nw.Timeout || old.FlushFrequency != nw.FlushFrequency || old.RequiredAcks != nw.RequiredAcks || old.CompressionCodec != nw.CompressionCodec || old.SyncProducer != nw.SyncProducer || old.Name != nw.Name { return true } return false } func needsWorkerRestart(old, nw *config) bool { if old == nil || nw == nil { return true } // producer dependencies OR worker count change return needsProducerRestart(old, nw) || old.NumWorkers != nw.NumWorkers } func channelNeedsSwap(old, nw *config) bool { if old != nil && nw != nil { return old.BufferSize != nw.BufferSize } return true } func saslEq(a, b *types.SASL) bool { if a == nil && b == nil { return true } if a == nil || b == nil { return false } return a.User == b.User && a.Password == b.Password && strings.EqualFold(a.Mechanism, b.Mechanism) && a.TokenURL == b.TokenURL } ================================================ FILE: pkg/outputs/kafka_output/kafka_scram_client.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package kafka_output import ( "crypto/sha256" "crypto/sha512" "hash" "github.com/xdg/scram" ) var SHA256 scram.HashGeneratorFcn = func() hash.Hash { return sha256.New() } var SHA512 scram.HashGeneratorFcn = func() hash.Hash { return sha512.New() } type XDGSCRAMClient struct { *scram.Client *scram.ClientConversation scram.HashGeneratorFcn } func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) if err != nil { return err } x.ClientConversation = x.Client.NewConversation() return nil } func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { response, err = x.ClientConversation.Step(challenge) return } func (x *XDGSCRAMClient) Done() bool { return x.ClientConversation.Done() } ================================================ FILE: pkg/outputs/nats_outputs/jetstream/jetstream_output.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package jetstream_output import ( "context" "encoding/json" "errors" "fmt" "io" "log" "net" "slices" "sort" "strings" "sync" "sync/atomic" "text/template" "time" "github.com/google/uuid" "github.com/nats-io/nats.go" "github.com/openconfig/gnmi/proto/gnmi" "github.com/prometheus/client_golang/prometheus" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/gtemplate" "github.com/openconfig/gnmic/pkg/outputs" gutils "github.com/openconfig/gnmic/pkg/utils" "github.com/zestor-dev/zestor/store" ) const ( loggingPrefix = "[jetstream_output:%s] " defaultSubjectName = "telemetry" defaultFormat = "event" defaultAddress = "localhost:4222" natsConnectWait = 2 * time.Second defaultNumWorkers = 1 defaultWriteTimeout = 5 * time.Second ) func init() { outputs.Register("jetstream", func() outputs.Output { return &jetstreamOutput{} }) } type subjectFormat string const ( subjectFormat_Static = "static" subjectFormat_TargetSub = "target.subscription" subjectFormat_SubTarget = "subscription.target" subjectFormat_SubTargetPath = "subscription.target.path" subjectFormat_SubTargetPathWithKeys = "subscription.target.pathKeys" ) type config struct { Name string `mapstructure:"name,omitempty" json:"name,omitempty"` Address string `mapstructure:"address,omitempty" json:"address,omitempty"` Stream string `mapstructure:"stream,omitempty" json:"stream,omitempty"` Subject string `mapstructure:"subject,omitempty" json:"subject,omitempty"` SubjectFormat subjectFormat `mapstructure:"subject-format,omitempty" json:"subject-format,omitempty"` CreateStream *createStreamConfig `mapstructure:"create-stream,omitempty" json:"create-stream,omitempty"` Username string `mapstructure:"username,omitempty" json:"username,omitempty"` Password string `mapstructure:"password,omitempty" json:"password,omitempty"` ConnectTimeWait time.Duration `mapstructure:"connect-time-wait,omitempty" json:"connect-time-wait,omitempty"` TLS *types.TLSConfig `mapstructure:"tls,omitempty" json:"tls,omitempty"` Format string `mapstructure:"format,omitempty" json:"format,omitempty"` SplitEvents bool `mapstructure:"split-events,omitempty" json:"split-events,omitempty"` AddTarget string `mapstructure:"add-target,omitempty" json:"add-target,omitempty"` TargetTemplate string `mapstructure:"target-template,omitempty" json:"target-template,omitempty"` MsgTemplate string `mapstructure:"msg-template,omitempty" json:"msg-template,omitempty"` OverrideTimestamps bool `mapstructure:"override-timestamps,omitempty" json:"override-timestamps,omitempty"` NumWorkers int `mapstructure:"num-workers,omitempty" json:"num-workers,omitempty"` WriteTimeout time.Duration `mapstructure:"write-timeout,omitempty" json:"write-timeout,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` BufferSize uint `mapstructure:"buffer-size,omitempty" json:"buffer-size,omitempty"` EnableMetrics bool `mapstructure:"enable-metrics,omitempty" json:"enable-metrics,omitempty"` EventProcessors []string `mapstructure:"event-processors,omitempty" json:"event-processors,omitempty"` } type createStreamConfig struct { Description string `mapstructure:"description,omitempty" json:"description,omitempty"` Subjects []string `mapstructure:"subjects,omitempty" json:"subjects,omitempty"` Storage string `mapstructure:"storage,omitempty" json:"storage,omitempty"` Retention string `mapstructure:"retention-policy,omitempty" json:"retention-policy,omitempty"` MaxMsgs int64 `mapstructure:"max-msgs,omitempty" json:"max-msgs,omitempty"` MaxBytes int64 `mapstructure:"max-bytes,omitempty" json:"max-bytes,omitempty"` MaxAge time.Duration `mapstructure:"max-age,omitempty" json:"max-age,omitempty"` MaxMsgSize int32 `mapstructure:"max-msg-size,omitempty" json:"max-msg-size,omitempty"` } // jetstreamOutput // type jetstreamOutput struct { outputs.BaseOutput cfg *atomic.Pointer[config] rootCtx context.Context cancelFn context.CancelFunc msgChan *atomic.Pointer[chan *outputs.ProtoMsg] // atomic channel swaps // workers wait group wg *sync.WaitGroup // dynamic config items that don't need a worker restart dynCfg *atomic.Pointer[dynConfig] // metrics registry reg *prometheus.Registry // config store store store.Store[any] logger *log.Logger closeOnce sync.Once closeSig chan struct{} } type dynConfig struct { targetTpl *template.Template msgTpl *template.Template evps []formatters.EventProcessor mo *formatters.MarshalOptions } func (n *jetstreamOutput) init() { n.cfg = new(atomic.Pointer[config]) n.dynCfg = new(atomic.Pointer[dynConfig]) n.msgChan = new(atomic.Pointer[chan *outputs.ProtoMsg]) n.wg = new(sync.WaitGroup) n.logger = log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags) n.closeOnce = sync.Once{} n.closeSig = make(chan struct{}) } func (n *jetstreamOutput) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error { n.init() // init struct fields ncfg := new(config) err := outputs.DecodeConfig(cfg, ncfg) if err != nil { return err } if ncfg.Name == "" { ncfg.Name = name } n.logger.SetPrefix(fmt.Sprintf(loggingPrefix, ncfg.Name)) options := &outputs.OutputOptions{} for _, opt := range opts { if err := opt(options); err != nil { return err } } n.store = options.Store // set defaults err = n.setDefaultsFor(ncfg) if err != nil { return err } n.cfg.Store(ncfg) // apply logger n.setLogger(options.Logger) // initialize registry n.reg = options.Registry err = n.registerMetrics() if err != nil { return err } msgChan := make(chan *outputs.ProtoMsg, ncfg.BufferSize) n.msgChan.Store(&msgChan) // prep dynamic config dc := new(dynConfig) // initialize event processors evps, err := n.buildEventProcessors(options.Logger, ncfg.EventProcessors) if err != nil { return err } dc.evps = evps dc.mo = &formatters.MarshalOptions{ Format: ncfg.Format, OverrideTS: ncfg.OverrideTimestamps, } if ncfg.TargetTemplate == "" { dc.targetTpl = outputs.DefaultTargetTemplate } else if ncfg.AddTarget != "" { dc.targetTpl, err = gtemplate.CreateTemplate("target-template", ncfg.TargetTemplate) if err != nil { return err } dc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs) } if ncfg.MsgTemplate != "" { dc.msgTpl, err = gtemplate.CreateTemplate("msg-template", ncfg.MsgTemplate) if err != nil { return err } dc.msgTpl = dc.msgTpl.Funcs(outputs.TemplateFuncs) } n.dynCfg.Store(dc) n.rootCtx = ctx // store root context var wctx context.Context wctx, n.cancelFn = context.WithCancel(n.rootCtx) // create worker context n.wg.Add(ncfg.NumWorkers) for i := 0; i < ncfg.NumWorkers; i++ { go n.worker(wctx, i) } return nil } func (n *jetstreamOutput) setDefaultsFor(cfg *config) error { if cfg.Stream == "" { return errors.New("missing stream name") } if cfg.Format == "" { cfg.Format = defaultFormat } if cfg.SubjectFormat == "" { cfg.SubjectFormat = subjectFormat_Static } switch cfg.SubjectFormat { case subjectFormat_Static, subjectFormat_TargetSub, subjectFormat_SubTarget, subjectFormat_SubTargetPath, subjectFormat_SubTargetPathWithKeys: default: return fmt.Errorf("unknown subject-format value: %v", cfg.SubjectFormat) } if cfg.Subject == "" { cfg.Subject = defaultSubjectName } if cfg.Address == "" { cfg.Address = defaultAddress } if cfg.ConnectTimeWait <= 0 { cfg.ConnectTimeWait = natsConnectWait } if cfg.Name == "" { cfg.Name = "gnmic-" + uuid.New().String() } if cfg.NumWorkers <= 0 { cfg.NumWorkers = defaultNumWorkers } if cfg.WriteTimeout <= 0 { cfg.WriteTimeout = defaultWriteTimeout } if cfg.CreateStream != nil { if len(cfg.CreateStream.Subjects) == 0 { cfg.CreateStream.Subjects = []string{fmt.Sprintf("%s.>", cfg.Stream)} } if cfg.CreateStream.Description == "" { cfg.CreateStream.Description = "created by gNMIc" } if cfg.CreateStream.Storage == "" { cfg.CreateStream.Storage = "memory" } if cfg.CreateStream.Retention == "" { cfg.CreateStream.Retention = "limits" } // Validate retention policy value if !isValidRetentionPolicy(cfg.CreateStream.Retention) { return fmt.Errorf("invalid retention-policy: %s (must be 'limits' or 'workqueue')", cfg.CreateStream.Retention) } return nil } return nil } func (n *jetstreamOutput) Validate(cfg map[string]any) error { ncfg := new(config) err := outputs.DecodeConfig(cfg, ncfg) if err != nil { return err } err = n.setDefaultsFor(ncfg) if err != nil { return err } _, err = gtemplate.CreateTemplate("target-template", ncfg.TargetTemplate) if err != nil { return err } _, err = gtemplate.CreateTemplate("msg-template", ncfg.MsgTemplate) if err != nil { return err } return nil } func (n *jetstreamOutput) Update(ctx context.Context, cfg map[string]any) error { newCfg := new(config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } err = n.setDefaultsFor(newCfg) if err != nil { return err } currCfg := n.cfg.Load() swapChannel := channelNeedsSwap(currCfg, newCfg) restartWorkers := needsWorkerRestart(currCfg, newCfg) streamChanged := streamChanged(currCfg, newCfg) rebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0 //rebuild var targetTpl *template.Template if newCfg.TargetTemplate == "" { targetTpl = outputs.DefaultTargetTemplate } else if newCfg.AddTarget != "" { t, err := gtemplate.CreateTemplate("target-template", newCfg.TargetTemplate) if err != nil { return err } targetTpl = t.Funcs(outputs.TemplateFuncs) } else { targetTpl = outputs.DefaultTargetTemplate } var msgTpl *template.Template if newCfg.MsgTemplate != "" { t, err := gtemplate.CreateTemplate("msg-template", newCfg.MsgTemplate) if err != nil { return err } msgTpl = t.Funcs(outputs.TemplateFuncs) } dc := &dynConfig{ targetTpl: targetTpl, msgTpl: msgTpl, mo: &formatters.MarshalOptions{ Format: newCfg.Format, OverrideTS: newCfg.OverrideTimestamps, }, } // rebuild processors ? prevDC := n.dynCfg.Load() if rebuildProcessors { dc.evps, err = n.buildEventProcessors(n.logger, newCfg.EventProcessors) if err != nil { return err } } else if prevDC != nil { dc.evps = prevDC.evps } // store new dynamic config n.dynCfg.Store(dc) // store new config n.cfg.Store(newCfg) if swapChannel || restartWorkers || streamChanged { var newChan chan *outputs.ProtoMsg if swapChannel { newChan = make(chan *outputs.ProtoMsg, newCfg.BufferSize) } else { newChan = *n.msgChan.Load() } runCtx, cancel := context.WithCancel(n.rootCtx) newWG := new(sync.WaitGroup) // save old pointers oldCancel := n.cancelFn oldWG := n.wg oldMsgChan := *n.msgChan.Load() // swap n.cancelFn = cancel n.wg = newWG n.msgChan.Store(&newChan) // restart workers n.wg.Add(currCfg.NumWorkers) for i := 0; i < currCfg.NumWorkers; i++ { go n.worker(runCtx, i) } // cancel old workers if oldCancel != nil { oldCancel() } if oldWG != nil { oldWG.Wait() } if swapChannel { // best effort drain old channel OUTER_LOOP: // break label for { select { case msg, ok := <-oldMsgChan: if !ok { break } select { case newChan <- msg: default: // new channel full, drop message } default: break OUTER_LOOP // break out of the outer loop } } } } n.logger.Printf("updated jetstream output: %s", n.String()) return nil } func (n *jetstreamOutput) UpdateProcessor(name string, pcfg map[string]any) error { cfg := n.cfg.Load() dc := n.dynCfg.Load() newEvps, changed, err := outputs.UpdateProcessorInSlice( n.logger, n.store, cfg.EventProcessors, dc.evps, name, pcfg, ) if err != nil { return err } if changed { newDC := *dc newDC.evps = newEvps n.dynCfg.Store(&newDC) n.logger.Printf("updated event processor %s", name) } return nil } func (n *jetstreamOutput) Write(ctx context.Context, rsp proto.Message, meta outputs.Meta) { dc := n.dynCfg.Load() cfg := n.cfg.Load() if rsp == nil || dc == nil || dc.mo == nil { return } wctx, cancel := context.WithTimeout(ctx, cfg.WriteTimeout) defer cancel() ch := n.msgChan.Load() select { case <-ctx.Done(): return case *ch <- outputs.NewProtoMsg(rsp, meta): case <-n.closeSig: return case <-wctx.Done(): if cfg.Debug { n.logger.Printf("writing expired after %s, JetStream output might not be initialized", cfg.WriteTimeout) } if cfg.EnableMetrics { jetStreamNumberOfFailSendMsgs.WithLabelValues(cfg.Name, "timeout").Inc() } return } } func (n *jetstreamOutput) WriteEvent(ctx context.Context, ev *formatters.EventMsg) {} func (n *jetstreamOutput) Close() error { n.cancelFn() n.wg.Wait() n.closeOnce.Do(func() { close(n.closeSig) }) n.logger.Printf("closed jetstream output: %s", n.String()) return nil } func (c *config) String() string { b, err := json.Marshal(c) if err != nil { return "" } return string(b) } func (n *jetstreamOutput) String() string { cfg := n.cfg.Load() b, err := json.Marshal(cfg) if err != nil { return "" } return string(b) } func (n *jetstreamOutput) buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) { tcs, ps, acts, err := gutils.GetConfigMaps(n.store) if err != nil { return nil, err } evps, err := formatters.MakeEventProcessors( logger, eventProcessors, ps, tcs, acts, ) if err != nil { return nil, err } return evps, nil } func (n *jetstreamOutput) setLogger(logger *log.Logger) { if logger != nil && n.logger != nil { n.logger.SetOutput(logger.Writer()) n.logger.SetFlags(logger.Flags()) } } func (n *jetstreamOutput) worker(ctx context.Context, i int) { defer n.wg.Done() var natsConn *nats.Conn var err error var subject string workerLogPrefix := fmt.Sprintf("worker-%d", i) n.logger.Printf("%s starting", workerLogPrefix) // snapshot msgChan msgChan := *n.msgChan.Load() CRCONN: if ctx.Err() != nil { return } cfg := n.cfg.Load() name := fmt.Sprintf("%s-%d", cfg.Name, i) natsConn, err = n.createNATSConn(ctx, cfg, i) if err != nil { n.logger.Printf("%s failed to create connection: %v", workerLogPrefix, err) time.Sleep(cfg.ConnectTimeWait) goto CRCONN } js, err := natsConn.JetStream() if err != nil { if cfg.Debug { n.logger.Printf("%s failed to create jetstream context: %v", workerLogPrefix, err) } if cfg.EnableMetrics { jetStreamNumberOfFailSendMsgs.WithLabelValues(name, "jetstream_context_error").Inc() } natsConn.Close() time.Sleep(cfg.ConnectTimeWait) goto CRCONN } n.logger.Printf("%s initialized nats jetstream producer: %s", workerLogPrefix, cfg) // worker-0 create stream if configured if i == 0 { err = n.createStream(js, cfg) if err != nil { if cfg.Debug { n.logger.Printf("%s failed to create stream: %v", workerLogPrefix, err) } if cfg.EnableMetrics { jetStreamNumberOfFailSendMsgs.WithLabelValues(name, "create_stream_error").Inc() } natsConn.Close() time.Sleep(cfg.ConnectTimeWait) goto CRCONN } } for { select { case <-ctx.Done(): natsConn.Close() n.logger.Printf("%s shutting down", workerLogPrefix) return case m := <-msgChan: pmsg := m.GetMsg() // get fresh config cfg := n.cfg.Load() // snapshot template and marshal options dc := n.dynCfg.Load() name := fmt.Sprintf("%s-%d", cfg.Name, i) pmsg, err = outputs.AddSubscriptionTarget(pmsg, m.GetMeta(), cfg.AddTarget, dc.targetTpl) if err != nil { n.logger.Printf("failed to add target to the response: %v", err) } var rs []proto.Message switch cfg.SubjectFormat { case subjectFormat_Static, subjectFormat_TargetSub, subjectFormat_SubTarget: rs = []proto.Message{pmsg} case subjectFormat_SubTargetPath, subjectFormat_SubTargetPathWithKeys: switch rsp := pmsg.(type) { case *gnmi.SubscribeResponse: switch rsp := rsp.Response.(type) { case *gnmi.SubscribeResponse_Update: rs = splitSubscribeResponse(rsp) } } } for _, r := range rs { bb, err := outputs.Marshal(r, m.GetMeta(), dc.mo, cfg.SplitEvents, dc.evps...) if err != nil { if cfg.Debug { n.logger.Printf("%s failed marshaling proto msg: %v", workerLogPrefix, err) } if cfg.EnableMetrics { jetStreamNumberOfFailSendMsgs.WithLabelValues(name, "marshal_error").Inc() } continue } if len(bb) == 0 { continue } for _, b := range bb { if dc.msgTpl != nil { b, err = outputs.ExecTemplate(b, dc.msgTpl) if err != nil { if cfg.Debug { log.Printf("failed to execute template: %v", err) } jetStreamNumberOfFailSendMsgs.WithLabelValues(name, "template_error").Inc() continue } } subject, err = n.subjectName(r, m.GetMeta(), dc, cfg) if err != nil { if cfg.Debug { n.logger.Printf("%s failed to get subject name: %v", workerLogPrefix, err) } if cfg.EnableMetrics { jetStreamNumberOfFailSendMsgs.WithLabelValues(name, "subject_name_error").Inc() } continue } var start time.Time if cfg.EnableMetrics { start = time.Now() } _, err = js.Publish(subject, b, nats.Context(ctx)) if err != nil { if cfg.Debug { n.logger.Printf("%s failed to write to subject '%s': %v", workerLogPrefix, subject, err) } if cfg.EnableMetrics { jetStreamNumberOfFailSendMsgs.WithLabelValues(name, "publish_error").Inc() } natsConn.Close() time.Sleep(cfg.ConnectTimeWait) goto CRCONN } if cfg.EnableMetrics { jetStreamSendDuration.WithLabelValues(name).Set(float64(time.Since(start).Nanoseconds())) jetStreamNumberOfSentMsgs.WithLabelValues(name, subject).Inc() jetStreamNumberOfSentBytes.WithLabelValues(name, subject).Add(float64(len(b))) } } } } } } type customDialer struct { ctx context.Context logger *log.Logger } func (n *jetstreamOutput) newCustomDialer(ctx context.Context) *customDialer { return &customDialer{ctx: ctx, logger: n.logger} } func (d *customDialer) Dial(network, address string) (net.Conn, error) { ctx, cancel := context.WithCancel(d.ctx) defer cancel() d.logger.Printf("attempting to connect to %s", address) select { case <-d.ctx.Done(): return nil, d.ctx.Err() default: nd := &net.Dialer{} conn, err := nd.DialContext(ctx, network, address) if err != nil { return nil, err } d.logger.Printf("successfully connected to NATS server %s", address) return conn, nil } } func (n *jetstreamOutput) createNATSConn(ctx context.Context, c *config, idx int) (*nats.Conn, error) { opts := []nats.Option{ nats.Name(fmt.Sprintf("%s-%d", c.Name, idx)), nats.SetCustomDialer(n.newCustomDialer(ctx)), nats.ReconnectWait(c.ConnectTimeWait), // nats.ReconnectBufSize(natsReconnectBufferSize), nats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) { n.logger.Printf("NATS error: %v", err) }), nats.DisconnectErrHandler(func(_ *nats.Conn, err error) { n.logger.Printf("Disconnected from NATS err=%v", err) }), nats.ClosedHandler(func(*nats.Conn) { n.logger.Println("NATS connection is closed") }), } if c.TLS != nil { tlsConfig, err := utils.NewTLSConfig( c.TLS.CaFile, c.TLS.CertFile, c.TLS.KeyFile, "", c.TLS.SkipVerify, false) if err != nil { return nil, err } if tlsConfig != nil { opts = append(opts, nats.Secure(tlsConfig)) } } if c.Username != "" && c.Password != "" { opts = append(opts, nats.UserInfo(c.Username, c.Password)) } nc, err := nats.Connect(c.Address, opts...) if err != nil { return nil, err } return nc, nil } func (n *jetstreamOutput) subjectName(m proto.Message, meta outputs.Meta, dc *dynConfig, cfg *config) (string, error) { sb := stringBuilderPool.Get().(*strings.Builder) defer func() { sb.Reset() stringBuilderPool.Put(sb) }() sb.WriteString(cfg.Stream) sb.WriteString(".") switch cfg.SubjectFormat { case subjectFormat_Static: sb.WriteString(cfg.Subject) case subjectFormat_TargetSub: err := dc.targetTpl.Execute(sb, meta) if err != nil { return "", err } if sub, ok := meta["subscription-name"]; ok { sb.WriteString(".") sb.WriteString(sub) } case subjectFormat_SubTarget: if sub, ok := meta["subscription-name"]; ok { sb.WriteString(sub) sb.WriteString(".") } err := dc.targetTpl.Execute(sb, meta) if err != nil { return "", err } case subjectFormat_SubTargetPath: if sub, ok := meta["subscription-name"]; ok { sb.WriteString(sub) sb.WriteString(".") } err := dc.targetTpl.Execute(sb, meta) if err != nil { return "", err } sb.WriteString(".") switch rsp := m.(type) { case *gnmi.SubscribeResponse: switch rsp := rsp.Response.(type) { case *gnmi.SubscribeResponse_Update: var prefixSubject string if rsp.Update.GetPrefix() != nil { prefixSubject = gNMIPathToSubject(rsp.Update.GetPrefix(), false) } var pathSubject string if len(rsp.Update.GetUpdate()) > 0 { pathSubject = gNMIPathToSubject(rsp.Update.GetUpdate()[0].GetPath(), false) } if prefixSubject != "" { sb.WriteString(prefixSubject) sb.WriteString(".") } if pathSubject != "" { sb.WriteString(pathSubject) } } } case subjectFormat_SubTargetPathWithKeys: if sub, ok := meta["subscription-name"]; ok { sb.WriteString(sub) sb.WriteString(".") } err := dc.targetTpl.Execute(sb, meta) if err != nil { return "", err } sb.WriteString(".") switch rsp := m.(type) { case *gnmi.SubscribeResponse: switch rsp := rsp.Response.(type) { case *gnmi.SubscribeResponse_Update: var prefixSubject string if rsp.Update.GetPrefix() != nil { prefixSubject = gNMIPathToSubject(rsp.Update.GetPrefix(), true) } var pathSubject string if len(rsp.Update.GetUpdate()) > 0 { pathSubject = gNMIPathToSubject(rsp.Update.GetUpdate()[0].GetPath(), true) } if prefixSubject != "" { sb.WriteString(prefixSubject) sb.WriteString(".") } if pathSubject != "" { sb.WriteString(pathSubject) } } } } return sb.String(), nil } func splitSubscribeResponse(m *gnmi.SubscribeResponse_Update) []proto.Message { if m == nil || m.Update == nil { return nil } rs := make([]proto.Message, 0, len(m.Update.GetUpdate())+len(m.Update.Delete)) for _, upd := range m.Update.GetUpdate() { rs = append(rs, &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: &gnmi.Notification{ Timestamp: m.Update.GetTimestamp(), Prefix: m.Update.GetPrefix(), Update: []*gnmi.Update{upd}, }, }, }) } for _, del := range m.Update.GetDelete() { rs = append(rs, &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: &gnmi.Notification{ Timestamp: m.Update.GetTimestamp(), Prefix: m.Update.GetPrefix(), Delete: []*gnmi.Path{del}, }, }, }) } return rs } func gNMIPathToSubject(p *gnmi.Path, keys bool) string { if p == nil { return "" } sb := stringBuilderPool.Get().(*strings.Builder) defer func() { sb.Reset() stringBuilderPool.Put(sb) }() if p.GetOrigin() != "" { fmt.Fprintf(sb, "%s.", p.GetOrigin()) } for i, e := range p.GetElem() { if i > 0 { sb.WriteString(".") } sb.WriteString(e.Name) if keys { if len(e.Key) > 0 { // sort keys by name kNames := make([]string, 0, len(e.Key)) for k := range e.Key { kNames = append(kNames, k) } sort.Strings(kNames) for _, k := range kNames { sk := sanitizeKey(e.GetKey()[k]) fmt.Fprintf(sb, ".{%s=%s}", k, sk) } } } } return sb.String() } var stringBuilderPool = sync.Pool{ New: func() any { return new(strings.Builder) }, } func sanitizeKey(k string) string { // Fast path: no special chars if !strings.ContainsAny(k, ". ") { return k } sb := stringBuilderPool.Get().(*strings.Builder) defer func() { sb.Reset() stringBuilderPool.Put(sb) }() sb.Grow(len(k)) for _, r := range k { switch r { case '.': sb.WriteRune('^') case ' ': sb.WriteRune('~') default: sb.WriteRune(r) } } return sb.String() } func storageType(s string) nats.StorageType { switch strings.ToLower(s) { case "file": return nats.FileStorage case "memory": return nats.MemoryStorage } return nats.MemoryStorage } func isValidRetentionPolicy(policy string) bool { switch strings.ToLower(policy) { case "limits", "workqueue": return true } return false } func retentionPolicy(s string) nats.RetentionPolicy { switch strings.ToLower(s) { case "workqueue": return nats.WorkQueuePolicy case "limits": return nats.LimitsPolicy } return nats.LimitsPolicy } func (n *jetstreamOutput) createStream(js nats.JetStreamContext, cfg *config) error { // If CreateStream is not configured, we're using an existing stream if cfg.CreateStream == nil { return nil } stream, err := js.StreamInfo(cfg.Stream) if err != nil { if !errors.Is(err, nats.ErrStreamNotFound) { return err } } // Stream exists, nothing to do if stream != nil { return nil } // Create stream with configured retention policy streamConfig := &nats.StreamConfig{ Name: cfg.Stream, Description: cfg.CreateStream.Description, Retention: retentionPolicy(cfg.CreateStream.Retention), Subjects: cfg.CreateStream.Subjects, Storage: storageType(cfg.CreateStream.Storage), MaxMsgs: cfg.CreateStream.MaxMsgs, MaxBytes: cfg.CreateStream.MaxBytes, MaxAge: cfg.CreateStream.MaxAge, MaxMsgSize: cfg.CreateStream.MaxMsgSize, } _, err = js.AddStream(streamConfig) return err } func channelNeedsSwap(old, nw *config) bool { if old == nil || nw == nil { return true } return old.BufferSize != nw.BufferSize } func needsWorkerRestart(old, nw *config) bool { if old == nil || nw == nil { return true } return old.NumWorkers != nw.NumWorkers || !old.TLS.Equal(nw.TLS) || old.Address != nw.Address || old.Username != nw.Username || old.Password != nw.Password } func streamChanged(old, nw *config) bool { if old == nil || nw == nil { return true } // stream name changed? if old.Stream != nw.Stream { return true } // create stream presence changed? if (old.CreateStream == nil) != (nw.CreateStream == nil) { return true } // both nil: nothing else to compare if old.CreateStream == nil && nw.CreateStream == nil { return false } // compare contents oc, nc := old.CreateStream, nw.CreateStream if oc.Description != nc.Description { return true } if !slices.Equal(oc.Subjects, nc.Subjects) { return true } if storageType(oc.Storage) != storageType(nc.Storage) { return true } if oc.MaxMsgs != nc.MaxMsgs { return true } if oc.MaxBytes != nc.MaxBytes { return true } if oc.MaxAge != nc.MaxAge { return true } if oc.MaxMsgSize != nc.MaxMsgSize { return true } return false } ================================================ FILE: pkg/outputs/nats_outputs/jetstream/jetstream_output_metrics.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package jetstream_output import ( "sync" "github.com/prometheus/client_golang/prometheus" ) var registerMetricsOnce sync.Once var jetStreamNumberOfSentMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "jetstream_output", Name: "number_of_jetstream_msgs_sent_success_total", Help: "Number of msgs successfully sent by gnmic jetstream output", }, []string{"publisher_id", "subject"}) var jetStreamNumberOfSentBytes = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "jetstream_output", Name: "number_of_written_jetstream_bytes_total", Help: "Number of bytes written by gnmic jetstream output", }, []string{"publisher_id", "subject"}) var jetStreamNumberOfFailSendMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "jetstream_output", Name: "number_of_jetstream_msgs_sent_fail_total", Help: "Number of failed msgs sent by gnmic jetstream output", }, []string{"publisher_id", "reason"}) var jetStreamSendDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "jetstream_output", Name: "msg_send_duration_ns", Help: "gnmic jetstream output send duration in ns", }, []string{"publisher_id"}) func (n *jetstreamOutput) initMetrics() { currCfg := n.cfg.Load() if currCfg == nil { return } jetStreamNumberOfSentMsgs.WithLabelValues(currCfg.Name, "").Add(0) jetStreamNumberOfSentBytes.WithLabelValues(currCfg.Name, "").Add(0) jetStreamNumberOfFailSendMsgs.WithLabelValues(currCfg.Name, "").Add(0) jetStreamSendDuration.WithLabelValues(currCfg.Name).Set(0) } func (n *jetstreamOutput) registerMetrics() error { currCfg := n.cfg.Load() if currCfg == nil { return nil } if !currCfg.EnableMetrics { return nil } if n.reg == nil { n.logger.Printf("ERR: metrics enabled but main registry is not initialized, enable main metrics under `api-server`") return nil } var err error registerMetricsOnce.Do(func() { if err = n.reg.Register(jetStreamNumberOfSentMsgs); err != nil { return } if err = n.reg.Register(jetStreamNumberOfSentBytes); err != nil { return } if err = n.reg.Register(jetStreamNumberOfFailSendMsgs); err != nil { return } if err = n.reg.Register(jetStreamSendDuration); err != nil { return } }) n.initMetrics() return err } ================================================ FILE: pkg/outputs/nats_outputs/jetstream/jetstream_output_test.go ================================================ package jetstream_output import ( "strings" "testing" "sync/atomic" "github.com/nats-io/nats.go" ) func Test_isValidRetentionPolicy(t *testing.T) { tests := []struct { name string policy string want bool }{ { name: "valid limits policy", policy: "limits", want: true, }, { name: "valid workqueue policy", policy: "workqueue", want: true, }, { name: "valid limits policy uppercase", policy: "LIMITS", want: true, }, { name: "valid workqueue policy uppercase", policy: "WORKQUEUE", want: true, }, { name: "valid limits policy mixed case", policy: "Limits", want: true, }, { name: "invalid empty policy", policy: "", want: false, }, { name: "invalid interest policy", policy: "interest", want: false, }, { name: "invalid random string", policy: "invalid", want: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := isValidRetentionPolicy(tt.policy); got != tt.want { t.Errorf("isValidRetentionPolicy() = %v, want %v", got, tt.want) } }) } } func Test_retentionPolicy(t *testing.T) { tests := []struct { name string policy string want nats.RetentionPolicy }{ { name: "workqueue policy lowercase", policy: "workqueue", want: nats.WorkQueuePolicy, }, { name: "workqueue policy uppercase", policy: "WORKQUEUE", want: nats.WorkQueuePolicy, }, { name: "workqueue policy mixed case", policy: "WorkQueue", want: nats.WorkQueuePolicy, }, { name: "limits policy lowercase", policy: "limits", want: nats.LimitsPolicy, }, { name: "limits policy uppercase", policy: "LIMITS", want: nats.LimitsPolicy, }, { name: "limits policy mixed case", policy: "Limits", want: nats.LimitsPolicy, }, { name: "empty string defaults to limits", policy: "", want: nats.LimitsPolicy, }, { name: "invalid policy defaults to limits", policy: "invalid", want: nats.LimitsPolicy, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := retentionPolicy(tt.policy); got != tt.want { t.Errorf("retentionPolicy() = %v, want %v", got, tt.want) } }) } } func Test_setDefaults(t *testing.T) { tests := []struct { name string cfg *config wantErr bool errMsg string }{ { name: "missing stream name", cfg: &config{ Stream: "", }, wantErr: true, errMsg: "missing stream name", }, { name: "valid create-stream with limits retention", cfg: &config{ Stream: "test-stream", CreateStream: &createStreamConfig{ Subjects: []string{"test.>"}, Retention: "limits", }, }, wantErr: false, }, { name: "valid create-stream with workqueue retention", cfg: &config{ Stream: "test-stream", CreateStream: &createStreamConfig{ Subjects: []string{"test.>"}, Retention: "workqueue", }, }, wantErr: false, }, { name: "invalid retention policy", cfg: &config{ Stream: "test-stream", CreateStream: &createStreamConfig{ Subjects: []string{"test.>"}, Retention: "interest", }, }, wantErr: true, errMsg: "invalid retention-policy: interest", }, { name: "create-stream with empty retention defaults to limits", cfg: &config{ Stream: "test-stream", CreateStream: &createStreamConfig{ Subjects: []string{"test.>"}, }, }, wantErr: false, }, { name: "create-stream with uppercase WORKQUEUE retention", cfg: &config{ Stream: "test-stream", CreateStream: &createStreamConfig{ Subjects: []string{"test.>"}, Retention: "WORKQUEUE", }, }, wantErr: false, }, { name: "create-stream with uppercase LIMITS retention", cfg: &config{ Stream: "test-stream", CreateStream: &createStreamConfig{ Subjects: []string{"test.>"}, Retention: "LIMITS", }, }, wantErr: false, }, { name: "create-stream with invalid retention", cfg: &config{ Stream: "test-stream", CreateStream: &createStreamConfig{ Subjects: []string{"test.>"}, Retention: "invalid", }, }, wantErr: true, errMsg: "invalid retention-policy: invalid", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := new(atomic.Pointer[config]) cfg.Store(tt.cfg) n := &jetstreamOutput{ cfg: cfg, } err := n.setDefaultsFor(tt.cfg) if tt.wantErr { if err == nil { t.Errorf("setDefaults() expected error but got nil") return } if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) { t.Errorf("setDefaults() error = %v, want error containing %v", err.Error(), tt.errMsg) } } else { if err != nil { t.Errorf("setDefaults() unexpected error = %v", err) return } // Verify defaults were set correctly rcfg := cfg.Load() if rcfg.CreateStream != nil { if rcfg.CreateStream.Retention == "" { t.Errorf("setDefaults() did not set default retention policy") } if rcfg.CreateStream.Retention != "" && rcfg.CreateStream.Retention != "limits" && rcfg.CreateStream.Retention != "workqueue" && rcfg.CreateStream.Retention != "LIMITS" && rcfg.CreateStream.Retention != "WORKQUEUE" { t.Errorf("setDefaults() set invalid retention policy: %s", rcfg.CreateStream.Retention) } } } }) } } ================================================ FILE: pkg/outputs/nats_outputs/nats/nats_metrics.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package nats_output import ( "sync" "github.com/prometheus/client_golang/prometheus" ) var registerMetricsOnce sync.Once var NatsNumberOfSentMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "nats_output", Name: "number_of_nats_msgs_sent_success_total", Help: "Number of msgs successfully sent by gnmic nats output", }, []string{"publisher_id", "subject"}) var NatsNumberOfSentBytes = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "nats_output", Name: "number_of_written_nats_bytes_total", Help: "Number of bytes written by gnmic nats output", }, []string{"publisher_id", "subject"}) var NatsNumberOfFailSendMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "nats_output", Name: "number_of_nats_msgs_sent_fail_total", Help: "Number of failed msgs sent by gnmic nats output", }, []string{"publisher_id", "reason"}) var NatsSendDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "nats_output", Name: "msg_send_duration_ns", Help: "gnmic nats output send duration in ns", }, []string{"publisher_id"}) func (n *NatsOutput) initMetrics() { currCfg := n.cfg.Load() if currCfg == nil { return } NatsNumberOfSentMsgs.WithLabelValues(currCfg.Name, "").Add(0) NatsNumberOfSentBytes.WithLabelValues(currCfg.Name, "").Add(0) NatsNumberOfFailSendMsgs.WithLabelValues(currCfg.Name, "").Add(0) NatsSendDuration.WithLabelValues(currCfg.Name).Set(0) } func (n *NatsOutput) registerMetrics() error { if n.reg == nil { return nil } var err error registerMetricsOnce.Do(func() { if err = n.reg.Register(NatsNumberOfSentMsgs); err != nil { return } if err = n.reg.Register(NatsNumberOfSentBytes); err != nil { return } if err = n.reg.Register(NatsNumberOfFailSendMsgs); err != nil { return } if err = n.reg.Register(NatsSendDuration); err != nil { return } }) n.initMetrics() return err } ================================================ FILE: pkg/outputs/nats_outputs/nats/nats_output.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package nats_output import ( "context" "encoding/json" "fmt" "io" "log" "net" "slices" "strings" "sync" "sync/atomic" "text/template" "time" "github.com/google/uuid" "github.com/nats-io/nats.go" "github.com/prometheus/client_golang/prometheus" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/gtemplate" "github.com/openconfig/gnmic/pkg/outputs" gutils "github.com/openconfig/gnmic/pkg/utils" "github.com/zestor-dev/zestor/store" ) const ( natsConnectWait = 2 * time.Second natsReconnectBufferSize = 100 * 1024 * 1024 defaultSubjectName = "telemetry" defaultFormat = "event" defaultNumWorkers = 1 defaultWriteTimeout = 5 * time.Second defaultAddress = "localhost:4222" loggingPrefix = "[nats_output:%s] " ) func init() { outputs.Register("nats", func() outputs.Output { return &NatsOutput{} }) } func (n *NatsOutput) init() { n.cfg = new(atomic.Pointer[Config]) n.dynCfg = new(atomic.Pointer[dynConfig]) n.msgChan = new(atomic.Pointer[chan *outputs.ProtoMsg]) n.wg = new(sync.WaitGroup) n.logger = log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags) } // NatsOutput // type NatsOutput struct { outputs.BaseOutput // Cfg *Config cfg *atomic.Pointer[Config] dynCfg *atomic.Pointer[dynConfig] // root context ctx context.Context cancelFn context.CancelFunc msgChan *atomic.Pointer[chan *outputs.ProtoMsg] // atomic channel swaps wg *sync.WaitGroup logger *log.Logger reg *prometheus.Registry store store.Store[any] } type dynConfig struct { targetTpl *template.Template msgTpl *template.Template evps []formatters.EventProcessor mo *formatters.MarshalOptions } // Config // type Config struct { Name string `mapstructure:"name,omitempty"` Address string `mapstructure:"address,omitempty"` SubjectPrefix string `mapstructure:"subject-prefix,omitempty"` Subject string `mapstructure:"subject,omitempty"` Username string `mapstructure:"username,omitempty"` Password string `mapstructure:"password,omitempty"` ConnectTimeWait time.Duration `mapstructure:"connect-time-wait,omitempty"` TLS *types.TLSConfig `mapstructure:"tls,omitempty" json:"tls,omitempty"` Format string `mapstructure:"format,omitempty"` SplitEvents bool `mapstructure:"split-events,omitempty"` AddTarget string `mapstructure:"add-target,omitempty"` TargetTemplate string `mapstructure:"target-template,omitempty"` MsgTemplate string `mapstructure:"msg-template,omitempty"` OverrideTimestamps bool `mapstructure:"override-timestamps,omitempty"` NumWorkers int `mapstructure:"num-workers,omitempty"` WriteTimeout time.Duration `mapstructure:"write-timeout,omitempty"` Debug bool `mapstructure:"debug,omitempty"` BufferSize uint `mapstructure:"buffer-size,omitempty"` EnableMetrics bool `mapstructure:"enable-metrics,omitempty"` EventProcessors []string `mapstructure:"event-processors,omitempty"` } func (n *NatsOutput) String() string { cfg := n.cfg.Load() b, err := json.Marshal(cfg) if err != nil { return "" } return string(b) } func (n *NatsOutput) buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) { tcs, ps, acts, err := gutils.GetConfigMaps(n.store) if err != nil { return nil, err } evps, err := formatters.MakeEventProcessors( logger, eventProcessors, ps, tcs, acts, ) if err != nil { return nil, err } return evps, nil } func (n *NatsOutput) setLogger(logger *log.Logger) { if logger != nil && n.logger != nil { n.logger.SetOutput(logger.Writer()) n.logger.SetFlags(logger.Flags()) } } // Init // func (n *NatsOutput) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error { n.init() // init struct fields newCfg := new(Config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } if newCfg.Name == "" { newCfg.Name = name } n.logger.SetPrefix(fmt.Sprintf(loggingPrefix, newCfg.Name)) options := &outputs.OutputOptions{} for _, opt := range opts { if err := opt(options); err != nil { return err } } n.store = options.Store // set defaults n.setDefaultsFor(newCfg) n.cfg.Store(newCfg) // apply logger n.setLogger(options.Logger) // initialize registry n.reg = options.Registry err = n.registerMetrics() if err != nil { return err } // initialize message channel msgChan := make(chan *outputs.ProtoMsg, newCfg.BufferSize) n.msgChan.Store(&msgChan) // prep dynamic config dc := new(dynConfig) dc.mo = &formatters.MarshalOptions{ Format: newCfg.Format, OverrideTS: newCfg.OverrideTimestamps, } // initialize event processors dc.evps, err = n.buildEventProcessors(options.Logger, newCfg.EventProcessors) if err != nil { return err } // initialize target template if newCfg.TargetTemplate == "" { dc.targetTpl = outputs.DefaultTargetTemplate } else if newCfg.AddTarget != "" { dc.targetTpl, err = gtemplate.CreateTemplate("target-template", newCfg.TargetTemplate) if err != nil { return err } dc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs) } // initialize message template if newCfg.MsgTemplate != "" { dc.msgTpl, err = gtemplate.CreateTemplate("msg-template", newCfg.MsgTemplate) if err != nil { return err } dc.msgTpl = dc.msgTpl.Funcs(outputs.TemplateFuncs) } n.dynCfg = new(atomic.Pointer[dynConfig]) n.dynCfg.Store(dc) // initialize context n.ctx, n.cancelFn = context.WithCancel(ctx) n.wg.Add(newCfg.NumWorkers) for i := 0; i < newCfg.NumWorkers; i++ { go n.worker(n.ctx, i) } return nil } func (n *NatsOutput) setDefaultsFor(cfg *Config) { if cfg.Format == "" { cfg.Format = defaultFormat } if cfg.Address == "" { cfg.Address = defaultAddress } if cfg.ConnectTimeWait <= 0 { cfg.ConnectTimeWait = natsConnectWait } if cfg.Subject == "" && cfg.SubjectPrefix == "" { cfg.Subject = defaultSubjectName } if cfg.Name == "" { cfg.Name = "gnmic-" + uuid.New().String() } if cfg.NumWorkers <= 0 { cfg.NumWorkers = defaultNumWorkers } if cfg.WriteTimeout <= 0 { cfg.WriteTimeout = defaultWriteTimeout } } func (n *NatsOutput) Validate(cfg map[string]any) error { ncfg := new(Config) err := outputs.DecodeConfig(cfg, ncfg) if err != nil { return err } _, err = gtemplate.CreateTemplate("target-template", ncfg.TargetTemplate) if err != nil { return err } _, err = gtemplate.CreateTemplate("msg-template", ncfg.MsgTemplate) if err != nil { return err } return nil } func (n *NatsOutput) Update(ctx context.Context, cfg map[string]any) error { newCfg := new(Config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } n.setDefaultsFor(newCfg) currCfg := n.cfg.Load() swapChannel := channelNeedsSwap(currCfg, newCfg) restartWorkers := needsWorkerRestart(currCfg, newCfg) rebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0 var targetTpl *template.Template if newCfg.TargetTemplate == "" { targetTpl = outputs.DefaultTargetTemplate } else if newCfg.AddTarget != "" { t, err := gtemplate.CreateTemplate("target-template", newCfg.TargetTemplate) if err != nil { return err } targetTpl = t.Funcs(outputs.TemplateFuncs) } else { targetTpl = outputs.DefaultTargetTemplate } var msgTpl *template.Template if newCfg.MsgTemplate != "" { t, err := gtemplate.CreateTemplate("msg-template", newCfg.MsgTemplate) if err != nil { return err } msgTpl = t.Funcs(outputs.TemplateFuncs) } dc := &dynConfig{ targetTpl: targetTpl, msgTpl: msgTpl, mo: &formatters.MarshalOptions{ Format: newCfg.Format, OverrideTS: newCfg.OverrideTimestamps, }, } prevDC := n.dynCfg.Load() if rebuildProcessors { dc.evps, err = n.buildEventProcessors(n.logger, newCfg.EventProcessors) if err != nil { return err } } else if prevDC != nil { dc.evps = prevDC.evps } n.dynCfg.Store(dc) n.cfg.Store(newCfg) if swapChannel || restartWorkers { var newMsgChan chan *outputs.ProtoMsg if swapChannel { newMsgChan = make(chan *outputs.ProtoMsg, newCfg.BufferSize) } else { newMsgChan = *n.msgChan.Load() } runCtx, cancel := context.WithCancel(n.ctx) newWG := new(sync.WaitGroup) // save old pointers oldCancel := n.cancelFn oldWG := n.wg oldMsgChan := *n.msgChan.Load() // swap n.cancelFn = cancel n.wg = newWG n.msgChan.Store(&newMsgChan) n.wg.Add(currCfg.NumWorkers) for i := 0; i < currCfg.NumWorkers; i++ { go n.worker(runCtx, i) } // cancel old workers and loops if oldCancel != nil { oldCancel() } if oldWG != nil { oldWG.Wait() } if swapChannel { // best effort drain old channel OUTER_LOOP: for { select { case msg, ok := <-oldMsgChan: if !ok { break } select { case newMsgChan <- msg: default: } default: break OUTER_LOOP } } } } n.logger.Printf("updated nats output: %s", n.String()) return nil } func (n *NatsOutput) UpdateProcessor(name string, pcfg map[string]any) error { cfg := n.cfg.Load() dc := n.dynCfg.Load() newEvps, changed, err := outputs.UpdateProcessorInSlice( n.logger, n.store, cfg.EventProcessors, dc.evps, name, pcfg, ) if err != nil { return err } if changed { newDC := *dc newDC.evps = newEvps n.dynCfg.Store(&newDC) n.logger.Printf("updated event processor %s", name) } return nil } // Write // func (n *NatsOutput) Write(ctx context.Context, rsp proto.Message, meta outputs.Meta) { dc := n.dynCfg.Load() cfg := n.cfg.Load() if rsp == nil || dc == nil || dc.mo == nil { return } wctx, cancel := context.WithTimeout(ctx, cfg.WriteTimeout) defer cancel() ch := n.msgChan.Load() select { case <-ctx.Done(): return case *ch <- outputs.NewProtoMsg(rsp, meta): case <-wctx.Done(): if cfg.Debug { n.logger.Printf("writing expired after %s, NATS output might not be initialized", cfg.WriteTimeout) } if cfg.EnableMetrics { NatsNumberOfFailSendMsgs.WithLabelValues(cfg.Name, "timeout").Inc() } return } } func (n *NatsOutput) WriteEvent(ctx context.Context, ev *formatters.EventMsg) {} // Close // func (n *NatsOutput) Close() error { n.cancelFn() n.wg.Wait() n.logger.Printf("closed nats output: %s", n.String()) return nil } func (n *NatsOutput) createNATSConn(c *Config, i int) (*nats.Conn, error) { opts := []nats.Option{ nats.Name(fmt.Sprintf("%s-%d", c.Name, i)), nats.SetCustomDialer(n), nats.ReconnectWait(c.ConnectTimeWait), nats.ReconnectBufSize(natsReconnectBufferSize), nats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) { n.logger.Printf("NATS error: %v", err) }), nats.DisconnectErrHandler(func(_ *nats.Conn, err error) { n.logger.Printf("Disconnected from NATS err=%v", err) }), nats.ClosedHandler(func(*nats.Conn) { n.logger.Println("NATS connection is closed") }), } if c.Username != "" && c.Password != "" { opts = append(opts, nats.UserInfo(c.Username, c.Password)) } if c.TLS != nil { tlsConfig, err := utils.NewTLSConfig( c.TLS.CaFile, c.TLS.CertFile, c.TLS.KeyFile, "", c.TLS.SkipVerify, false) if err != nil { return nil, err } if tlsConfig != nil { opts = append(opts, nats.Secure(tlsConfig)) } } nc, err := nats.Connect(c.Address, opts...) if err != nil { return nil, err } return nc, nil } // Dial // func (n *NatsOutput) Dial(network, address string) (net.Conn, error) { ctx, cancel := context.WithCancel(n.ctx) defer cancel() for { cfg := n.cfg.Load() n.logger.Printf("attempting to connect to %s", address) if ctx.Err() != nil { return nil, ctx.Err() } select { case <-n.ctx.Done(): return nil, n.ctx.Err() default: d := &net.Dialer{} conn, err := d.DialContext(ctx, network, address) if err != nil { n.logger.Printf("failed to connect to NATS server %s: %v", address, err) time.Sleep(cfg.ConnectTimeWait) continue } n.logger.Printf("successfully connected to NATS server %s", address) return conn, nil } } } func (n *NatsOutput) worker(ctx context.Context, i int) { defer n.wg.Done() var natsConn *nats.Conn var err error workerLogPrefix := fmt.Sprintf("worker-%d", i) defer n.logger.Printf("%s exited", workerLogPrefix) n.logger.Printf("%s starting", workerLogPrefix) msgChan := *n.msgChan.Load() CRCONN: if ctx.Err() != nil { return } cfg := n.cfg.Load() natsConn, err = n.createNATSConn(cfg, i) if err != nil { n.logger.Printf("%s failed to create connection: %v", workerLogPrefix, err) time.Sleep(cfg.ConnectTimeWait) goto CRCONN } for { select { case <-ctx.Done(): n.logger.Printf("%s flushing", workerLogPrefix) natsConn.FlushTimeout(time.Second) n.logger.Printf("%s shutting down", workerLogPrefix) natsConn.Close() return case m := <-msgChan: pmsg := m.GetMsg() // get fresh config cfg := n.cfg.Load() // snapshot template and marshal options dc := n.dynCfg.Load() name := fmt.Sprintf("%s-%d", cfg.Name, i) pmsg, err = outputs.AddSubscriptionTarget(pmsg, m.GetMeta(), cfg.AddTarget, dc.targetTpl) if err != nil { n.logger.Printf("failed to add target to the response: %v", err) } bb, err := outputs.Marshal(pmsg, m.GetMeta(), dc.mo, cfg.SplitEvents, dc.evps...) if err != nil { if cfg.Debug { n.logger.Printf("%s failed marshaling proto msg: %v", workerLogPrefix, err) } if cfg.EnableMetrics { NatsNumberOfFailSendMsgs.WithLabelValues(name, "marshal_error").Inc() } continue } if len(bb) == 0 { continue } for _, b := range bb { if dc.msgTpl != nil { b, err = outputs.ExecTemplate(b, dc.msgTpl) if err != nil { if cfg.Debug { log.Printf("failed to execute template: %v", err) } NatsNumberOfFailSendMsgs.WithLabelValues(name, "template_error").Inc() continue } } subject := n.subjectName(m.GetMeta(), cfg) var start time.Time if cfg.EnableMetrics { start = time.Now() } err = natsConn.Publish(subject, b) if err != nil { if cfg.Debug { n.logger.Printf("%s failed to write to nats subject '%s': %v", workerLogPrefix, subject, err) } if cfg.EnableMetrics { NatsNumberOfFailSendMsgs.WithLabelValues(cfg.Name, "publish_error").Inc() } if cfg.Debug { n.logger.Printf("%s closing connection to NATS '%s'", workerLogPrefix, subject) } natsConn.Close() time.Sleep(cfg.ConnectTimeWait) if cfg.Debug { n.logger.Printf("%s reconnecting to NATS", workerLogPrefix) } goto CRCONN } if cfg.EnableMetrics { NatsSendDuration.WithLabelValues(name).Set(float64(time.Since(start).Nanoseconds())) NatsNumberOfSentMsgs.WithLabelValues(name, subject).Inc() NatsNumberOfSentBytes.WithLabelValues(name, subject).Add(float64(len(b))) } } } } } var stringBuilderPool = sync.Pool{ New: func() any { return new(strings.Builder) }, } func (n *NatsOutput) subjectName(meta outputs.Meta, cfg *Config) string { if cfg.SubjectPrefix != "" { ssb := stringBuilderPool.Get().(*strings.Builder) defer func() { ssb.Reset() stringBuilderPool.Put(ssb) }() ssb.WriteString(cfg.SubjectPrefix) if s, ok := meta["source"]; ok { ssb.WriteString(".") for _, r := range s { switch r { case '.': ssb.WriteRune('-') case ' ': ssb.WriteRune('_') default: ssb.WriteRune(r) } } } if subname, ok := meta["subscription-name"]; ok { ssb.WriteString(".") for _, r := range subname { if r == ' ' { ssb.WriteRune('_') } else { ssb.WriteRune(r) } } } return ssb.String() } return strings.ReplaceAll(cfg.Subject, " ", "_") } func channelNeedsSwap(old, nw *Config) bool { if old == nil || nw == nil { return true } return old.BufferSize != nw.BufferSize } func needsWorkerRestart(old, nw *Config) bool { if old == nil || nw == nil { return true } return old.NumWorkers != nw.NumWorkers || !old.TLS.Equal(nw.TLS) || old.Address != nw.Address || old.Username != nw.Username || old.Password != nw.Password } ================================================ FILE: pkg/outputs/options.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package outputs import ( "log" "github.com/prometheus/client_golang/prometheus" "github.com/zestor-dev/zestor/store" ) type OutputOptions struct { Name string ClusterName string Logger *log.Logger Registry *prometheus.Registry Store store.Store[any] } type Option func(*OutputOptions) error func WithLogger(logger *log.Logger) Option { return func(o *OutputOptions) error { o.Logger = logger return nil } } func WithRegistry(reg *prometheus.Registry) Option { return func(o *OutputOptions) error { o.Registry = reg return nil } } func WithName(name string) Option { return func(o *OutputOptions) error { o.Name = name return nil } } func WithClusterName(name string) Option { return func(o *OutputOptions) error { o.ClusterName = name return nil } } func WithConfigStore(st store.Store[any]) Option { return func(o *OutputOptions) error { o.Store = st return nil } } ================================================ FILE: pkg/outputs/otlp_output/otlp_converter.go ================================================ // © 2025-2026 NVIDIA Corporation // // This code is a Contribution to the gNMIc project ("Work") made under the Google Software Grant and Corporate Contributor License Agreement ("CLA") and governed by the Apache License 2.0. // No other rights or licenses in or to any of NVIDIA's intellectual property are granted for any other purpose. // This code is provided on an "as is" basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package otlp_output import ( "context" "fmt" "strconv" "strings" "sync" "google.golang.org/grpc/metadata" metricsv1 "go.opentelemetry.io/proto/otlp/collector/metrics/v1" commonpb "go.opentelemetry.io/proto/otlp/common/v1" metricspb "go.opentelemetry.io/proto/otlp/metrics/v1" resourcepb "go.opentelemetry.io/proto/otlp/resource/v1" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/version" ) var stringsBuilderPool = sync.Pool{ New: func() any { return &strings.Builder{} }, } // convertToOTLP converts gNMI EventMsg slice to OTLP ExportMetricsServiceRequest func (o *otlpOutput) convertToOTLP(events []*formatters.EventMsg) *metricsv1.ExportMetricsServiceRequest { cfg := o.cfg.Load() if cfg.Debug { o.logger.Printf("DEBUG: convertToOTLP called with %d events", len(events)) } // Group events by resource (source) resourceGroups := o.groupByResource(events) if cfg.Debug { o.logger.Printf("DEBUG: Grouped into %d resource groups", len(resourceGroups)) } req := &metricsv1.ExportMetricsServiceRequest{ ResourceMetrics: make([]*metricspb.ResourceMetrics, 0, len(resourceGroups)), } totalMetrics := 0 skippedEvents := 0 for _, groupedEvents := range resourceGroups { rm := &metricspb.ResourceMetrics{ Resource: o.createResource(cfg, groupedEvents[0]), ScopeMetrics: []*metricspb.ScopeMetrics{ { Scope: &commonpb.InstrumentationScope{ Name: "gNMIc", Version: version.Version, }, Metrics: make([]*metricspb.Metric, 0, len(groupedEvents)), }, }, } // Convert each event to OTLP metric for _, event := range groupedEvents { metrics, err := o.convertEventToMetrics(cfg, event) if err != nil { if cfg.Debug { o.logger.Printf("DEBUG: failed to convert event %s: %v", event.Name, err) } skippedEvents++ continue } if len(metrics) == 0 { if cfg.Debug { o.logger.Printf("DEBUG: convertEvent returned nil for event: name=%s, values=%v", event.Name, event.Values) } skippedEvents++ continue } rm.ScopeMetrics[0].Metrics = append(rm.ScopeMetrics[0].Metrics, metrics...) totalMetrics += len(metrics) } if len(rm.ScopeMetrics[0].Metrics) > 0 { req.ResourceMetrics = append(req.ResourceMetrics, rm) } } if cfg.Debug { o.logger.Printf("DEBUG: Converted %d metrics, skipped %d events, %d ResourceMetrics", totalMetrics, skippedEvents, len(req.ResourceMetrics)) } return req } // groupByResource groups events by their source (device) for resource attribution func (o *otlpOutput) groupByResource(events []*formatters.EventMsg) map[string][]*formatters.EventMsg { groups := make(map[string][]*formatters.EventMsg) for _, event := range events { // Use source as resource key source := event.Tags["source"] if source == "" { source = "unknown" } groups[source] = append(groups[source], event) } return groups } // createResource creates OTLP Resource from event metadata. // Tags listed in cfg.ResourceTagKeys are placed as resource attributes. func (o *otlpOutput) createResource(cfg *config, event *formatters.EventMsg) *resourcepb.Resource { attrs := make([]*commonpb.KeyValue, 0, len(cfg.ResourceTagKeys)+len(cfg.ResourceAttributes)) for _, key := range cfg.ResourceTagKeys { if val, ok := event.Tags[key]; ok { attrs = append(attrs, &commonpb.KeyValue{ Key: key, Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: val}}, }) } } for key, val := range cfg.ResourceAttributes { attrs = append(attrs, &commonpb.KeyValue{ Key: key, Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: val}}, }) } return &resourcepb.Resource{ Attributes: attrs, } } // convertEventToMetrics converts a single gNMI event to OTLP metrics. // Returns nil if the event has no valid values to convert. func (o *otlpOutput) convertEventToMetrics(cfg *config, event *formatters.EventMsg) ([]*metricspb.Metric, error) { if len(event.Values) == 0 { if cfg.Debug { o.logger.Printf("DEBUG: event has no values (event: %s)", event.Name) } return nil, nil } attributes := o.extractAttributesForMetric(cfg, event) result := make([]*metricspb.Metric, 0, len(event.Values)) for k, v := range event.Values { metricName := o.buildMetricName(cfg, event, k) metric := &metricspb.Metric{ Name: metricName, } // Handle string values switch v := v.(type) { case string: if !cfg.StringsAsAttributes { if cfg.Debug { o.logger.Printf("DEBUG: skipping string value (strings-as-attributes=false): %s", event.Name) } continue } metric.Data = &metricspb.Metric_Gauge{ Gauge: o.createGaugeWithString(event, attributes, v), } result = append(result, metric) continue } dataPoint := o.createNumberDataPointWithValue(cfg, event, attributes, v) if dataPoint == nil { if cfg.Debug { o.logger.Printf("DEBUG: failed to create data point for value type %T (event: %s)", v, event.Name) } continue } if o.isCounter(cfg, k) { metric.Data = &metricspb.Metric_Sum{ Sum: &metricspb.Sum{ AggregationTemporality: metricspb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, IsMonotonic: true, DataPoints: []*metricspb.NumberDataPoint{dataPoint}, }, } } else { metric.Data = &metricspb.Metric_Gauge{ Gauge: &metricspb.Gauge{ DataPoints: []*metricspb.NumberDataPoint{dataPoint}, }, } } result = append(result, metric) } return result, nil } // buildMetricName creates metric name from event and value key // event.Name contains the subscription name (e.g., "nvos", "arista") // valueKey contains the metric path (e.g., "interfaces/interface/state/counters/in-octets") func (o *otlpOutput) buildMetricName(cfg *config, event *formatters.EventMsg, valueKey string) string { sb := stringsBuilderPool.Get().(*strings.Builder) defer func() { sb.Reset() stringsBuilderPool.Put(sb) }() // Add global prefix if configured if cfg.MetricPrefix != "" { sb.WriteString(cfg.MetricPrefix) sb.WriteString("_") } // Append subscription name if configured (for vendor-specific prefixes) if cfg.AppendSubscriptionName { sb.WriteString(event.Name) // subscription name (nvos, arista, etc.) sb.WriteString("_") } // Append the value key (metric path), converting slashes to underscores // e.g., "interfaces/interface/state/counters/in-octets" -> "interfaces_interface_state_counters_in_octets" // gNMI paths arrive with a leading "/"; strip it when configured so the conversion // does not produce a leading "_" (or a double "_" when a prefix is set). path := valueKey if cfg.StripLeadingUnderscore { path = strings.TrimPrefix(path, "/") } metricPath := strings.ReplaceAll(path, "/", "_") sb.WriteString(metricPath) name := sb.String() // Replace remaining hyphens with underscores (Prometheus convention) name = strings.ReplaceAll(name, "-", "_") return name } // extractAttributesForMetric extracts data point attributes from event tags. // Tags listed in cfg.ResourceTagKeys are excluded (they live on the Resource). func (o *otlpOutput) extractAttributesForMetric(cfg *config, event *formatters.EventMsg) []*commonpb.KeyValue { attrs := make([]*commonpb.KeyValue, 0, len(event.Tags)) for key, val := range event.Tags { if cfg.resourceTagSet[key] { continue } attrs = append(attrs, &commonpb.KeyValue{ Key: key, Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: val}}, }) } return attrs } // isCounter returns true if any of the configured counter-patterns match the value key. func (o *otlpOutput) isCounter(cfg *config, valueName string) bool { for _, re := range cfg.counterRegexes { if re.MatchString(valueName) { return true } } return false } // createNumberDataPointWithValue creates OTLP data point from event with a specific value func (o *otlpOutput) createNumberDataPointWithValue(cfg *config, event *formatters.EventMsg, attrs []*commonpb.KeyValue, value interface{}) *metricspb.NumberDataPoint { dp := &metricspb.NumberDataPoint{ Attributes: attrs, TimeUnixNano: uint64(event.Timestamp), } // Handle value conversion switch v := value.(type) { case int: dp.Value = &metricspb.NumberDataPoint_AsInt{AsInt: int64(v)} case int32: dp.Value = &metricspb.NumberDataPoint_AsInt{AsInt: int64(v)} case int64: dp.Value = &metricspb.NumberDataPoint_AsInt{AsInt: v} case uint: dp.Value = &metricspb.NumberDataPoint_AsInt{AsInt: int64(v)} case uint32: dp.Value = &metricspb.NumberDataPoint_AsInt{AsInt: int64(v)} case uint64: // Handle potential overflow maxInt64 := uint64(9223372036854775807) // math.MaxInt64 if v > maxInt64 { // Convert to double if too large for int64 dp.Value = &metricspb.NumberDataPoint_AsDouble{AsDouble: float64(v)} } else { dp.Value = &metricspb.NumberDataPoint_AsInt{AsInt: int64(v)} } case float32: dp.Value = &metricspb.NumberDataPoint_AsDouble{AsDouble: float64(v)} case float64: dp.Value = &metricspb.NumberDataPoint_AsDouble{AsDouble: v} case string: // Try to parse as number if fVal, err := strconv.ParseFloat(v, 64); err == nil { dp.Value = &metricspb.NumberDataPoint_AsDouble{AsDouble: fVal} } else { return nil } default: if cfg.Debug { o.logger.Printf("unsupported value type %T for metric %s", v, event.Name) } return nil } return dp } // createGaugeWithString creates OTLP Gauge with string value as attribute. // It copies attrs to avoid mutating the caller's shared slice. func (o *otlpOutput) createGaugeWithString(event *formatters.EventMsg, attrs []*commonpb.KeyValue, strVal string) *metricspb.Gauge { dpAttrs := make([]*commonpb.KeyValue, len(attrs), len(attrs)+1) copy(dpAttrs, attrs) dpAttrs = append(dpAttrs, &commonpb.KeyValue{ Key: "value", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: strVal}}, }) dp := &metricspb.NumberDataPoint{ Attributes: dpAttrs, TimeUnixNano: uint64(event.Timestamp), Value: &metricspb.NumberDataPoint_AsDouble{AsDouble: 1.0}, } return &metricspb.Gauge{ DataPoints: []*metricspb.NumberDataPoint{dp}, } } // validateRequest validates OTLP request structure func (o *otlpOutput) validateRequest(req *metricsv1.ExportMetricsServiceRequest) error { if req == nil { return fmt.Errorf("request is nil") } if len(req.ResourceMetrics) == 0 { return fmt.Errorf("ResourceMetrics is empty") } for i, rm := range req.ResourceMetrics { if rm.Resource == nil { return fmt.Errorf("ResourceMetrics[%d].Resource is nil", i) } if len(rm.ScopeMetrics) == 0 { return fmt.Errorf("ResourceMetrics[%d].ScopeMetrics is empty", i) } for j, sm := range rm.ScopeMetrics { if sm.Scope == nil { return fmt.Errorf("ScopeMetrics[%d].Scope is nil", j) } if len(sm.Metrics) == 0 { return fmt.Errorf("ScopeMetrics[%d].Metrics is empty", j) } for k, m := range sm.Metrics { if m.Name == "" { return fmt.Errorf("Metric[%d].Name is empty", k) } if m.Data == nil { return fmt.Errorf("Metric[%d].Data is nil", k) } if err := o.validateMetricData(i, j, k, m); err != nil { return err } } } } return nil } // validateMetricData validates metric data points func (o *otlpOutput) validateMetricData(rmIdx, smIdx, mIdx int, m *metricspb.Metric) error { var dataPoints []*metricspb.NumberDataPoint switch data := m.Data.(type) { case *metricspb.Metric_Gauge: if data.Gauge == nil { return fmt.Errorf("ResourceMetrics[%d].ScopeMetrics[%d].Metric[%d].Gauge is nil", rmIdx, smIdx, mIdx) } dataPoints = data.Gauge.DataPoints case *metricspb.Metric_Sum: if data.Sum == nil { return fmt.Errorf("ResourceMetrics[%d].ScopeMetrics[%d].Metric[%d].Sum is nil", rmIdx, smIdx, mIdx) } dataPoints = data.Sum.DataPoints case *metricspb.Metric_Histogram: return nil case *metricspb.Metric_ExponentialHistogram: return nil case *metricspb.Metric_Summary: return nil default: return fmt.Errorf("ResourceMetrics[%d].ScopeMetrics[%d].Metric[%d] has unknown data type: %T", rmIdx, smIdx, mIdx, m.Data) } if len(dataPoints) == 0 { return fmt.Errorf("ResourceMetrics[%d].ScopeMetrics[%d].Metric[%d] has no data points", rmIdx, smIdx, mIdx) } for dpIdx, dp := range dataPoints { if dp.TimeUnixNano == 0 { return fmt.Errorf("ResourceMetrics[%d].ScopeMetrics[%d].Metric[%d].DataPoint[%d] has zero timestamp", rmIdx, smIdx, mIdx, dpIdx) } if dp.Value == nil { return fmt.Errorf("ResourceMetrics[%d].ScopeMetrics[%d].Metric[%d].DataPoint[%d] has nil value", rmIdx, smIdx, mIdx, dpIdx) } switch v := dp.Value.(type) { case *metricspb.NumberDataPoint_AsInt: // Valid case *metricspb.NumberDataPoint_AsDouble: // Valid default: return fmt.Errorf("ResourceMetrics[%d].ScopeMetrics[%d].Metric[%d].DataPoint[%d] has invalid value type: %T", rmIdx, smIdx, mIdx, dpIdx, v) } } return nil } // sendGRPC sends the OTLP metrics via gRPC func (o *otlpOutput) sendGRPC(ctx context.Context, req *metricsv1.ExportMetricsServiceRequest) error { cfg := o.cfg.Load() gs := o.grpcState.Load() if gs == nil || gs.client == nil { return fmt.Errorf("gRPC client not initialized") } if err := o.validateRequest(req); err != nil { o.logger.Printf("VALIDATION ERROR: %v", err) return fmt.Errorf("request validation failed: %w", err) } if len(cfg.Headers) > 0 { md := metadata.New(cfg.Headers) ctx = metadata.NewOutgoingContext(ctx, md) } if cfg.Timeout > 0 { var cancel func() ctx, cancel = context.WithTimeout(ctx, cfg.Timeout) defer cancel() } if cfg.Debug { o.logger.Printf("DEBUG: Sending OTLP request with %d ResourceMetrics", len(req.ResourceMetrics)) if len(req.ResourceMetrics) > 0 && len(req.ResourceMetrics[0].ScopeMetrics) > 0 { o.logger.Printf("DEBUG: First ScopeMetric has %d Metrics", len(req.ResourceMetrics[0].ScopeMetrics[0].Metrics)) } } response, err := gs.client.Export(ctx, req) if err != nil { if cfg.Debug { o.logger.Printf("DEBUG: gRPC Export returned error: %v", err) } return fmt.Errorf("grpc export failed: %w", err) } if cfg.Debug { o.logger.Printf("DEBUG: gRPC Export succeeded") } if response.PartialSuccess != nil && response.PartialSuccess.RejectedDataPoints > 0 { errMsg := fmt.Sprintf("OTEL rejected %d data points: %s", response.PartialSuccess.RejectedDataPoints, response.PartialSuccess.ErrorMessage) o.logger.Printf("ERROR: %s", errMsg) if cfg.EnableMetrics { otlpRejectedDataPoints.WithLabelValues(cfg.Name).Add(float64(response.PartialSuccess.RejectedDataPoints)) } return fmt.Errorf("%s", errMsg) } return nil } ================================================ FILE: pkg/outputs/otlp_output/otlp_metrics.go ================================================ // © 2025 NVIDIA Corporation // // This code is a Contribution to the gNMIc project ("Work") made under the Google Software Grant and Corporate Contributor License Agreement ("CLA") and governed by the Apache License 2.0. // No other rights or licenses in or to any of NVIDIA's intellectual property are granted for any other purpose. // This code is provided on an "as is" basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package otlp_output import ( "github.com/prometheus/client_golang/prometheus" ) var otlpNumberOfSentEvents = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "otlp_output", Name: "number_of_sent_events_total", Help: "Number of events successfully sent to OTLP collector", }, []string{"output_name"}) var otlpNumberOfFailedEvents = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "otlp_output", Name: "number_of_failed_events_total", Help: "Number of events that failed to send to OTLP collector", }, []string{"output_name", "reason"}) var otlpSendDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: "gnmic", Subsystem: "otlp_output", Name: "send_duration_seconds", Help: "Duration of sending batches to OTLP collector", }, []string{"output_name"}) var otlpRejectedDataPoints = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "otlp_output", Name: "rejected_data_points_total", Help: "Number of data points rejected by OTLP collector (PartialSuccess)", }, []string{"output_name"}) ================================================ FILE: pkg/outputs/otlp_output/otlp_output.go ================================================ // © 2025-2026 NVIDIA Corporation // // This code is a Contribution to the gNMIc project ("Work") made under the Google Software Grant and Corporate Contributor License Agreement ("CLA") and governed by the Apache License 2.0. // No other rights or licenses in or to any of NVIDIA's intellectual property are granted for any other purpose. // This code is provided on an "as is" basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package otlp_output import ( "context" "crypto/tls" "encoding/json" "fmt" "io" "log" "regexp" "slices" "sync" "sync/atomic" "time" "github.com/google/uuid" "github.com/prometheus/client_golang/prometheus" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmi/proto/gnmi" gutils "github.com/openconfig/gnmic/pkg/utils" metricsv1 "go.opentelemetry.io/proto/otlp/collector/metrics/v1" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/outputs" "github.com/zestor-dev/zestor/store" ) const ( outputType = "otlp" defaultTimeout = 10 * time.Second defaultBatchSize = 1000 defaultNumWorkers = 1 defaultMaxRetries = 3 defaultProtocol = "grpc" loggingPrefix = "[otlp_output:%s] " ) func init() { outputs.Register(outputType, func() outputs.Output { return &otlpOutput{} }) } // otlpOutput implements the Output interface for OTLP metrics export type otlpOutput struct { outputs.BaseOutput cfg *atomic.Pointer[config] dynCfg *atomic.Pointer[dynConfig] grpcState *atomic.Pointer[grpcClientState] eventCh *atomic.Pointer[chan *formatters.EventMsg] logger *log.Logger rootCtx context.Context cancelFn context.CancelFunc wg *sync.WaitGroup // Metrics reg *prometheus.Registry // store store store.Store[any] } type dynConfig struct { evps []formatters.EventProcessor } type grpcClientState struct { conn *grpc.ClientConn client metricsv1.MetricsServiceClient } // config holds the OTLP output configuration type config struct { // name of the output Name string `mapstructure:"name,omitempty"` // endpoint of the OTLP collector Endpoint string `mapstructure:"endpoint,omitempty"` // "grpc" or "http" // defaults to "grpc" Protocol string `mapstructure:"protocol,omitempty"` // RPC timeout Timeout time.Duration `mapstructure:"timeout,omitempty"` // TLS configuration TLS *types.TLSConfig `mapstructure:"tls,omitempty"` // Batching BatchSize int `mapstructure:"batch-size,omitempty"` Interval time.Duration `mapstructure:"interval,omitempty"` BufferSize int `mapstructure:"buffer-size,omitempty"` // Retry MaxRetries int `mapstructure:"max-retries,omitempty"` // Metric naming // string, to be used as the metric namespace MetricPrefix string `mapstructure:"metric-prefix,omitempty"` // boolean, if true the subscription name will be prepended to the metric name after the prefix. AppendSubscriptionName bool `mapstructure:"append-subscription-name,omitempty"` // boolean, if true, string type values are exported as gauge metrics with value=1 // and the string stored as an attribute named "value". // if false, string values are dropped. StringsAsAttributes bool `mapstructure:"strings-as-attributes,omitempty"` // boolean, if true, the leading "/" of the metric path is trimmed before the // slash-to-underscore conversion, so a path like "/interfaces/..." becomes // "interfaces_..." instead of "_interfaces_...". Defaults to false for // backward compatibility. StripLeadingUnderscore bool `mapstructure:"strip-leading-underscore,omitempty"` // Tags whose values are placed as OTLP Resource attributes and excluded // from data point attributes. // Set to an empty list to put all tags on data points (useful for Prometheus compatibility). ResourceTagKeys []string `mapstructure:"resource-tag-keys,omitempty"` // Regex patterns matched against the value key to classify a metric as a // monotonic cumulative counter (Sum). Unmatched metrics become Gauges. // If empty, all metrics are exported as Gauges. CounterPatterns []string `mapstructure:"counter-patterns,omitempty"` // Resource attributes ResourceAttributes map[string]string `mapstructure:"resource-attributes,omitempty"` // Headers to include with every export request (gRPC metadata / HTTP headers). // Use this to set e.g. "X-Scope-OrgID" for Grafana Mimir, Loki, Tempo, etc. Headers map[string]string `mapstructure:"headers,omitempty"` // Precomputed lookup set for ResourceTagKeys (not from config file). resourceTagSet map[string]bool // Compiled regexes from CounterPatterns. counterRegexes []*regexp.Regexp // Performance NumWorkers int `mapstructure:"num-workers,omitempty"` // Debugging Debug bool `mapstructure:"debug,omitempty"` EnableMetrics bool `mapstructure:"enable-metrics,omitempty"` // Event processors EventProcessors []string `mapstructure:"event-processors,omitempty"` } func (o *otlpOutput) initFields() { o.cfg = new(atomic.Pointer[config]) o.dynCfg = new(atomic.Pointer[dynConfig]) o.grpcState = new(atomic.Pointer[grpcClientState]) o.eventCh = new(atomic.Pointer[chan *formatters.EventMsg]) o.wg = new(sync.WaitGroup) o.logger = log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags) } func (o *otlpOutput) String() string { cfg := o.cfg.Load() b, err := json.Marshal(cfg) if err != nil { return "" } return string(b) } // Init initializes the OTLP output func (o *otlpOutput) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error { o.initFields() ncfg := new(config) err := outputs.DecodeConfig(cfg, ncfg) if err != nil { return err } if ncfg.Name == "" { ncfg.Name = name } o.logger.SetPrefix(fmt.Sprintf(loggingPrefix, ncfg.Name)) options := &outputs.OutputOptions{} for _, opt := range opts { if err := opt(options); err != nil { return err } } o.store = options.Store // Set defaults if options.Name != "" { ncfg.Name = options.Name } o.setDefaultsFor(ncfg) // Apply logger if options.Logger != nil { o.logger.SetOutput(options.Logger.Writer()) o.logger.SetFlags(options.Logger.Flags()) } o.cfg.Store(ncfg) // Initialize registry o.reg = options.Registry err = o.registerMetrics() if err != nil { return err } // Initialize event processors dc := new(dynConfig) dc.evps, err = o.buildEventProcessors(ncfg) if err != nil { return err } o.dynCfg.Store(dc) // Initialize transport switch ncfg.Protocol { case "grpc": gs, err := o.initGRPCFor(ncfg) if err != nil { return fmt.Errorf("failed to initialize gRPC transport: %w", err) } o.grpcState.Store(gs) case "http": return fmt.Errorf("HTTP transport not yet implemented") default: return fmt.Errorf("unsupported protocol '%s': must be 'grpc' or 'http'", ncfg.Protocol) } // Initialize worker channels eventCh := make(chan *formatters.EventMsg, ncfg.BufferSize) o.eventCh.Store(&eventCh) // Start workers o.rootCtx = ctx var wctx context.Context wctx, o.cancelFn = context.WithCancel(o.rootCtx) o.wg.Add(ncfg.NumWorkers) for i := 0; i < ncfg.NumWorkers; i++ { go o.worker(wctx, i) } o.logger.Printf("initialized OTLP output: endpoint=%s, protocol=%s, batch-size=%d, workers=%d", ncfg.Endpoint, ncfg.Protocol, ncfg.BatchSize, ncfg.NumWorkers) return nil } func (o *otlpOutput) Update(ctx context.Context, cfg map[string]any) error { newCfg := new(config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } o.setDefaultsFor(newCfg) if err := o.validateConfig(newCfg); err != nil { return err } currCfg := o.cfg.Load() swapChannel := channelNeedsSwap(currCfg, newCfg) restartWorkers := needsWorkerRestart(currCfg, newCfg) rebuildGRPC := needsGRPCRebuild(currCfg, newCfg) rebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0 dc := new(dynConfig) prevDC := o.dynCfg.Load() if rebuildProcessors { dc.evps, err = o.buildEventProcessors(newCfg) if err != nil { return err } } else if prevDC != nil { dc.evps = prevDC.evps } o.dynCfg.Store(dc) if rebuildGRPC { gs, err := o.initGRPCFor(newCfg) if err != nil { return fmt.Errorf("failed to rebuild gRPC transport: %w", err) } oldState := o.grpcState.Swap(gs) if oldState != nil && oldState.conn != nil { oldState.conn.Close() } } o.cfg.Store(newCfg) if swapChannel || restartWorkers { var newChan chan *formatters.EventMsg if swapChannel { newChan = make(chan *formatters.EventMsg, newCfg.BufferSize) } else { newChan = *o.eventCh.Load() } runCtx, cancel := context.WithCancel(o.rootCtx) newWG := new(sync.WaitGroup) oldCancel := o.cancelFn oldWG := o.wg oldEventCh := *o.eventCh.Load() o.cancelFn = cancel o.wg = newWG o.eventCh.Store(&newChan) o.wg.Add(newCfg.NumWorkers) for i := 0; i < newCfg.NumWorkers; i++ { go o.worker(runCtx, i) } if oldCancel != nil { oldCancel() } if oldWG != nil { oldWG.Wait() } if swapChannel { OUTER_LOOP: for { select { case ev, ok := <-oldEventCh: if !ok { break OUTER_LOOP } select { case newChan <- ev: default: } default: break OUTER_LOOP } } } } o.logger.Printf("updated OTLP output: %s", o.String()) return nil } func (o *otlpOutput) Validate(cfg map[string]any) error { ncfg := new(config) err := outputs.DecodeConfig(cfg, ncfg) if err != nil { return err } o.setDefaultsFor(ncfg) return o.validateConfig(ncfg) } func (o *otlpOutput) UpdateProcessor(name string, pcfg map[string]any) error { cfg := o.cfg.Load() dc := o.dynCfg.Load() newEvps, changed, err := outputs.UpdateProcessorInSlice( o.logger, o.store, cfg.EventProcessors, dc.evps, name, pcfg, ) if err != nil { return err } if changed { newDC := *dc newDC.evps = newEvps o.dynCfg.Store(&newDC) o.logger.Printf("updated event processor %s", name) } return nil } // Write handles incoming gNMI messages func (o *otlpOutput) Write(ctx context.Context, rsp proto.Message, meta outputs.Meta) { if rsp == nil { return } cfg := o.cfg.Load() dc := o.dynCfg.Load() if dc == nil { return } // Type assert to gNMI SubscribeResponse subsResp, ok := rsp.(*gnmi.SubscribeResponse) if !ok { if cfg.Debug { o.logger.Printf("received non-SubscribeResponse message, ignoring") } return } // Convert gNMI response to EventMsg format subscriptionName := meta["subscription-name"] if subscriptionName == "" { subscriptionName = "default" } events, err := formatters.ResponseToEventMsgs(subscriptionName, subsResp, meta, dc.evps...) if err != nil { if cfg.Debug { o.logger.Printf("failed to convert response to events: %v", err) } return } // Send events to worker channel eventCh := *o.eventCh.Load() for _, event := range events { select { case eventCh <- event: case <-ctx.Done(): return default: if cfg.Debug { o.logger.Printf("event channel full, dropping event") } if cfg.EnableMetrics { otlpNumberOfFailedEvents.WithLabelValues(cfg.Name, "channel_full").Inc() } } } } // WriteEvent handles individual EventMsg func (o *otlpOutput) WriteEvent(ctx context.Context, ev *formatters.EventMsg) { if ev == nil { return } cfg := o.cfg.Load() eventCh := *o.eventCh.Load() select { case eventCh <- ev: case <-ctx.Done(): return default: if cfg.Debug { o.logger.Printf("event channel full, dropping event") } if cfg.EnableMetrics { otlpNumberOfFailedEvents.WithLabelValues(cfg.Name, "channel_full").Inc() } } } // Close closes the OTLP output func (o *otlpOutput) Close() error { if o.cancelFn != nil { o.cancelFn() } // Close event channel eventCh := o.eventCh.Load() if eventCh != nil { close(*eventCh) } // Wait for workers to finish o.wg.Wait() // Close gRPC connection gs := o.grpcState.Load() if gs != nil && gs.conn != nil { return gs.conn.Close() } return nil } // worker processes events in batches func (o *otlpOutput) worker(ctx context.Context, id int) { defer o.wg.Done() cfg := o.cfg.Load() if cfg.Debug { o.logger.Printf("worker %d started", id) } batch := make([]*formatters.EventMsg, 0, cfg.BatchSize) ticker := time.NewTicker(cfg.Interval) defer ticker.Stop() eventCh := *o.eventCh.Load() for { select { case <-ctx.Done(): if len(batch) > 0 { flushCtx, cancel := context.WithTimeout(context.Background(), cfg.Timeout) defer cancel() o.sendBatch(flushCtx, batch) } if cfg.Debug { o.logger.Printf("worker %d stopped", id) } return case event, ok := <-eventCh: if !ok { if len(batch) > 0 { flushCtx, cancel := context.WithTimeout(context.Background(), cfg.Timeout) defer cancel() o.sendBatch(flushCtx, batch) } return } batch = append(batch, event) if len(batch) >= cfg.BatchSize { o.sendBatch(ctx, batch) batch = make([]*formatters.EventMsg, 0, cfg.BatchSize) } case <-ticker.C: if len(batch) > 0 { o.sendBatch(ctx, batch) batch = make([]*formatters.EventMsg, 0, cfg.BatchSize) } } } } func (o *otlpOutput) sendBatch(ctx context.Context, events []*formatters.EventMsg) { if len(events) == 0 { return } cfg := o.cfg.Load() start := time.Now() req := o.convertToOTLP(events) var err error for attempt := 0; attempt <= cfg.MaxRetries; attempt++ { err = o.sendGRPC(ctx, req) if err == nil { if cfg.Debug { o.logger.Printf("successfully sent %d events (attempt %d)", len(events), attempt+1) } if cfg.EnableMetrics { otlpNumberOfSentEvents.WithLabelValues(cfg.Name).Add(float64(len(events))) otlpSendDuration.WithLabelValues(cfg.Name).Observe(time.Since(start).Seconds()) } return } if attempt < cfg.MaxRetries { time.Sleep(time.Duration(attempt+1) * 100 * time.Millisecond) } } o.logger.Printf("failed to send batch after %d retries: %v", cfg.MaxRetries, err) if cfg.EnableMetrics { otlpNumberOfFailedEvents.WithLabelValues(cfg.Name, "send_failed").Add(float64(len(events))) } } func (o *otlpOutput) setDefaultsFor(c *config) { if c.Timeout == 0 { c.Timeout = defaultTimeout } if c.BatchSize == 0 { c.BatchSize = defaultBatchSize } if c.NumWorkers == 0 { c.NumWorkers = defaultNumWorkers } if c.MaxRetries == 0 { c.MaxRetries = defaultMaxRetries } if c.Protocol == "" { c.Protocol = defaultProtocol } if c.Name == "" { c.Name = "gnmic-otlp-" + uuid.New().String() } if c.Interval == 0 { c.Interval = 5 * time.Second } if c.BufferSize == 0 { c.BufferSize = c.BatchSize * 2 } c.resourceTagSet = make(map[string]bool, len(c.ResourceTagKeys)) for _, k := range c.ResourceTagKeys { c.resourceTagSet[k] = true } } func (o *otlpOutput) validateConfig(c *config) error { if c.Endpoint == "" { return fmt.Errorf("endpoint is required") } c.counterRegexes = make([]*regexp.Regexp, 0, len(c.CounterPatterns)) for _, p := range c.CounterPatterns { re, err := regexp.Compile(p) if err != nil { return fmt.Errorf("invalid counter-pattern %q: %w", p, err) } c.counterRegexes = append(c.counterRegexes, re) } return nil } func (o *otlpOutput) buildEventProcessors(cfg *config) ([]formatters.EventProcessor, error) { tcs, ps, acts, err := gutils.GetConfigMaps(o.store) if err != nil { return nil, err } return formatters.MakeEventProcessors(o.logger, cfg.EventProcessors, ps, tcs, acts) } func (o *otlpOutput) initGRPCFor(cfg *config) (*grpcClientState, error) { var opts []grpc.DialOption if cfg.TLS != nil { tlsConfig, err := o.createTLSConfigFor(cfg) if err != nil { return nil, fmt.Errorf("failed to create TLS config: %w", err) } opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) } else { opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) } conn, err := grpc.NewClient(cfg.Endpoint, opts...) if err != nil { return nil, fmt.Errorf("failed to create OTLP client: %w", err) } o.logger.Printf("initialized OTLP gRPC client for endpoint: %s", cfg.Endpoint) return &grpcClientState{ conn: conn, client: metricsv1.NewMetricsServiceClient(conn), }, nil } func (o *otlpOutput) createTLSConfigFor(cfg *config) (*tls.Config, error) { tlsConfig := &tls.Config{ InsecureSkipVerify: cfg.TLS.SkipVerify, } if cfg.TLS.CaFile != "" || cfg.TLS.CertFile != "" { return utils.NewTLSConfig( cfg.TLS.CaFile, cfg.TLS.CertFile, cfg.TLS.KeyFile, "", cfg.TLS.SkipVerify, false, ) } return tlsConfig, nil } func (o *otlpOutput) registerMetrics() error { cfg := o.cfg.Load() if !cfg.EnableMetrics { return nil } if o.reg == nil { return nil } if err := o.reg.Register(otlpNumberOfSentEvents); err != nil { return err } if err := o.reg.Register(otlpNumberOfFailedEvents); err != nil { return err } if err := o.reg.Register(otlpSendDuration); err != nil { return err } if err := o.reg.Register(otlpRejectedDataPoints); err != nil { return err } return nil } // Helper functions for detecting config changes func channelNeedsSwap(old, nw *config) bool { if old == nil || nw == nil { return true } return old.BufferSize != nw.BufferSize } func needsWorkerRestart(old, nw *config) bool { if old == nil || nw == nil { return true } return old.NumWorkers != nw.NumWorkers || old.BatchSize != nw.BatchSize || old.Interval != nw.Interval } func needsGRPCRebuild(old, nw *config) bool { if old == nil || nw == nil { return true } return old.Endpoint != nw.Endpoint || old.Protocol != nw.Protocol || !old.TLS.Equal(nw.TLS) } ================================================ FILE: pkg/outputs/otlp_output/otlp_output_test.go ================================================ // © 2025-2026 NVIDIA Corporation // // This code is a Contribution to the gNMIc project ("Work") made under the Google Software Grant and Corporate Contributor License Agreement ("CLA") and governed by the Apache License 2.0. // No other rights or licenses in or to any of NVIDIA's intellectual property are granted for any other purpose. // This code is provided on an "as is" basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package otlp_output import ( "context" "io" "log" "net" "regexp" "sync" "sync/atomic" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" metricsv1 "go.opentelemetry.io/proto/otlp/collector/metrics/v1" commonpb "go.opentelemetry.io/proto/otlp/common/v1" "google.golang.org/grpc" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/outputs" "github.com/zestor-dev/zestor/store" "github.com/zestor-dev/zestor/store/gomap" ) // newTestOutput creates an otlpOutput suitable for converter tests (no Init required). func newTestOutput(cfg *config) *otlpOutput { cfg.resourceTagSet = make(map[string]bool, len(cfg.ResourceTagKeys)) for _, k := range cfg.ResourceTagKeys { cfg.resourceTagSet[k] = true } cfg.counterRegexes = make([]*regexp.Regexp, 0, len(cfg.CounterPatterns)) for _, p := range cfg.CounterPatterns { cfg.counterRegexes = append(cfg.counterRegexes, regexp.MustCompile(p)) } o := &otlpOutput{} o.cfg = new(atomic.Pointer[config]) o.cfg.Store(cfg) o.logger = log.New(io.Discard, "", 0) return o } // Test 1: OTLP Message Structure func TestOTLP_MessageStructure(t *testing.T) { t.Skip("Implementation pending") // Test that gNMI metrics convert to proper OTLP structure // Given: gNMI metric update event := &formatters.EventMsg{ Name: "interfaces_interface_state_counters_in_octets", Timestamp: time.Now().UnixNano(), Tags: map[string]string{ "interface_name": "Ethernet1", "source": "10.1.1.1:6030", }, Values: map[string]interface{}{ "value": int64(1234567890), }, } // When: Converting to OTLP output := newTestOutput(&config{ Endpoint: "localhost:4317", }) otlpMetrics := output.convertToOTLP([]*formatters.EventMsg{event}) // Then: Should have proper OTLP structure require.NotNil(t, otlpMetrics) require.Equal(t, 1, len(otlpMetrics.ResourceMetrics)) require.Equal(t, 1, len(otlpMetrics.ResourceMetrics[0].ScopeMetrics)) metric := otlpMetrics.ResourceMetrics[0].ScopeMetrics[0].Metrics[0] assert.Equal(t, "interfaces_interface_state_counters_in_octets", metric.Name) // Verify it's a Sum (monotonic counter) assert.NotNil(t, metric.GetSum()) assert.True(t, metric.GetSum().IsMonotonic) } // Test 2: Resource Attributes func TestOTLP_ResourceAttributes(t *testing.T) { t.Skip("Implementation pending") // Test that device metadata becomes OTLP resource attributes // Given: gNMI update with device metadata event := &formatters.EventMsg{ Name: "interfaces_interface_state_counters_in_octets", Timestamp: time.Now().UnixNano(), Tags: map[string]string{ "device": "switch1-jhb01", "vendor": "arista", "site": "jhb01", "source": "10.1.1.1:6030", }, Values: map[string]interface{}{ "value": int64(100), }, } // When: Converting to OTLP output := newTestOutput(&config{}) otlpMetrics := output.convertToOTLP([]*formatters.EventMsg{event}) // Then: Resource attributes should match metadata resource := otlpMetrics.ResourceMetrics[0].Resource assert.Equal(t, "switch1-jhb01", getAttributeValue(resource, "device")) assert.Equal(t, "arista", getAttributeValue(resource, "vendor")) assert.Equal(t, "jhb01", getAttributeValue(resource, "site")) assert.Equal(t, "10.1.1.1:6030", getAttributeValue(resource, "source")) } // Test 3: Metric Attributes from Path Keys func TestOTLP_PathKeysAsAttributes(t *testing.T) { t.Skip("Implementation pending") // Test that gNMI path keys become OTLP metric attributes // Given: Event with path key as tag event := &formatters.EventMsg{ Name: "interfaces_interface_state_counters_in_octets", Timestamp: time.Now().UnixNano(), Tags: map[string]string{ "interface_name": "Ethernet1", "source": "10.1.1.1:6030", }, Values: map[string]interface{}{ "value": int64(999), }, } // When: Converting to OTLP output := newTestOutput(&config{}) otlpMetrics := output.convertToOTLP([]*formatters.EventMsg{event}) // Then: Path key becomes attribute dataPoint := otlpMetrics.ResourceMetrics[0].ScopeMetrics[0].Metrics[0].GetSum().DataPoints[0] assert.Equal(t, "Ethernet1", getDataPointAttribute(dataPoint, "interface_name")) } // Test 4: Metric Type Detection func TestOTLP_MetricTypeDetection(t *testing.T) { t.Skip("Implementation pending") tests := []struct { name string metricName string value interface{} expectedType string // "Sum" or "Gauge" expectedMonotonic bool }{ { name: "counter metric", metricName: "interfaces_interface_state_counters_in_octets", value: int64(1000), expectedType: "Sum", expectedMonotonic: true, }, { name: "gauge metric - temperature", metricName: "components_component_temperature_instant", value: 45.5, expectedType: "Gauge", expectedMonotonic: false, }, { name: "gauge metric - status", metricName: "interfaces_interface_state_oper_status", value: "up", expectedType: "Gauge", expectedMonotonic: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { event := &formatters.EventMsg{ Name: tt.metricName, Timestamp: time.Now().UnixNano(), Values: map[string]interface{}{ "value": tt.value, }, } output := newTestOutput(&config{}) otlpMetrics := output.convertToOTLP([]*formatters.EventMsg{event}) metric := otlpMetrics.ResourceMetrics[0].ScopeMetrics[0].Metrics[0] switch tt.expectedType { case "Sum": assert.NotNil(t, metric.GetSum()) assert.Equal(t, tt.expectedMonotonic, metric.GetSum().IsMonotonic) case "Gauge": assert.NotNil(t, metric.GetGauge()) } }) } } // Test 5: gRPC Transport func TestOTLP_GRPCTransport(t *testing.T) { server, endpoint := startMockOTLPServer(t) defer server.Stop() cfg := map[string]interface{}{ "endpoint": endpoint, "protocol": "grpc", "timeout": "5s", "batch-size": 1, "interval": "100ms", } output := &otlpOutput{} err := output.Init(context.Background(), "test-otlp", cfg, outputs.WithConfigStore(gomap.NewMemStore(store.StoreOptions[any]{})), ) require.NoError(t, err) defer output.Close() event := createTestEvent() output.WriteEvent(context.Background(), event) time.Sleep(200 * time.Millisecond) assert.Greater(t, server.ReceivedMetricsCount(), 0) } // Test 6: Configuration Validation func TestOTLP_ConfigValidation(t *testing.T) { t.Skip("Implementation pending") tests := []struct { name string config map[string]interface{} expectError bool errorMsg string }{ { name: "valid gRPC config", config: map[string]interface{}{ "endpoint": "localhost:4317", "protocol": "grpc", }, expectError: false, }, { name: "valid HTTP config", config: map[string]interface{}{ "endpoint": "http://localhost:4318", "protocol": "http", }, expectError: false, }, { name: "missing endpoint", config: map[string]interface{}{ "protocol": "grpc", }, expectError: true, errorMsg: "endpoint is required", }, { name: "invalid protocol", config: map[string]interface{}{ "endpoint": "localhost:4317", "protocol": "invalid", }, expectError: true, errorMsg: "unsupported protocol", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { output := &otlpOutput{} err := output.Init(context.Background(), "test-otlp", tt.config) if tt.expectError { assert.Error(t, err) if tt.errorMsg != "" { assert.Contains(t, err.Error(), tt.errorMsg) } } else { assert.NoError(t, err) output.Close() } }) } } // Test 7: String Values as Attributes func TestOTLP_StringValuesAsAttributes(t *testing.T) { t.Skip("Implementation pending") // Test strings-as-attributes conversion // Given: String value metric event := &formatters.EventMsg{ Name: "interfaces_interface_state_oper_status", Timestamp: time.Now().UnixNano(), Tags: map[string]string{ "interface_name": "Ethernet1", }, Values: map[string]interface{}{ "value": "up", }, } // When: Converting with strings-as-attributes enabled output := newTestOutput(&config{StringsAsAttributes: true}) otlpMetrics := output.convertToOTLP([]*formatters.EventMsg{event}) // Then: Should create gauge with value=1 and status as attribute metric := otlpMetrics.ResourceMetrics[0].ScopeMetrics[0].Metrics[0] gauge := metric.GetGauge() require.NotNil(t, gauge) dataPoint := gauge.DataPoints[0] assert.Equal(t, float64(1), dataPoint.GetAsDouble()) assert.Equal(t, "up", getDataPointAttribute(dataPoint, "value")) } // Test 8: Subscription Name Mapping func TestOTLP_SubscriptionNameMapping(t *testing.T) { t.Skip("Implementation pending") // Test that subscription names become resource attributes // Given: Event with subscription name event := &formatters.EventMsg{ Name: "interfaces_interface_state_counters_in_octets", Timestamp: time.Now().UnixNano(), Tags: map[string]string{ "subscription_name": "arista", "source": "10.1.1.1:6030", }, Values: map[string]interface{}{ "value": int64(100), }, } // When: Converting with append-subscription-name enabled output := newTestOutput(&config{AppendSubscriptionName: true}) otlpMetrics := output.convertToOTLP([]*formatters.EventMsg{event}) // Then: subscription_name should be in attributes dataPoint := otlpMetrics.ResourceMetrics[0].ScopeMetrics[0].Metrics[0].GetSum().DataPoints[0] assert.Equal(t, "arista", getDataPointAttribute(dataPoint, "subscription_name")) } // TestBuildMetricName_StripLeadingUnderscore verifies the strip-leading-underscore config option. // gNMI paths arrive with a leading "/" (see pkg/formatters/event.go updateToEvent), which the // slash->underscore conversion turns into a leading "_". This test pins both the backward-compatible // default (option off) and the new behavior (option on). func TestBuildMetricName_StripLeadingUnderscore(t *testing.T) { tests := []struct { name string cfg *config event *formatters.EventMsg valueKey string expected string }{ { name: "default preserves leading underscore (backward compat)", cfg: &config{}, event: &formatters.EventMsg{Name: "sub1"}, valueKey: "/interfaces/interface/state/counters/in-octets", expected: "_interfaces_interface_state_counters_in_octets", }, { name: "enabled removes leading underscore", cfg: &config{StripLeadingUnderscore: true}, event: &formatters.EventMsg{Name: "sub1"}, valueKey: "/interfaces/interface/state/counters/in-octets", expected: "interfaces_interface_state_counters_in_octets", }, { name: "disabled with metric-prefix yields double underscore (backward compat)", cfg: &config{MetricPrefix: "gnmic"}, event: &formatters.EventMsg{Name: "sub1"}, valueKey: "/interfaces/interface/state/counters/in-octets", expected: "gnmic__interfaces_interface_state_counters_in_octets", }, { name: "enabled with metric-prefix has single underscore separator", cfg: &config{StripLeadingUnderscore: true, MetricPrefix: "gnmic"}, event: &formatters.EventMsg{Name: "sub1"}, valueKey: "/interfaces/interface/state/counters/in-octets", expected: "gnmic_interfaces_interface_state_counters_in_octets", }, { name: "enabled with append-subscription-name has single underscore separator", cfg: &config{StripLeadingUnderscore: true, AppendSubscriptionName: true}, event: &formatters.EventMsg{Name: "arista"}, valueKey: "/interfaces/interface/state/counters/in-octets", expected: "arista_interfaces_interface_state_counters_in_octets", }, { name: "enabled does not touch non-leading underscores", cfg: &config{StripLeadingUnderscore: true}, event: &formatters.EventMsg{Name: "sub1"}, valueKey: "/a_b/c", expected: "a_b_c", }, { name: "enabled is a no-op when path has no leading slash", cfg: &config{StripLeadingUnderscore: true}, event: &formatters.EventMsg{Name: "sub1"}, valueKey: "interfaces/interface", expected: "interfaces_interface", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { output := newTestOutput(tt.cfg) got := output.buildMetricName(tt.cfg, tt.event, tt.valueKey) assert.Equal(t, tt.expected, got) }) } } // Helper functions func createTestEvent() *formatters.EventMsg { return &formatters.EventMsg{ Name: "test_metric", Timestamp: time.Now().UnixNano(), Tags: map[string]string{ "source": "test:1234", }, Values: map[string]interface{}{ "value": int64(42), }, } } func getAttributeValue(resource interface{}, key string) string { // Helper to extract attribute value from resource // Will implement when we have the actual OTLP structures return "" } func getDataPointAttribute(dataPoint interface{}, key string) string { // Helper to extract attribute value from data point // Will implement when we have the actual OTLP structures return "" } // Mock OTLP server for testing type mockOTLPServer struct { metricsv1.UnimplementedMetricsServiceServer grpcServer *grpc.Server listener net.Listener m sync.Mutex metricsCount int receivedReqs []*metricsv1.ExportMetricsServiceRequest } func startMockOTLPServer(t *testing.T) (*mockOTLPServer, string) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) server := grpc.NewServer() mock := &mockOTLPServer{ grpcServer: server, listener: listener, } metricsv1.RegisterMetricsServiceServer(server, mock) go server.Serve(listener) return mock, listener.Addr().String() } func startMockOTLPServerOnAddress(t *testing.T, addr string) (*mockOTLPServer, string) { listener, err := net.Listen("tcp", addr) require.NoError(t, err) server := grpc.NewServer() mock := &mockOTLPServer{ grpcServer: server, listener: listener, } metricsv1.RegisterMetricsServiceServer(server, mock) go server.Serve(listener) return mock, listener.Addr().String() } func (m *mockOTLPServer) Export(ctx context.Context, req *metricsv1.ExportMetricsServiceRequest) (*metricsv1.ExportMetricsServiceResponse, error) { if err := ctx.Err(); err != nil { return nil, err } m.m.Lock() defer m.m.Unlock() m.receivedReqs = append(m.receivedReqs, req) m.metricsCount += len(req.ResourceMetrics) return &metricsv1.ExportMetricsServiceResponse{}, nil } func (m *mockOTLPServer) ReceivedMetricsCount() int { m.m.Lock() defer m.m.Unlock() return m.metricsCount } func (m *mockOTLPServer) Stop() { m.grpcServer.Stop() m.listener.Close() } // Test 9: Resource Tag Keys control data point vs resource attribute placement func TestOTLP_ResourceTagKeys(t *testing.T) { tests := []struct { name string resourceTagKeys []string eventTags map[string]string expectedInDataPoint []string expectedNotInDataPoint []string }{ { name: "empty resource-tag-keys: all tags become data point attributes", resourceTagKeys: []string{}, eventTags: map[string]string{ "device": "nvswitch1-nvl9-gp1-jhb01", "vendor": "nvidia", "model": "nvos", "interface_name": "Ethernet1", "subscription_name": "nvos", }, expectedInDataPoint: []string{"device", "vendor", "model", "interface_name", "subscription_name"}, expectedNotInDataPoint: []string{}, }, { name: "default resource-tag-keys: device/vendor/model/site/source excluded from data point", resourceTagKeys: []string{"device", "vendor", "model", "site", "source"}, eventTags: map[string]string{ "device": "nvswitch1-nvl9-gp1-jhb01", "vendor": "nvidia", "model": "nvos", "interface_name": "Ethernet1", }, expectedInDataPoint: []string{"interface_name"}, expectedNotInDataPoint: []string{"device", "vendor", "model"}, }, { name: "custom resource-tag-keys: only specified keys excluded", resourceTagKeys: []string{"source"}, eventTags: map[string]string{ "device": "nvswitch1", "vendor": "nvidia", "source": "10.0.0.1", "interface_name": "Ethernet1", }, expectedInDataPoint: []string{"device", "vendor", "interface_name"}, expectedNotInDataPoint: []string{"source"}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { event := &formatters.EventMsg{ Name: "test_metric", Timestamp: time.Now().UnixNano(), Tags: tt.eventTags, Values: map[string]interface{}{ "value": int64(100), }, } output := newTestOutput(&config{ ResourceTagKeys: tt.resourceTagKeys, }) otlpMetrics := output.convertToOTLP([]*formatters.EventMsg{event}) require.NotNil(t, otlpMetrics) require.Len(t, otlpMetrics.ResourceMetrics, 1) require.Len(t, otlpMetrics.ResourceMetrics[0].ScopeMetrics, 1) require.Len(t, otlpMetrics.ResourceMetrics[0].ScopeMetrics[0].Metrics, 1) metric := otlpMetrics.ResourceMetrics[0].ScopeMetrics[0].Metrics[0] var dataPointAttrs map[string]string if metric.GetGauge() != nil { dataPointAttrs = extractAttributesMap(metric.GetGauge().DataPoints[0].Attributes) } else if metric.GetSum() != nil { dataPointAttrs = extractAttributesMap(metric.GetSum().DataPoints[0].Attributes) } else { t.Fatal("Metric has neither Gauge nor Sum data") } for _, key := range tt.expectedInDataPoint { assert.Contains(t, dataPointAttrs, key, "Expected tag '%s' to be in data point attributes", key) assert.Equal(t, tt.eventTags[key], dataPointAttrs[key], "Tag '%s' value mismatch", key) } for _, key := range tt.expectedNotInDataPoint { assert.NotContains(t, dataPointAttrs, key, "Tag '%s' should NOT be in data point attributes", key) } }) } } // Test 10: Resource Attributes Behavior with ResourceTagKeys func TestOTLP_ResourceAttributesBehavior(t *testing.T) { tests := []struct { name string resourceTagKeys []string eventTags map[string]string expectInResource []string expectNotInResource []string }{ { name: "empty resource-tag-keys: no tags in resource", resourceTagKeys: []string{}, eventTags: map[string]string{ "device": "nvswitch1", "vendor": "nvidia", }, expectInResource: []string{}, expectNotInResource: []string{"device", "vendor"}, }, { name: "default resource-tag-keys: device/vendor in resource", resourceTagKeys: []string{"device", "vendor", "model", "site", "source"}, eventTags: map[string]string{ "device": "nvswitch1", "vendor": "nvidia", }, expectInResource: []string{"device", "vendor"}, expectNotInResource: []string{}, }, { name: "custom resource-tag-keys: only source in resource", resourceTagKeys: []string{"source"}, eventTags: map[string]string{ "device": "nvswitch1", "vendor": "nvidia", "source": "10.0.0.1", }, expectInResource: []string{"source"}, expectNotInResource: []string{"device", "vendor"}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { event := &formatters.EventMsg{ Name: "test_metric", Timestamp: time.Now().UnixNano(), Tags: tt.eventTags, Values: map[string]interface{}{ "value": int64(100), }, } output := newTestOutput(&config{ ResourceTagKeys: tt.resourceTagKeys, }) otlpMetrics := output.convertToOTLP([]*formatters.EventMsg{event}) require.NotNil(t, otlpMetrics) resource := otlpMetrics.ResourceMetrics[0].Resource resourceAttrs := extractAttributesMap(resource.Attributes) for _, key := range tt.expectInResource { assert.Contains(t, resourceAttrs, key, "Expected tag '%s' in resource attributes", key) } for _, key := range tt.expectNotInResource { assert.NotContains(t, resourceAttrs, key, "Tag '%s' should NOT be in resource attributes", key) } }) } } // Test 11: Configured Resource Attributes Always Included func TestOTLP_ConfiguredResourceAttributesAlwaysIncluded(t *testing.T) { event := &formatters.EventMsg{ Name: "test_metric", Timestamp: time.Now().UnixNano(), Tags: map[string]string{ "device": "nvswitch1", }, Values: map[string]interface{}{ "value": int64(100), }, } output := newTestOutput(&config{ ResourceTagKeys: []string{}, ResourceAttributes: map[string]string{ "service.name": "gnmic-collector", "service.version": "0.42.0", }, }) otlpMetrics := output.convertToOTLP([]*formatters.EventMsg{event}) resource := otlpMetrics.ResourceMetrics[0].Resource resourceAttrs := extractAttributesMap(resource.Attributes) // Configured resource attributes should always be present assert.Equal(t, "gnmic-collector", resourceAttrs["service.name"]) assert.Equal(t, "0.42.0", resourceAttrs["service.version"]) } // Test 12: Init succeeds even when endpoint is unreachable func TestOTLP_InitSucceedsWithUnreachableEndpoint(t *testing.T) { cfg := map[string]interface{}{ "endpoint": "unreachable-host:4317", "protocol": "grpc", "timeout": "1s", } output := &otlpOutput{} err := output.Init(context.Background(), "test-otlp", cfg, outputs.WithConfigStore(gomap.NewMemStore(store.StoreOptions[any]{})), ) assert.NoError(t, err, "Init should succeed even with unreachable endpoint") gs := output.grpcState.Load() assert.NotNil(t, gs, "gRPC state should be created") assert.NotNil(t, gs.conn, "gRPC connection should be created") assert.NotNil(t, gs.client, "gRPC client should be created") output.Close() } // Test 13: Connection happens lazily on first RPC func TestOTLP_ConnectionOnFirstRPC(t *testing.T) { server, endpoint := startMockOTLPServer(t) defer server.Stop() cfg := map[string]interface{}{ "endpoint": endpoint, "protocol": "grpc", "timeout": "5s", "batch-size": 1, "interval": "100ms", } output := &otlpOutput{} err := output.Init(context.Background(), "test-otlp", cfg, outputs.WithConfigStore(gomap.NewMemStore(store.StoreOptions[any]{})), ) require.NoError(t, err) defer output.Close() assert.Equal(t, 0, server.ReceivedMetricsCount(), "No metrics should be sent yet") event := createTestEvent() output.WriteEvent(context.Background(), event) time.Sleep(200 * time.Millisecond) assert.Greater(t, server.ReceivedMetricsCount(), 0, "Metrics should be sent on first RPC") } // Test 14: Retry behavior with delayed endpoint availability func TestOTLP_ReconnectWhenEndpointBecomesAvailable(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) endpoint := listener.Addr().String() listener.Close() cfg := map[string]interface{}{ "endpoint": endpoint, "protocol": "grpc", "timeout": "2s", "batch-size": 1, "interval": "200ms", "max-retries": 10, } output := &otlpOutput{} err = output.Init(context.Background(), "test-otlp", cfg, outputs.WithConfigStore(gomap.NewMemStore(store.StoreOptions[any]{})), ) require.NoError(t, err) defer output.Close() server, newEndpoint := startMockOTLPServerOnAddress(t, endpoint) defer server.Stop() assert.Equal(t, endpoint, newEndpoint) event := createTestEvent() output.WriteEvent(context.Background(), event) time.Sleep(500 * time.Millisecond) assert.Greater(t, server.ReceivedMetricsCount(), 0, "Should successfully send after endpoint becomes available") } // Test 15: Graceful shutdown flushes remaining batch func TestOTLP_GracefulShutdownFlushes(t *testing.T) { server, endpoint := startMockOTLPServer(t) defer server.Stop() cfg := map[string]interface{}{ "endpoint": endpoint, "protocol": "grpc", "timeout": "5s", "batch-size": 100, "interval": "10s", } output := &otlpOutput{} err := output.Init(context.Background(), "test-otlp", cfg, outputs.WithConfigStore(gomap.NewMemStore(store.StoreOptions[any]{})), ) require.NoError(t, err) event := createTestEvent() output.WriteEvent(context.Background(), event) output.WriteEvent(context.Background(), event) time.Sleep(100 * time.Millisecond) assert.Equal(t, 0, server.ReceivedMetricsCount(), "Batch should not be sent yet (batch size not reached)") output.Close() time.Sleep(200 * time.Millisecond) assert.Greater(t, server.ReceivedMetricsCount(), 0, "Remaining batch should be flushed on shutdown") } // Test 16: Context cancellation sends final batch with fresh context func TestOTLP_ContextCancellationFlushes(t *testing.T) { server, endpoint := startMockOTLPServer(t) defer server.Stop() cfg := map[string]interface{}{ "endpoint": endpoint, "protocol": "grpc", "timeout": "5s", "batch-size": 100, "interval": "10s", } ctx, cancel := context.WithCancel(context.Background()) output := &otlpOutput{} err := output.Init(ctx, "test-otlp", cfg, outputs.WithConfigStore(gomap.NewMemStore(store.StoreOptions[any]{})), ) require.NoError(t, err) event := createTestEvent() output.WriteEvent(context.Background(), event) output.WriteEvent(context.Background(), event) time.Sleep(100 * time.Millisecond) assert.Equal(t, 0, server.ReceivedMetricsCount(), "Batch should not be sent yet") cancel() time.Sleep(200 * time.Millisecond) output.Close() assert.Greater(t, server.ReceivedMetricsCount(), 0, "Batch should be flushed even after context cancellation") } // Test 17: Channel close flushes remaining batch func TestOTLP_ChannelCloseFlushes(t *testing.T) { server, endpoint := startMockOTLPServer(t) defer server.Stop() cfg := map[string]interface{}{ "endpoint": endpoint, "protocol": "grpc", "timeout": "5s", "batch-size": 100, "interval": "10s", } output := &otlpOutput{} err := output.Init(context.Background(), "test-otlp", cfg, outputs.WithConfigStore(gomap.NewMemStore(store.StoreOptions[any]{})), ) require.NoError(t, err) event := createTestEvent() output.WriteEvent(context.Background(), event) output.WriteEvent(context.Background(), event) output.WriteEvent(context.Background(), event) time.Sleep(100 * time.Millisecond) assert.Equal(t, 0, server.ReceivedMetricsCount(), "Batch should not be sent yet") close(*output.eventCh.Load()) time.Sleep(200 * time.Millisecond) assert.Greater(t, server.ReceivedMetricsCount(), 0, "Remaining batch should be flushed when channel closes") } // Helper to extract attributes map from KeyValue slice func extractAttributesMap(attrs []*commonpb.KeyValue) map[string]string { result := make(map[string]string) for _, attr := range attrs { if strVal := attr.Value.GetStringValue(); strVal != "" { result[attr.Key] = strVal } } return result } ================================================ FILE: pkg/outputs/output.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project ("Work") made under the Google Software Grant and Corporate Contributor License Agreement ("CLA") and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose. // This code is provided on an "as is" basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 // © 2025 NVIDIA Corporation // // This code is a Contribution to the gNMIc project ("Work") made under the Google Software Grant and Corporate Contributor License Agreement ("CLA") and governed by the Apache License 2.0. // No other rights or licenses in or to any of NVIDIA's intellectual property are granted for any other purpose. // This code is provided on an "as is" basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package outputs import ( "bytes" "context" "encoding/json" "fmt" "log" "strings" "sync" "text/template" "github.com/mitchellh/mapstructure" "github.com/openconfig/gnmi/proto/gnmi" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" _ "github.com/openconfig/gnmic/pkg/formatters/all" pkgutils "github.com/openconfig/gnmic/pkg/utils" "github.com/zestor-dev/zestor/store" ) type Output interface { // initialize the output Init(context.Context, string, map[string]any, ...Option) error // validate the config Validate(map[string]any) error // update the config Update(context.Context, map[string]any) error // update a processor UpdateProcessor(string, map[string]any) error // write a protobuf message to the output Write(context.Context, proto.Message, Meta) // write an event message to the output WriteEvent(context.Context, *formatters.EventMsg) // close the output Close() error // return a string representation of the output String() string } type Initializer func() Output var Outputs = map[string]Initializer{} var OutputTypes = map[string]struct{}{ "file": {}, "influxdb": {}, "kafka": {}, "nats": {}, "otlp": {}, "prometheus": {}, "prometheus_write": {}, "tcp": {}, "udp": {}, "gnmi": {}, "jetstream": {}, "snmp": {}, "asciigraph": {}, } func Register(name string, initFn Initializer) { Outputs[name] = initFn } var bytesBufferPool = sync.Pool{ New: func() any { return new(bytes.Buffer) }, } var stringBuilderPool = sync.Pool{ New: func() any { return new(strings.Builder) }, } type Meta map[string]string func DecodeConfig(src, dst any) error { decoder, err := mapstructure.NewDecoder( &mapstructure.DecoderConfig{ DecodeHook: mapstructure.StringToTimeDurationHookFunc(), Result: dst, }, ) if err != nil { return err } return decoder.Decode(src) } func AddSubscriptionTarget(msg proto.Message, meta Meta, addTarget string, tpl *template.Template) (*gnmi.SubscribeResponse, error) { if addTarget == "" { if message, ok := msg.(*gnmi.SubscribeResponse); ok { return message, nil } return nil, nil } msg = proto.Clone(msg) switch trsp := msg.(type) { case *gnmi.SubscribeResponse: switch rrsp := trsp.Response.(type) { case *gnmi.SubscribeResponse_Update: if rrsp.Update.Prefix == nil { rrsp.Update.Prefix = new(gnmi.Path) } switch addTarget { case "overwrite": sb := stringBuilderPool.Get().(*strings.Builder) defer func() { sb.Reset() stringBuilderPool.Put(sb) }() err := tpl.Execute(sb, meta) if err != nil { return nil, err } rrsp.Update.Prefix.Target = sb.String() return trsp, nil case "if-not-present": if rrsp.Update.Prefix.Target == "" { sb := stringBuilderPool.Get().(*strings.Builder) defer func() { sb.Reset() stringBuilderPool.Put(sb) }() err := tpl.Execute(sb, meta) if err != nil { return nil, err } rrsp.Update.Prefix.Target = sb.String() } return trsp, nil } } } return nil, nil } func ExecTemplate(content []byte, tpl *template.Template) ([]byte, error) { var input interface{} err := json.Unmarshal(content, &input) if err != nil { return nil, fmt.Errorf("failed to marshal input: %v", err) } bf := bytesBufferPool.Get().(*bytes.Buffer) defer func() { bf.Reset() bytesBufferPool.Put(bf) }() err = tpl.Execute(bf, input) if err != nil { return nil, fmt.Errorf("failed to execute msg template: %v", err) } result := bf.Bytes() out := make([]byte, len(result)) copy(out, result) return out, nil } var ( DefaultTargetTemplate = template.Must( template.New("target-template"). Funcs(TemplateFuncs). Parse(defaultTargetTemplateString)) TemplateFuncs = template.FuncMap{ "host": utils.GetHost, } ) const ( defaultTargetTemplateString = ` {{- if index . "subscription-target" -}} {{ index . "subscription-target" }} {{- else -}} {{ index . "source" | host }} {{- end -}}` ) func Marshal(pmsg protoreflect.ProtoMessage, meta map[string]string, mo *formatters.MarshalOptions, splitEvents bool, evps ...formatters.EventProcessor) ([][]byte, error) { switch mo.Format { case "event": if splitEvents { return marshalSplit(pmsg, meta, mo, evps...) } fallthrough default: b, err := mo.Marshal(pmsg, meta, evps...) if err != nil { return nil, err } if len(b) == 0 { return nil, nil } return [][]byte{b}, nil } } func marshalSplit(pmsg protoreflect.ProtoMessage, meta map[string]string, mo *formatters.MarshalOptions, evps ...formatters.EventProcessor) ([][]byte, error) { var subscriptionName string var ok bool if subscriptionName, ok = meta["subscription-name"]; !ok { subscriptionName = "default" } switch msg := pmsg.(type) { case *gnmi.SubscribeResponse: switch msg.GetResponse().(type) { case *gnmi.SubscribeResponse_Update: events, err := formatters.ResponseToEventMsgs(subscriptionName, msg, meta, evps...) if err != nil { return nil, fmt.Errorf("failed converting response to events: %v", err) } numEvents := len(events) if numEvents == 0 { return nil, nil } rs := make([][]byte, 0, numEvents) marshalFn := json.Marshal if mo.Multiline { marshalFn = func(v any) ([]byte, error) { return json.MarshalIndent(v, "", mo.Indent) } } for _, ev := range events { b, err := marshalFn(ev) if err != nil { return nil, err } rs = append(rs, b) } return rs, nil default: return nil, fmt.Errorf("unexpected message type: %T", msg) } default: return nil, fmt.Errorf("unexpected message type: %T", msg) } } type BaseOutput struct { } func (b *BaseOutput) Init(context.Context, string, map[string]any, ...Option) error { return nil } func (b *BaseOutput) Validate(map[string]any) error { return nil } func (b *BaseOutput) Update(context.Context, map[string]any) error { return nil } func (b *BaseOutput) UpdateProcessor(string, map[string]any) error { return nil } func (b *BaseOutput) Write(context.Context, proto.Message, Meta) {} func (b *BaseOutput) WriteEvent(context.Context, *formatters.EventMsg) {} func (b *BaseOutput) Close() error { return nil } func (b *BaseOutput) String() string { return "" } // update processor helper func UpdateProcessorInSlice( logger *log.Logger, storeObj store.Store[any], eventProcessors []string, currentEvps []formatters.EventProcessor, processorName string, pcfg map[string]any, ) ([]formatters.EventProcessor, bool, error) { tcs, ps, acts, err := pkgutils.GetConfigMaps(storeObj) if err != nil { return nil, false, err } for i, epName := range eventProcessors { if epName == processorName { ep, err := formatters.MakeProcessor(logger, processorName, pcfg, ps, tcs, acts) if err != nil { return nil, false, err } if i >= len(currentEvps) { return nil, false, fmt.Errorf("output processors are not properly initialized") } // create new slice with updated processor newEvps := make([]formatters.EventProcessor, len(currentEvps)) copy(newEvps, currentEvps) newEvps[i] = ep logger.Printf("updated event processor %s", processorName) return newEvps, true, nil } } // processor not found - return currentEvps return currentEvps, false, nil } ================================================ FILE: pkg/outputs/prometheus_output/prometheus_common.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package prometheus_output import ( "cmp" "errors" "fmt" "hash/fnv" "math" "path" "path/filepath" "regexp" "slices" "sort" "strconv" "strings" "sync" "time" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/formatters" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" ) const ( metricNameRegex = "[^a-zA-Z0-9_]+" defaultMetricHelp = "gNMIc generated metric" ) var ( MetricNameRegex = regexp.MustCompile(metricNameRegex) ) var stringBuilderPool = sync.Pool{ New: func() any { return new(strings.Builder) }, } type PromMetric struct { Name string Time *time.Time // AddedAt is used to expire metrics if the time field is not initialized // this happens when ExportTimestamp == false AddedAt time.Time labels []prompb.Label value float64 } // Metric func (p *PromMetric) CalculateKey() uint64 { h := fnv.New64a() h.Write([]byte(p.Name)) if len(p.labels) > 0 { h.Write([]byte(":")) sort.Slice(p.labels, func(i, j int) bool { return p.labels[i].Name < p.labels[j].Name }) for _, label := range p.labels { h.Write([]byte(label.Name)) h.Write([]byte(":")) h.Write([]byte(label.Value)) h.Write([]byte(":")) } } return h.Sum64() } func (p *PromMetric) String() string { if p == nil { return "" } sb := stringBuilderPool.Get().(*strings.Builder) defer func() { sb.Reset() stringBuilderPool.Put(sb) }() sb.WriteString("name=") sb.WriteString(p.Name) sb.WriteString(",") numLabels := len(p.labels) if numLabels > 0 { sb.WriteString("labels=[") for i, lb := range p.labels { sb.WriteString(lb.Name) sb.WriteString("=") sb.WriteString(lb.Value) if i < numLabels-1 { sb.WriteString(",") } } sb.WriteString("],") } sb.WriteString(fmt.Sprintf("value=%f,", p.value)) sb.WriteString("time=") if p.Time != nil { sb.WriteString(p.Time.String()) } else { sb.WriteString("nil") } sb.WriteString(",addedAt=") sb.WriteString(p.AddedAt.String()) return sb.String() } // Desc implements prometheus.Metric func (p *PromMetric) Desc() *prometheus.Desc { labelNames := make([]string, 0, len(p.labels)) for _, label := range p.labels { labelNames = append(labelNames, label.Name) } return prometheus.NewDesc(p.Name, defaultMetricHelp, labelNames, nil) } // Write implements prometheus.Metric func (p *PromMetric) Write(out *dto.Metric) error { out.Untyped = &dto.Untyped{ Value: &p.value, } out.Label = make([]*dto.LabelPair, 0, len(p.labels)) for i := range p.labels { out.Label = append(out.Label, &dto.LabelPair{Name: &p.labels[i].Name, Value: &p.labels[i].Value}) } if p.Time == nil { return nil } timestamp := p.Time.UnixNano() / 1000000 out.TimestampMs = ×tamp return nil } func (mb *MetricBuilder) MetricsFromEvent(ev *formatters.EventMsg, now time.Time) []*PromMetric { pms := make([]*PromMetric, 0, len(ev.Values)) labels := mb.GetLabels(ev) for vName, val := range ev.Values { v, err := toFloat(val) if err != nil { if !mb.StringsAsLabels { continue } v = 1.0 } pm := &PromMetric{ Name: mb.MetricName(ev.Name, vName), labels: labels, value: v, AddedAt: now, } if mb.OverrideTimestamps && mb.ExportTimestamps { ev.Timestamp = now.UnixNano() } if mb.ExportTimestamps { tm := time.Unix(0, ev.Timestamp) pm.Time = &tm } pms = append(pms, pm) } return pms } type MetricBuilder struct { Prefix string AppendSubscriptionName bool StringsAsLabels bool OverrideTimestamps bool ExportTimestamps bool } func (m *MetricBuilder) GetLabels(ev *formatters.EventMsg) []prompb.Label { labels := make([]prompb.Label, 0, len(ev.Tags)) addedLabels := make(map[string]struct{}) for k, v := range ev.Tags { labelName := MetricNameRegex.ReplaceAllString(path.Base(k), "_") if _, ok := addedLabels[labelName]; ok { continue } labels = append(labels, prompb.Label{Name: labelName, Value: v}) addedLabels[labelName] = struct{}{} } if !m.StringsAsLabels { return labels } labelsFromValues := buildUniqueLabelsFromValues(ev.Values, addedLabels) labels = append(labels, labelsFromValues...) return labels } func toFloat(v interface{}) (float64, error) { switch i := v.(type) { case float64: return float64(i), nil case float32: return float64(i), nil case int64: return float64(i), nil case int32: return float64(i), nil case int16: return float64(i), nil case int8: return float64(i), nil case uint64: return float64(i), nil case uint32: return float64(i), nil case uint16: return float64(i), nil case uint8: return float64(i), nil case int: return float64(i), nil case uint: return float64(i), nil case bool: if i { return 1, nil } return 0, nil case string: f, err := strconv.ParseFloat(i, 64) if err != nil { return math.NaN(), err } return f, err //lint:ignore SA1019 still need DecimalVal for backward compatibility case *gnmi.Decimal64: return float64(i.Digits) / math.Pow10(int(i.Precision)), nil default: return math.NaN(), errors.New("toFloat: unknown value is of incompatible type") } } // MetricName generates the prometheus metric name based on the output plugin, // the measurement name and the value name. // it makes sure the name matches the regex "[^a-zA-Z0-9_]+" func (m *MetricBuilder) MetricName(measName, valueName string) string { sb := stringBuilderPool.Get().(*strings.Builder) defer func() { sb.Reset() stringBuilderPool.Put(sb) }() if m.Prefix != "" { sb.WriteString(MetricNameRegex.ReplaceAllString(m.Prefix, "_")) sb.WriteString("_") } if m.AppendSubscriptionName { sb.WriteString(strings.TrimRight(MetricNameRegex.ReplaceAllString(measName, "_"), "_")) sb.WriteString("_") } sb.WriteString(strings.TrimLeft(MetricNameRegex.ReplaceAllString(valueName, "_"), "_")) return sb.String() } type NamedTimeSeries struct { Name string TS *prompb.TimeSeries } func (m *MetricBuilder) TimeSeriesFromEvent(ev *formatters.EventMsg) []*NamedTimeSeries { promTS := make([]*NamedTimeSeries, 0, len(ev.Values)) tsLabels := m.GetLabels(ev) timestamp := ev.Timestamp / int64(time.Millisecond) for k, v := range ev.Values { fv, err := toFloat(v) if err != nil { if !m.StringsAsLabels { continue } fv = 1.0 } tsName := m.MetricName(ev.Name, k) tsLabelsWithName := make([]prompb.Label, 0, len(tsLabels)+1) tsLabelsWithName = append(tsLabelsWithName, tsLabels...) tsLabelsWithName = append(tsLabelsWithName, prompb.Label{ Name: labels.MetricName, Value: tsName, }) // The prometheus spec requires label names to be sorted // https://prometheus.io/docs/concepts/remote_write_spec/ slices.SortFunc(tsLabelsWithName, func(a prompb.Label, b prompb.Label) int { return cmp.Compare(a.Name, b.Name) }) nts := &NamedTimeSeries{ Name: tsName, TS: &prompb.TimeSeries{ Labels: tsLabelsWithName, Samples: []prompb.Sample{ { Value: fv, Timestamp: timestamp, }, }, }, } promTS = append(promTS, nts) } return promTS } type tempLabel struct { path string // xpath name string // label name value string // label value suffixCount int // suffix count to handle duplicates } func labelNameFromPath(path string, numElems int) string { elems := strings.Split(path, "/") nonEmpty := make([]string, 0, len(elems)) for _, e := range elems { if e != "" { nonEmpty = append(nonEmpty, e) } } if numElems > len(nonEmpty) { numElems = len(nonEmpty) } selected := nonEmpty[len(nonEmpty)-numElems:] return MetricNameRegex.ReplaceAllString(strings.Join(selected, "_"), "_") } func buildUniqueLabelsFromValues(values map[string]any, addedLabels map[string]struct{}) []prompb.Label { tempLabels := make([]tempLabel, 0, len(values)) var err error // gather strings and booleans as labels for k, v := range values { _, err = toFloat(v) if err == nil { continue } val := "" switch v := v.(type) { case string: val = v case bool: val = strconv.FormatBool(v) } labelName := MetricNameRegex.ReplaceAllString(filepath.Base(k), "_") tempLabels = append(tempLabels, tempLabel{ path: k, name: labelName, value: val, suffixCount: 1, }) } // resolve duplicate label names by including more xpath elements // from the end of the path until all names are unique and don't // collide with already added label tags. for { groups := make(map[string][]int, len(tempLabels)) for idx, l := range tempLabels { groups[l.name] = append(groups[l.name], idx) } changed := false for name, indices := range groups { _, alreadyAdded := addedLabels[name] if len(indices) <= 1 && !alreadyAdded { continue } for _, idx := range indices { tempLabels[idx].suffixCount++ newName := labelNameFromPath(tempLabels[idx].path, tempLabels[idx].suffixCount) if newName != tempLabels[idx].name { tempLabels[idx].name = newName changed = true } } } if !changed { break } } // drop any labels that still collide after exhausting path elements. taken := make(map[string]struct{}, len(addedLabels)+len(tempLabels)) for k := range addedLabels { taken[k] = struct{}{} } result := make([]prompb.Label, 0, len(tempLabels)) for _, l := range tempLabels { if _, exists := taken[l.name]; exists { continue } taken[l.name] = struct{}{} result = append(result, prompb.Label{Name: l.name, Value: l.value}) } return result } ================================================ FILE: pkg/outputs/prometheus_output/prometheus_common_test.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package prometheus_output import ( "cmp" "slices" "sort" "testing" "time" "github.com/openconfig/gnmic/pkg/formatters" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/prompb" ) var metricNameSet = map[string]struct { p *MetricBuilder measName string // aka subscription name valueName string want string }{ "with_prefix_with_subscription_with_value_no-append-subsc": { p: &MetricBuilder{ Prefix: "gnmic", }, measName: "sub", valueName: "value", want: "gnmic_value", }, "with_prefix_with_subscription_with_value_with_append-subsc": { p: &MetricBuilder{ Prefix: "gnmic", AppendSubscriptionName: true, }, measName: "sub", valueName: "value", want: "gnmic_sub_value", }, "with_prefix-bad-chars_with_subscription_with_value_with_append-subsc": { p: &MetricBuilder{ Prefix: "gnmic-prefix", AppendSubscriptionName: true, }, measName: "sub", valueName: "value", want: "gnmic_prefix_sub_value", }, "without_prefix_with_subscription_with_value_no-append-subsc": { p: &MetricBuilder{}, measName: "sub", valueName: "value", want: "value", }, "without_prefix_with_subscription_with_value_with_append-subsc": { p: &MetricBuilder{ AppendSubscriptionName: true, }, measName: "sub", valueName: "value", want: "sub_value", }, "without_prefix_with_subscription-bad-chars_with_value-bad-chars_with_append-subsc": { p: &MetricBuilder{ AppendSubscriptionName: true, }, measName: "sub-name", valueName: "value-name2", want: "sub_name_value_name2", }, } func TestTimeSeriesFromEvent(t *testing.T) { metricBuilder := &MetricBuilder{StringsAsLabels: true} event := &formatters.EventMsg{ Name: "eventName", Timestamp: 12345, Tags: map[string]string{ "tagName": "tagVal", }, Values: map[string]interface{}{ "strName1": "strVal1", "strName2": "strVal2", "intName1": 1, "intName2": 2, }, Deletes: []string{}, } for _, nts := range metricBuilder.TimeSeriesFromEvent(event) { for _, label := range nts.TS.Labels { if label.Name == labels.MetricName && label.Value != nts.Name { t.Errorf("__name__ label wrong, expected '%s', got '%s'", nts.Name, label.Value) } } } } func TestTimeSeriesLabelsSorted(t *testing.T) { metricBuilder := &MetricBuilder{StringsAsLabels: true} event := &formatters.EventMsg{ Name: "eventName", Timestamp: 12345, Tags: map[string]string{ "tagName": "tagVal", }, Values: map[string]interface{}{ "strName1": "strVal1", "strName2": "strVal2", "intName1": 1, "intName2": 2, }, Deletes: []string{}, } for _, nts := range metricBuilder.TimeSeriesFromEvent(event) { areLabelsSorted := slices.IsSortedFunc(nts.TS.Labels, func(a prompb.Label, b prompb.Label) int { return cmp.Compare(a.Name, b.Name) }) if !areLabelsSorted { t.Errorf("labels names are not sorted, got '%v'", nts.TS.Labels) } } } func TestMetricName(t *testing.T) { for name, tc := range metricNameSet { t.Run(name, func(t *testing.T) { got := tc.p.MetricName(tc.measName, tc.valueName) if got != tc.want { t.Errorf("failed at '%s', expected %v, got %+v", name, tc.want, got) } }) } } func BenchmarkMetricName(b *testing.B) { for name, tc := range metricNameSet { b.Run(name, func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { tc.p.MetricName(tc.measName, tc.valueName) } }) } } func Test_buildUniqueLabelsFromValues(t *testing.T) { tests := []struct { name string values map[string]any addedLabels map[string]struct{} want []prompb.Label }{ { name: "no_duplicates", values: map[string]any{ "a/b/c": "a", "a/b/d": "b", "a/b/e": "c", }, want: []prompb.Label{ {Name: "c", Value: "a"}, {Name: "d", Value: "b"}, {Name: "e", Value: "c"}, }, }, { name: "with_duplicates", values: map[string]any{ "a/a/name": "a", "a/b/name": "b", "a/c/name": "c", }, want: []prompb.Label{ {Name: "a_name", Value: "a"}, {Name: "b_name", Value: "b"}, {Name: "c_name", Value: "c"}, }, }, { name: "with_duplicates_3_elements", values: map[string]any{ "a/a/name": "a", "b/a/name": "b", "c/a/name": "c", }, want: []prompb.Label{ {Name: "a_a_name", Value: "a"}, {Name: "b_a_name", Value: "b"}, {Name: "c_a_name", Value: "c"}, }, }, { name: "with_duplicates_and_floats", values: map[string]any{ "a/a/name": "a", "a/b/name": "b", "a/c/name": "1", }, want: []prompb.Label{ {Name: "a_name", Value: "a"}, {Name: "b_name", Value: "b"}, }, }, { name: "collision_with_added_labels", values: map[string]any{ "a/b/name": "val", }, addedLabels: map[string]struct{}{ "name": {}, }, want: []prompb.Label{ {Name: "b_name", Value: "val"}, }, }, { name: "collision_with_added_labels_and_duplicates", values: map[string]any{ "a/b/name": "v1", "a/c/name": "v2", }, addedLabels: map[string]struct{}{ "name": {}, }, want: []prompb.Label{ {Name: "b_name", Value: "v1"}, {Name: "c_name", Value: "v2"}, }, }, { name: "collision_with_added_labels_and_duplicates_2", values: map[string]any{ "a/b/name": "v1", "a/c/name": "v2", }, addedLabels: map[string]struct{}{ "b_name": {}, }, want: []prompb.Label{ {Name: "a_b_name", Value: "v1"}, {Name: "c_name", Value: "v2"}, }, }, { name: "collision_with_added_labels_full_path_exhausted", values: map[string]any{ "a/b/name": "val", }, addedLabels: map[string]struct{}{ "name": {}, "b_name": {}, "a_b_name": {}, }, want: []prompb.Label{}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { addedLabels := tt.addedLabels if addedLabels == nil { addedLabels = make(map[string]struct{}) } got := buildUniqueLabelsFromValues(tt.values, addedLabels) if len(got) != len(tt.want) { t.Errorf("buildUniqueLabelsFromValues() length = %d, want %d", len(got), len(tt.want)) return } sort.Slice(got, func(i, j int) bool { return got[i].Name < got[j].Name }) sort.Slice(tt.want, func(i, j int) bool { return tt.want[i].Name < tt.want[j].Name }) for i, label := range got { expected := tt.want[i] if label.Name != expected.Name || label.Value != expected.Value { t.Errorf("Label mismatch at index %d: got %+v, want %+v", i, label, expected) } } }) } } func TestMetricBuilder_MetricsFromEvent(t *testing.T) { tests := []struct { name string // description of this test case // Named input parameters for target function. ev *formatters.EventMsg now time.Time want []*PromMetric }{ { name: "no_duplicates", ev: &formatters.EventMsg{ Name: "eventName", Timestamp: 42, Tags: map[string]string{ "t1": "v1", "t2": "v2", }, Values: map[string]any{ "a/b/c": "1", }, }, now: time.Unix(0, 42), want: []*PromMetric{ { Name: "a_b_c", value: 1, labels: []prompb.Label{ {Name: "t1", Value: "v1"}, {Name: "t2", Value: "v2"}, }, }, }, }, { name: "no_duplicates_strings_as_labels", ev: &formatters.EventMsg{ Name: "eventName", Timestamp: 42, Tags: map[string]string{ "t1": "v1", "t2": "v2", }, Values: map[string]any{ "a/b/c": "a", }, }, now: time.Unix(0, 42), want: []*PromMetric{ { Name: "a_b_c", value: 1, labels: []prompb.Label{ {Name: "t1", Value: "v1"}, {Name: "t2", Value: "v2"}, {Name: "c", Value: "a"}, }, }, }, }, { name: "duplicates_strings_as_labels", ev: &formatters.EventMsg{ Name: "eventName", Timestamp: 42, Tags: map[string]string{ "t1": "v1", "t2": "v2", }, Values: map[string]any{ "a/a/c": "a", "a/b/c": "b", }, }, now: time.Unix(0, 42), want: []*PromMetric{ { Name: "a_a_c", value: 1, labels: []prompb.Label{ {Name: "t1", Value: "v1"}, {Name: "t2", Value: "v2"}, {Name: "a_c", Value: "a"}, {Name: "b_c", Value: "b"}, }, }, { Name: "a_b_c", value: 1, labels: []prompb.Label{ {Name: "t1", Value: "v1"}, {Name: "t2", Value: "v2"}, {Name: "a_c", Value: "a"}, {Name: "b_c", Value: "b"}, }, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mb := &MetricBuilder{ StringsAsLabels: true, } got := mb.MetricsFromEvent(tt.ev, tt.now) if len(got) != len(tt.want) { t.Errorf("MetricsFromEvent() = %v, want %v", got, tt.want) } sort.Slice(got, func(i, j int) bool { return got[i].Name < got[j].Name }) sort.Slice(tt.want, func(i, j int) bool { return tt.want[i].Name < tt.want[j].Name }) for i, pm := range got { expected := tt.want[i] if pm.Name != expected.Name || pm.value != expected.value { t.Errorf("Metric mismatch at index %d: got %+v, want %+v", i, pm, expected) } if len(pm.labels) != len(expected.labels) { t.Errorf("Metric labels mismatch at index %d: got %+v, want %+v", i, pm.labels, expected.labels) } sort.Slice(pm.labels, func(i, j int) bool { return pm.labels[i].Name < pm.labels[j].Name }) sort.Slice(expected.labels, func(i, j int) bool { return expected.labels[i].Name < expected.labels[j].Name }) for j, label := range pm.labels { expectedLabel := expected.labels[j] if label.Name != expectedLabel.Name || label.Value != expectedLabel.Value { t.Errorf("Metric label mismatch at index %d: got %+v, want %+v", j, label, expectedLabel) } } } }) } } ================================================ FILE: pkg/outputs/prometheus_output/prometheus_output/prometheus_cache.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package prometheus_output import ( "context" "time" "github.com/openconfig/gnmi/proto/gnmi" "github.com/prometheus/client_golang/prometheus" "github.com/openconfig/gnmic/pkg/cache" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/outputs" ) func (p *prometheusOutput) collectFromCache(ch chan<- prometheus.Metric) { notifications, err := p.gnmiCache.ReadAll() if err != nil { p.logger.Printf("failed to read from cache: %v", err) return } cfg := p.cfg.Load() dc := p.dynCfg.Load() if cfg == nil || dc == nil { return } numNotifications := len(notifications) prometheusNumberOfCachedMetrics.WithLabelValues(cfg.Name).Set(float64(numNotifications)) p.targetsMeta.DeleteExpired() events := make([]*formatters.EventMsg, 0, numNotifications) for subName, notifs := range notifications { // build events without processors for _, notif := range notifs { targetName := notif.GetPrefix().GetTarget() var meta outputs.Meta if item := p.targetsMeta.Get(subName + "/" + targetName); item != nil { meta = item.Value() } ievents, err := formatters.ResponseToEventMsgs( subName, &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{Update: notif}, }, meta) if err != nil { p.logger.Printf("failed to convert gNMI notifications to events: %v", err) return } events = append(events, ievents...) } } if cfg.CacheConfig.Debug { p.logger.Printf("got %d events from cache pre processors", len(events)) } for _, proc := range dc.evps { events = proc.Apply(events...) } if cfg.CacheConfig.Debug { p.logger.Printf("got %d events from cache post processors", len(events)) } ctx, cancel := context.WithTimeout(context.Background(), cfg.Timeout) defer cancel() now := time.Now() for _, ev := range events { for _, pm := range dc.mb.MetricsFromEvent(ev, now) { select { case <-ctx.Done(): p.logger.Printf("collection context terminated: %v", ctx.Err()) return case ch <- pm: } } } } func cacheEqual(a, b *cache.Config) bool { if a == nil && b == nil { return true } return a != nil && b != nil && a.Expiration == b.Expiration && a.Debug == b.Debug && a.Address == b.Address && a.Timeout == b.Timeout && a.Type == b.Type && a.Username == b.Username && a.Password == b.Password && a.MaxBytes == b.MaxBytes && a.MaxMsgsPerSubscription == b.MaxMsgsPerSubscription && a.FetchBatchSize == b.FetchBatchSize && a.FetchWaitTime == b.FetchWaitTime } ================================================ FILE: pkg/outputs/prometheus_output/prometheus_output/prometheus_metrics.go ================================================ // © 2023 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package prometheus_output import ( "sync" "github.com/prometheus/client_golang/prometheus" ) const ( namespace = "gnmic" subsystem = "prometheus_output" ) var registerMetricsOnce sync.Once var prometheusNumberOfMetrics = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "number_of_prometheus_metrics_total", Help: "Number of metrics stored by the prometheus output", }, []string{"name"}) var prometheusNumberOfCachedMetrics = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "number_of_prometheus_cached_metrics_total", Help: "Number of metrics cached by the prometheus output", }, []string{"name"}) func (p *prometheusOutput) initMetrics(cfg *config) { if cfg.CacheConfig == nil { prometheusNumberOfMetrics.WithLabelValues(cfg.Name).Set(0) return } prometheusNumberOfCachedMetrics.WithLabelValues(cfg.Name).Set(0) } func (p *prometheusOutput) registerMetrics() error { cfg := p.cfg.Load() if cfg == nil { return nil } if !cfg.EnableMetrics { return nil } if p.reg == nil { return nil } var err error registerMetricsOnce.Do(func() { if cfg.CacheConfig == nil { err = p.reg.Register(prometheusNumberOfMetrics) return } err = p.reg.Register(prometheusNumberOfCachedMetrics) }) p.initMetrics(cfg) return err } ================================================ FILE: pkg/outputs/prometheus_output/prometheus_output/prometheus_output.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package prometheus_output import ( "context" "crypto/tls" "encoding/json" "fmt" "log" "net" "net/http" "os" "slices" "strconv" "strings" "sync" "sync/atomic" "text/template" "time" "github.com/google/uuid" "github.com/hashicorp/consul/api" "github.com/jellydator/ttlcache/v3" "github.com/openconfig/gnmi/proto/gnmi" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/cache" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/gtemplate" "github.com/openconfig/gnmic/pkg/outputs" promcom "github.com/openconfig/gnmic/pkg/outputs/prometheus_output" gutils "github.com/openconfig/gnmic/pkg/utils" "github.com/zestor-dev/zestor/store" ) const ( outputType = "prometheus" defaultListen = ":9804" defaultPath = "/metrics" defaultExpiration = time.Minute loggingPrefix = "[prometheus_output:%s] " // this is used to timeout the collection method // in case it drags for too long defaultTimeout = 10 * time.Second defaultNumWorkers = 1 ) func init() { outputs.Register(outputType, func() outputs.Output { return &prometheusOutput{} }) } type prometheusOutput struct { outputs.BaseOutput cfg *atomic.Pointer[config] dynCfg *atomic.Pointer[dynConfig] logger *log.Logger eventChan chan *formatters.EventMsg msgChan chan *outputs.ProtoMsg wg *sync.WaitGroup server *http.Server sync.Mutex entries map[uint64]*promcom.PromMetric consulClient *api.Client gnmiCache cache.Cache targetsMeta *ttlcache.Cache[string, outputs.Meta] reg *prometheus.Registry store store.Store[any] runCfn context.CancelFunc runCtx context.Context } type dynConfig struct { targetTpl *template.Template evps []formatters.EventProcessor mb *promcom.MetricBuilder } type config struct { Name string `mapstructure:"name,omitempty" json:"name,omitempty"` Listen string `mapstructure:"listen,omitempty" json:"listen,omitempty"` TLS *types.TLSConfig `mapstructure:"tls,omitempty" json:"tls,omitempty"` Path string `mapstructure:"path,omitempty" json:"path,omitempty"` Expiration time.Duration `mapstructure:"expiration,omitempty" json:"expiration,omitempty"` MetricPrefix string `mapstructure:"metric-prefix,omitempty" json:"metric-prefix,omitempty"` AppendSubscriptionName bool `mapstructure:"append-subscription-name,omitempty" json:"append-subscription-name,omitempty"` ExportTimestamps bool `mapstructure:"export-timestamps,omitempty" json:"export-timestamps,omitempty"` OverrideTimestamps bool `mapstructure:"override-timestamps,omitempty" json:"override-timestamps,omitempty"` AddTarget string `mapstructure:"add-target,omitempty" json:"add-target,omitempty"` TargetTemplate string `mapstructure:"target-template,omitempty" json:"target-template,omitempty"` StringsAsLabels bool `mapstructure:"strings-as-labels,omitempty" json:"strings-as-labels,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` EventProcessors []string `mapstructure:"event-processors,omitempty" json:"event-processors,omitempty"` ServiceRegistration *serviceRegistration `mapstructure:"service-registration,omitempty" json:"service-registration,omitempty"` Timeout time.Duration `mapstructure:"timeout,omitempty" json:"timeout,omitempty"` CacheConfig *cache.Config `mapstructure:"cache,omitempty" json:"cache-config,omitempty"` NumWorkers int `mapstructure:"num-workers,omitempty" json:"num-workers,omitempty"` EnableMetrics bool `mapstructure:"enable-metrics,omitempty" json:"enable-metrics,omitempty"` clusterName string address string port int } func (p *prometheusOutput) String() string { cfg := p.cfg.Load() if cfg == nil { return "" } b, err := json.Marshal(cfg) if err != nil { return "" } return string(b) } func (p *prometheusOutput) buildEventProcessors(cfg *config) ([]formatters.EventProcessor, error) { tcs, ps, acts, err := gutils.GetConfigMaps(p.store) if err != nil { return nil, err } return formatters.MakeEventProcessors(p.logger, cfg.EventProcessors, ps, tcs, acts) } func (p *prometheusOutput) setLogger(logger *log.Logger) { if logger != nil && p.logger != nil { p.logger.SetOutput(logger.Writer()) p.logger.SetFlags(logger.Flags()) } } func (p *prometheusOutput) init() { p.cfg = new(atomic.Pointer[config]) p.dynCfg = new(atomic.Pointer[dynConfig]) p.logger = log.New(os.Stderr, loggingPrefix, utils.DefaultLoggingFlags) p.eventChan = make(chan *formatters.EventMsg) p.msgChan = make(chan *outputs.ProtoMsg, 10_000) p.wg = new(sync.WaitGroup) p.entries = make(map[uint64]*promcom.PromMetric) } func (p *prometheusOutput) Init(ctx context.Context, name string, cfg map[string]any, opts ...outputs.Option) error { p.init() // init struct fields p.runCtx, p.runCfn = context.WithCancel(ctx) newCfg := new(config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } if newCfg.Name == "" { newCfg.Name = name } p.logger.SetPrefix(fmt.Sprintf(loggingPrefix, newCfg.Name)) options := &outputs.OutputOptions{} for _, opt := range opts { if err := opt(options); err != nil { return err } } p.store = options.Store // apply logger p.setLogger(options.Logger) // set defaults err = p.setDefaultsFor(newCfg) if err != nil { return err } // initialize registry p.reg = options.Registry err = p.registerMetrics() if err != nil { return err } p.setName(options.Name, newCfg) p.setClusterName(options.ClusterName, newCfg) p.cfg.Store(newCfg) dc := new(dynConfig) // initialize target template if newCfg.TargetTemplate == "" { dc.targetTpl = outputs.DefaultTargetTemplate } else if newCfg.AddTarget != "" { dc.targetTpl, err = gtemplate.CreateTemplate("target-template", newCfg.TargetTemplate) if err != nil { return err } dc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs) } // initialize event processors dc.evps, err = p.buildEventProcessors(newCfg) if err != nil { return err } dc.mb = &promcom.MetricBuilder{ Prefix: newCfg.MetricPrefix, AppendSubscriptionName: newCfg.AppendSubscriptionName, StringsAsLabels: newCfg.StringsAsLabels, OverrideTimestamps: newCfg.OverrideTimestamps, ExportTimestamps: newCfg.ExportTimestamps, } p.dynCfg.Store(dc) if newCfg.CacheConfig != nil { p.gnmiCache, err = cache.New( newCfg.CacheConfig, cache.WithLogger(p.logger), ) if err != nil { return err } p.targetsMeta = ttlcache.New(ttlcache.WithTTL[string, outputs.Meta](newCfg.Expiration)) } // create prometheus registry registry := prometheus.NewRegistry() err = registry.Register(p) if err != nil { return err } // create http server promHandler := promhttp.HandlerFor(registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}) mux := http.NewServeMux() mux.Handle(newCfg.Path, promHandler) p.server = &http.Server{ Addr: newCfg.Listen, Handler: mux, } // create tcp listener listener, err := p.createListenerFor(newCfg) if err != nil { return err } // start worker p.wg.Add(newCfg.NumWorkers) for i := 0; i < newCfg.NumWorkers; i++ { go p.worker(p.runCtx) } if newCfg.CacheConfig == nil { p.wg.Add(1) go func() { defer p.wg.Done() p.expireMetricsPeriodic(p.runCtx) }() } p.wg.Add(1) go func() { defer p.wg.Done() defer listener.Close() err = p.server.Serve(listener) if err != nil && err != http.ErrServerClosed { p.logger.Printf("prometheus server error: %v", err) } }() go p.registerService(p.runCtx) p.logger.Printf("initialized prometheus output: %s", p.String()) return nil } func (p *prometheusOutput) Validate(cfg map[string]any) error { ncfg := new(config) err := outputs.DecodeConfig(cfg, ncfg) if err != nil { return err } err = p.setDefaultsFor(ncfg) if err != nil { return err } _, err = gtemplate.CreateTemplate("target-template", ncfg.TargetTemplate) if err != nil { return err } _, err = gtemplate.CreateTemplate("target-template", ncfg.TargetTemplate) if err != nil { return err } return nil } func (p *prometheusOutput) Update(ctx context.Context, cfg map[string]any) error { // decode new config newCfg := new(config) if err := outputs.DecodeConfig(cfg, newCfg); err != nil { return err } currCfg := p.cfg.Load() dc := new(dynConfig) // apply defaults and derived fields for the new config tmp := *newCfg // copy for mutation if p.cfg != nil { // init name and service registration name, id and tags tmp.Name = currCfg.Name if currCfg.ServiceRegistration != nil { if tmp.ServiceRegistration.Name == "" { tmp.ServiceRegistration.Name = currCfg.ServiceRegistration.Name } tmp.ServiceRegistration.id = fmt.Sprintf("%s-%s", tmp.ServiceRegistration.Name, tmp.Name) tmp.ServiceRegistration.Tags = append(tmp.ServiceRegistration.Tags, fmt.Sprintf("gnmic-instance=%s", tmp.ServiceRegistration.Name)) } } if err := p.setDefaultsFor(&tmp); err != nil { // factor setDefaults to accept *config return err } // rebuild objects that depend on config dc.targetTpl = outputs.DefaultTargetTemplate if tmp.TargetTemplate != "" { t, err := gtemplate.CreateTemplate("target-template", tmp.TargetTemplate) if err != nil { return err } dc.targetTpl = t.Funcs(outputs.TemplateFuncs) } // event processors var err error prevDC := p.dynCfg.Load() if slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0 { dc.evps, err = p.buildEventProcessors(&tmp) if err != nil { return err } } else if prevDC != nil { dc.evps = prevDC.evps } // metric builder dc.mb = &promcom.MetricBuilder{ Prefix: tmp.MetricPrefix, AppendSubscriptionName: tmp.AppendSubscriptionName, StringsAsLabels: tmp.StringsAsLabels, OverrideTimestamps: tmp.OverrideTimestamps, ExportTimestamps: tmp.ExportTimestamps, } p.dynCfg.Store(dc) // rebuild http objects if needed rebuildHTTPServer := p.needHTTPRebuild(currCfg, &tmp) var newServer *http.Server var newListener net.Listener if rebuildHTTPServer { reg := prometheus.NewRegistry() if err := reg.Register(p); err != nil { return err } promHandler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}) mux := http.NewServeMux() mux.Handle(tmp.Path, promHandler) s := &http.Server{ Addr: tmp.Listen, Handler: mux, } l, err := p.createListenerFor(&tmp) if err != nil { return err } newServer = s newListener = l } // cache rebuild if CacheConfig toggled or changed var newCache cache.Cache var newTargetsMeta *ttlcache.Cache[string, outputs.Meta] if !cacheEqual(currCfg.CacheConfig, tmp.CacheConfig) { if tmp.CacheConfig != nil { c, err := cache.New(tmp.CacheConfig, cache.WithLogger(p.logger)) if err != nil { return err } newCache = c newTargetsMeta = ttlcache.New(ttlcache.WithTTL[string, outputs.Meta](tmp.Expiration)) } } else { // keep existing cache/meta if not changed p.Lock() newCache = p.gnmiCache newTargetsMeta = p.targetsMeta p.Unlock() } // swap under lock p.Lock() oldServer := p.server oldRunCfn := p.runCfn oldCache := p.gnmiCache p.cfg.Store(&tmp) if rebuildHTTPServer { p.server = newServer } if newCache != nil || (oldCache != nil && tmp.CacheConfig == nil) { p.gnmiCache = newCache p.targetsMeta = newTargetsMeta } // create a new worker ctx p.runCtx, p.runCfn = context.WithCancel(ctx) p.Unlock() // Start/Restart components that changed // HTTP server if rebuildHTTPServer { if oldServer != nil { _ = oldServer.Close() // stop old server; Serve will exit } // start the new one p.wg.Add(1) go func(srv *http.Server, l net.Listener) { defer p.wg.Done() defer l.Close() if err := srv.Serve(l); err != nil && err != http.ErrServerClosed { p.logger.Printf("prometheus server error: %v", err) } }(newServer, newListener) } // workers (stop old, start new) if oldRunCfn != nil { oldRunCfn() } // start workers with new num-workers p.wg.Add(tmp.NumWorkers) for i := 0; i < tmp.NumWorkers; i++ { go p.worker(p.runCtx) } if tmp.CacheConfig == nil { p.wg.Add(1) go func() { defer p.wg.Done() p.expireMetricsPeriodic(p.runCtx) }() } // restart service registration go p.registerService(p.runCtx) p.logger.Printf("updated prometheus output: %s", p.String()) return nil } func (p *prometheusOutput) UpdateProcessor(name string, pcfg map[string]any) error { cfg := p.cfg.Load() dc := p.dynCfg.Load() newEvps, changed, err := outputs.UpdateProcessorInSlice( p.logger, p.store, cfg.EventProcessors, dc.evps, name, pcfg, ) if err != nil { return err } if changed { newDC := *dc newDC.evps = newEvps p.dynCfg.Store(&newDC) p.logger.Printf("updated event processor %s", name) } return nil } func (p *prometheusOutput) needHTTPRebuild(old, new *config) bool { if p.server == nil || old == nil || new == nil { return true } return old.Listen != new.Listen || old.Path != new.Path || !old.TLS.Equal(new.TLS) } func (p *prometheusOutput) createListenerFor(c *config) (net.Listener, error) { if c.TLS == nil { return net.Listen("tcp", c.Listen) } tlsConfig, err := utils.NewTLSConfig( c.TLS.CaFile, c.TLS.CertFile, c.TLS.KeyFile, c.TLS.ClientAuth, true, true, ) if err != nil { return nil, err } return tls.Listen("tcp", c.Listen, tlsConfig) } // Write implements the outputs.Output interface func (p *prometheusOutput) Write(ctx context.Context, rsp proto.Message, meta outputs.Meta) { if rsp == nil { return } cfg := p.cfg.Load() if cfg == nil { return } wctx, cancel := context.WithTimeout(ctx, cfg.Timeout) defer cancel() select { case <-ctx.Done(): return case p.msgChan <- outputs.NewProtoMsg(rsp, meta): case <-wctx.Done(): if cfg.Debug { p.logger.Printf("writing expired after %s", cfg.Timeout) } return } } func (p *prometheusOutput) WriteEvent(ctx context.Context, ev *formatters.EventMsg) { dc := p.dynCfg.Load() if dc == nil { return } select { case <-ctx.Done(): return default: var evs = []*formatters.EventMsg{ev} for _, proc := range dc.evps { evs = proc.Apply(evs...) } for _, pev := range evs { p.eventChan <- pev } } } func (p *prometheusOutput) Close() error { p.Lock() consulClient := p.consulClient gnmiCache := p.gnmiCache server := p.server cfg := p.cfg.Load() p.Unlock() var err error if consulClient != nil && cfg != nil && cfg.ServiceRegistration != nil { err = consulClient.Agent().ServiceDeregister(cfg.ServiceRegistration.id) if err != nil { // ignore 404 and unknown service ID errors if !strings.Contains(err.Error(), "404") && !strings.Contains(err.Error(), "Unknown service ID") { p.logger.Printf("failed to deregister consul service: %v", err) } } } if p.runCfn != nil { p.runCfn() } if gnmiCache != nil { gnmiCache.Stop() } if server != nil { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() err = server.Shutdown(ctx) if err != nil { p.logger.Printf("failed to shutdown http server: %v", err) } } p.logger.Printf("closed.") p.wg.Wait() return nil } // Describe implements prometheus.Collector func (p *prometheusOutput) Describe(ch chan<- *prometheus.Desc) {} // Collect implements prometheus.Collector func (p *prometheusOutput) Collect(ch chan<- prometheus.Metric) { cfg := p.cfg.Load() if cfg == nil { return } p.Lock() defer p.Unlock() if cfg.CacheConfig != nil { p.collectFromCache(ch) return } // No cache // run expire before exporting metrics p.expireMetrics() ctx, cancel := context.WithTimeout(context.Background(), cfg.Timeout) defer cancel() for _, entry := range p.entries { select { case <-ctx.Done(): p.logger.Printf("collection context terminated: %v", ctx.Err()) return case ch <- entry: } } } func (p *prometheusOutput) worker(ctx context.Context) { defer p.wg.Done() for { select { case <-ctx.Done(): return case ev := <-p.eventChan: p.workerHandleEvent(ev) case m := <-p.msgChan: p.workerHandleProto(ctx, m) } } } func (p *prometheusOutput) workerHandleProto(ctx context.Context, m *outputs.ProtoMsg) { pmsg := m.GetMsg() cfg := p.cfg.Load() dc := p.dynCfg.Load() if cfg == nil || dc == nil { return } p.Lock() gnmiCache := p.gnmiCache targetsMeta := p.targetsMeta p.Unlock() switch pmsg := pmsg.(type) { case *gnmi.SubscribeResponse: meta := m.GetMeta() measName := "default" if subName, ok := meta["subscription-name"]; ok { measName = subName } var err error pmsg, err = outputs.AddSubscriptionTarget(pmsg, m.GetMeta(), cfg.AddTarget, dc.targetTpl) if err != nil { p.logger.Printf("failed to add target to the response: %v", err) } if gnmiCache != nil { gnmiCache.Write(ctx, measName, pmsg) target := utils.GetHost(meta["source"]) targetsMeta.Set(measName+"/"+target, meta, ttlcache.DefaultTTL) return } events, err := formatters.ResponseToEventMsgs(measName, pmsg, meta, dc.evps...) if err != nil { p.logger.Printf("failed to convert message to event: %v", err) return } p.workerHandleEvent(events...) } } type metricAndKey struct { k uint64 m *promcom.PromMetric } func (p *prometheusOutput) workerHandleEvent(evs ...*formatters.EventMsg) { cfg := p.cfg.Load() dc := p.dynCfg.Load() if cfg == nil || dc == nil { return } if cfg.Debug { p.logger.Printf("got event to store: %+v", evs) } mks := make([]*metricAndKey, 0, len(evs)) for _, ev := range evs { for _, pm := range dc.mb.MetricsFromEvent(ev, time.Now()) { mks = append(mks, &metricAndKey{ m: pm, k: pm.CalculateKey(), }) } } p.Lock() defer p.Unlock() for _, mk := range mks { // key := pm.CalculateKey() e, ok := p.entries[mk.k] // if the entry key is not present add it to the map. // if present add it only if the entry timestamp is newer than the // existing one. if !ok || mk.m.Time == nil || (ok && mk.m.Time != nil && e.Time.Before(*mk.m.Time)) { p.entries[mk.k] = mk.m if cfg.Debug { p.logger.Printf("saved key=%d, metric: %+v", mk.k, mk.m) } } } } func (p *prometheusOutput) expireMetrics() { cfg := p.cfg.Load() if cfg == nil || cfg.Expiration <= 0 { return } expiry := time.Now().Add(-cfg.Expiration) for k, e := range p.entries { if cfg.ExportTimestamps { if e.Time.Before(expiry) { delete(p.entries, k) } continue } if e.AddedAt.Before(expiry) { delete(p.entries, k) } } } func (p *prometheusOutput) expireMetricsPeriodic(ctx context.Context) { cfg := p.cfg.Load() if cfg == nil || cfg.Expiration <= 0 { return } p.Lock() prometheusNumberOfMetrics.WithLabelValues(cfg.Name).Set(float64(len(p.entries))) p.Unlock() ticker := time.NewTicker(cfg.Expiration) defer ticker.Stop() for { select { case <-ctx.Done(): return case <-ticker.C: cfg := p.cfg.Load() if cfg == nil { continue } p.Lock() p.expireMetrics() prometheusNumberOfMetrics.WithLabelValues(cfg.Name).Set(float64(len(p.entries))) p.Unlock() } } } func (p *prometheusOutput) setDefaultsFor(c *config) error { if c.Listen == "" { c.Listen = defaultListen } if c.Path == "" { c.Path = defaultPath } if c.Expiration == 0 { c.Expiration = defaultExpiration } if c.CacheConfig != nil && c.AddTarget == "" { c.AddTarget = "if-not-present" } if c.Timeout <= 0 { c.Timeout = defaultTimeout } if c.NumWorkers <= 0 { c.NumWorkers = defaultNumWorkers } if c.ServiceRegistration == nil { return nil } p.setServiceRegistrationDefaults(c) var err error var port string switch { case c.ServiceRegistration.ServiceAddress != "": c.address, port, err = net.SplitHostPort(c.ServiceRegistration.ServiceAddress) if err != nil { // if service-address does not include a port number, use the port number from the listen field if strings.Contains(err.Error(), "missing port in address") { c.address = c.ServiceRegistration.ServiceAddress _, port, err = net.SplitHostPort(c.Listen) if err != nil { p.logger.Printf("invalid 'listen' field format: %v", err) return err } c.port, err = strconv.Atoi(port) if err != nil { p.logger.Printf("invalid 'listen' field format: %v", err) return err } return nil } // if the error is not related to a missing port, fail p.logger.Printf("invalid 'service-registration.service-address' field format: %v", err) return err } // the service-address contains both an address and a port number c.port, err = strconv.Atoi(port) if err != nil { p.logger.Printf("invalid 'service-registration.service-address' field format: %v", err) return err } default: c.address, port, err = net.SplitHostPort(c.Listen) if err != nil { p.logger.Printf("invalid 'listen' field format: %v", err) return err } c.port, err = strconv.Atoi(port) if err != nil { p.logger.Printf("invalid 'listen' field format: %v", err) return err } } return nil } func (p *prometheusOutput) setName(name string, cfg *config) { if cfg.Name == "" { cfg.Name = name } if cfg.ServiceRegistration != nil { if cfg.ServiceRegistration.Name == "" { cfg.ServiceRegistration.Name = fmt.Sprintf("prometheus-%s", cfg.Name) } if name == "" { name = uuid.New().String() } cfg.ServiceRegistration.id = fmt.Sprintf("%s-%s", cfg.ServiceRegistration.Name, name) cfg.ServiceRegistration.Tags = append(cfg.ServiceRegistration.Tags, fmt.Sprintf("gnmic-instance=%s", name)) } } func (p *prometheusOutput) setClusterName(name string, cfg *config) { cfg.clusterName = name if cfg.ServiceRegistration != nil { cfg.ServiceRegistration.Tags = append(cfg.ServiceRegistration.Tags, fmt.Sprintf("gnmic-cluster=%s", name)) } } ================================================ FILE: pkg/outputs/prometheus_output/prometheus_output/prometheus_service_registration.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package prometheus_output import ( "context" "encoding/json" "errors" "fmt" "path/filepath" "strings" "time" "github.com/hashicorp/consul/api" "github.com/openconfig/gnmic/pkg/lockers" ) const ( defaultServiceRegistrationAddress = "localhost:8500" defaultRegistrationCheckInterval = 5 * time.Second defaultMaxServiceFail = 3 ) type serviceRegistration struct { Address string `mapstructure:"address,omitempty" json:"address,omitempty"` Datacenter string `mapstructure:"datacenter,omitempty" json:"datacenter,omitempty"` Username string `mapstructure:"username,omitempty" json:"username,omitempty"` Password string `mapstructure:"password,omitempty" json:"password,omitempty"` Token string `mapstructure:"token,omitempty" json:"token,omitempty"` Name string `mapstructure:"name,omitempty" json:"name,omitempty"` CheckInterval time.Duration `mapstructure:"check-interval,omitempty" json:"check-interval,omitempty"` MaxFail int `mapstructure:"max-fail,omitempty" json:"max-fail,omitempty"` Tags []string `mapstructure:"tags,omitempty" json:"tags,omitempty"` EnableHTTPCheck bool `mapstructure:"enable-http-check,omitempty" json:"enable-http-check,omitempty"` HTTPCheckAddress string `mapstructure:"http-check-address,omitempty" json:"http-check-address,omitempty"` UseLock bool `mapstructure:"use-lock,omitempty" json:"use-lock,omitempty"` ServiceAddress string `mapstructure:"service-address,omitempty" json:"service-address,omitempty"` deregisterAfter string id string httpCheckAddress string } func (p *prometheusOutput) registerService(ctx context.Context) { cfg := p.cfg.Load() if cfg == nil { return } if cfg.ServiceRegistration == nil { return } defer func() { p.logger.Printf("deregistering service: %s", cfg.ServiceRegistration.Name) }() p.logger.Printf("registering service: %s", cfg.ServiceRegistration.Name) var err error clientConfig := &api.Config{ Address: cfg.ServiceRegistration.Address, Scheme: "http", Datacenter: cfg.ServiceRegistration.Datacenter, Token: cfg.ServiceRegistration.Token, } if cfg.ServiceRegistration.Username != "" && cfg.ServiceRegistration.Password != "" { clientConfig.HttpAuth = &api.HttpBasicAuth{ Username: cfg.ServiceRegistration.Username, Password: cfg.ServiceRegistration.Password, } } doneCh := make(chan struct{}) INITCONSUL: if ctx.Err() != nil { if errors.Is(ctx.Err(), context.Canceled) { p.logger.Printf("context canceled: %v", ctx.Err()) close(doneCh) if p.consulClient != nil { err = p.consulClient.Agent().ServiceDeregister(cfg.ServiceRegistration.id) if err != nil { p.logger.Printf("failed to deregister service in consul: %v", err) } } return } } p.consulClient, err = api.NewClient(clientConfig) if err != nil { p.logger.Printf("failed to connect to consul: %v", err) time.Sleep(1 * time.Second) goto INITCONSUL } self, err := p.consulClient.Agent().Self() if err != nil { p.logger.Printf("failed to connect to consul: %v", err) time.Sleep(1 * time.Second) goto INITCONSUL } if cfg, ok := self["Config"]; ok { b, _ := json.Marshal(cfg) p.logger.Printf("consul agent config: %s", string(b)) } ctx, cancel := context.WithCancel(ctx) defer cancel() if cfg.ServiceRegistration.UseLock { doneCh, err = p.acquireAndKeepLock(ctx, "gnmic/"+cfg.clusterName+"/prometheus-output", []byte(cfg.ServiceRegistration.id)) if err != nil { p.logger.Printf("failed to acquire lock: %v", err) time.Sleep(1 * time.Second) goto INITCONSUL } } ttlCheckID := "ttl:" + cfg.ServiceRegistration.id service := &api.AgentServiceRegistration{ ID: cfg.ServiceRegistration.id, Name: cfg.ServiceRegistration.Name, Address: cfg.address, Port: cfg.port, Tags: cfg.ServiceRegistration.Tags, Checks: api.AgentServiceChecks{ { CheckID: ttlCheckID, TTL: cfg.ServiceRegistration.CheckInterval.String(), DeregisterCriticalServiceAfter: cfg.ServiceRegistration.deregisterAfter, }, }, } if cfg.ServiceRegistration.EnableHTTPCheck { service.Checks = append(service.Checks, &api.AgentServiceCheck{ CheckID: "http:" + cfg.ServiceRegistration.id, HTTP: cfg.ServiceRegistration.httpCheckAddress, Method: "GET", Interval: cfg.ServiceRegistration.CheckInterval.String(), TLSSkipVerify: true, DeregisterCriticalServiceAfter: cfg.ServiceRegistration.deregisterAfter, }) } b, _ := json.Marshal(service) p.logger.Printf("registering service: %s", string(b)) err = p.consulClient.Agent().ServiceRegister(service) if err != nil { p.logger.Printf("failed to register service in consul: %v", err) return } err = p.consulClient.Agent().UpdateTTL(ttlCheckID, "", api.HealthPassing) if err != nil { p.logger.Printf("failed to pass TTL check: %v", err) } ticker := time.NewTicker(cfg.ServiceRegistration.CheckInterval / 2) for { select { case <-ticker.C: err = p.consulClient.Agent().UpdateTTL(ttlCheckID, "", api.HealthPassing) if err != nil { p.logger.Printf("failed to update TTL check to Passing: %v", err) } case <-ctx.Done(): err = p.consulClient.Agent().UpdateTTL(ttlCheckID, ctx.Err().Error(), api.HealthCritical) if err != nil { p.logger.Printf("failed to update TTL check to Critical: %v", err) } ticker.Stop() goto INITCONSUL case <-doneCh: ticker.Stop() goto INITCONSUL } } } func (p *prometheusOutput) setServiceRegistrationDefaults(c *config) { if c.ServiceRegistration.Address == "" { c.ServiceRegistration.Address = defaultServiceRegistrationAddress } if c.ServiceRegistration.CheckInterval <= 5*time.Second { c.ServiceRegistration.CheckInterval = defaultRegistrationCheckInterval } if c.ServiceRegistration.MaxFail <= 0 { c.ServiceRegistration.MaxFail = defaultMaxServiceFail } deregisterTimer := c.ServiceRegistration.CheckInterval * time.Duration(c.ServiceRegistration.MaxFail) c.ServiceRegistration.deregisterAfter = deregisterTimer.String() if !c.ServiceRegistration.EnableHTTPCheck { return } c.ServiceRegistration.httpCheckAddress = c.ServiceRegistration.HTTPCheckAddress if c.ServiceRegistration.httpCheckAddress != "" { c.ServiceRegistration.httpCheckAddress = filepath.Join(c.ServiceRegistration.httpCheckAddress, c.Path) if !strings.HasPrefix(c.ServiceRegistration.httpCheckAddress, "http") { c.ServiceRegistration.httpCheckAddress = "http://" + c.ServiceRegistration.httpCheckAddress } return } c.ServiceRegistration.httpCheckAddress = filepath.Join(c.Listen, c.Path) if !strings.HasPrefix(c.ServiceRegistration.httpCheckAddress, "http") { c.ServiceRegistration.httpCheckAddress = "http://" + c.ServiceRegistration.httpCheckAddress } } func (p *prometheusOutput) acquireLock(ctx context.Context, key string, val []byte) (string, error) { cfg := p.cfg.Load() if cfg == nil { return "", fmt.Errorf("config not found") } var err error var acquired = false writeOpts := new(api.WriteOptions) writeOpts = writeOpts.WithContext(ctx) kvPair := &api.KVPair{Key: key, Value: val} doneChan := make(chan struct{}) for { select { case <-ctx.Done(): return "", ctx.Err() case <-doneChan: return "", lockers.ErrCanceled default: acquired = false kvPair.Session, _, err = p.consulClient.Session().Create( &api.SessionEntry{ Behavior: "delete", TTL: time.Duration(cfg.ServiceRegistration.CheckInterval * 2).String(), LockDelay: 0, }, writeOpts, ) if err != nil { p.logger.Printf("failed creating session: %v", err) time.Sleep(time.Second) continue } acquired, _, err = p.consulClient.KV().Acquire(kvPair, writeOpts) if err != nil { p.logger.Printf("failed acquiring lock to %q: %v", kvPair.Key, err) time.Sleep(time.Second) continue } if acquired { return kvPair.Session, nil } if cfg.Debug { p.logger.Printf("failed acquiring lock to %q: already locked", kvPair.Key) } time.Sleep(10 * time.Second) } } } func (p *prometheusOutput) keepLock(ctx context.Context, sessionID string) (chan struct{}, chan error) { writeOpts := new(api.WriteOptions) writeOpts = writeOpts.WithContext(ctx) doneChan := make(chan struct{}) errChan := make(chan error) go func() { if sessionID == "" { errChan <- fmt.Errorf("unknown key") close(doneChan) return } cfg := p.cfg.Load() if cfg == nil { errChan <- fmt.Errorf("config not found") close(doneChan) return } err := p.consulClient.Session().RenewPeriodic( time.Duration(cfg.ServiceRegistration.CheckInterval/2).String(), sessionID, writeOpts, doneChan, ) if err != nil { errChan <- err } }() return doneChan, errChan } func (p *prometheusOutput) acquireAndKeepLock(ctx context.Context, key string, val []byte) (chan struct{}, error) { sessionID, err := p.acquireLock(ctx, key, val) if err != nil { p.logger.Printf("failed to acquire lock: %v", err) return nil, err } doneCh, errCh := p.keepLock(ctx, sessionID) go func() { for { select { case <-ctx.Done(): close(doneCh) return case <-doneCh: _, err := p.consulClient.KV().Delete(key, nil) if err != nil { p.logger.Printf("failed to delete lock from consul: %v", err) } _, err = p.consulClient.Session().Destroy(sessionID, nil) if err != nil { p.logger.Printf("failed to destroy session in consul: %v", err) } return case err := <-errCh: p.logger.Printf("failed maintaining the lock: %v", err) close(doneCh) } } }() return doneCh, nil } ================================================ FILE: pkg/outputs/prometheus_output/prometheus_write_output/prometheus_write_client.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package prometheus_write_output import ( "bytes" "context" "errors" "fmt" "io" "net/http" "sort" "time" gogoproto "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/prometheus/prompb" "github.com/openconfig/gnmic/pkg/api/utils" ) var ( ErrMarshal = errors.New("marshal error") ) const backoff = 100 * time.Millisecond func (p *promWriteOutput) createHTTPClientFor(c *config) (*http.Client, error) { cl := &http.Client{ Timeout: c.Timeout, } if c.TLS != nil { tlsCfg, err := utils.NewTLSConfig( c.TLS.CaFile, c.TLS.CertFile, c.TLS.KeyFile, "", c.TLS.SkipVerify, false, ) if err != nil { return nil, err } cl.Transport = &http.Transport{ TLSClientConfig: tlsCfg, } } return cl, nil } func (p *promWriteOutput) writer(ctx context.Context) { defer p.wg.Done() defer p.logger.Printf("writer stopped") cfg := p.cfg.Load() p.logger.Printf("starting writer") ticker := time.NewTicker(cfg.Interval) defer ticker.Stop() for { timeSeriesCh := *p.timeSeriesCh.Load() select { case <-ctx.Done(): return case <-ticker.C: if cfg.Debug { p.logger.Printf("write interval reached, writing to remote") } p.write(ctx, timeSeriesCh) case <-p.buffDrainCh: if cfg.Debug { p.logger.Printf("buffer full, writing to remote") } p.write(ctx, timeSeriesCh) } } } func (p *promWriteOutput) write(ctx context.Context, timeSeriesCh <-chan *prompb.TimeSeries) { cfg := p.cfg.Load() buffSize := len(timeSeriesCh) if cfg.Debug { p.logger.Printf("write triggered, buffer size: %d", buffSize) } if buffSize == 0 { return } pts := make([]prompb.TimeSeries, 0, buffSize) // read from buff channel for 1 second or // until we read a number of timeSeries equal to the buffer size for { select { case ts := <-timeSeriesCh: pts = append(pts, *ts) if len(pts) == buffSize { goto WRITE } case <-time.After(time.Second): goto WRITE } } WRITE: numTS := len(pts) if numTS == 0 { return } // sort timeSeries by timestamp sort.Slice(pts, func(i, j int) bool { return pts[i].Samples[0].Timestamp < pts[j].Samples[0].Timestamp }) chunk := make([]prompb.TimeSeries, 0, cfg.MaxTimeSeriesPerWrite) for i, pt := range pts { // append timeSeries to chunk chunk = append(chunk, pt) // if the chunk size reaches the configured max or // we reach the max number of time series gathered, send. chunkSize := len(chunk) if chunkSize == cfg.MaxTimeSeriesPerWrite || i+1 == numTS { if cfg.Debug { p.logger.Printf("writing a %d time series chunk", chunkSize) } start := time.Now() err := p.writeRequest(ctx, &prompb.WriteRequest{ Timeseries: chunk, }, cfg) if err != nil { if cfg.Debug { p.logger.Print(err) } continue } prometheusWriteSendDuration.WithLabelValues(cfg.Name).Set(float64(time.Since(start).Nanoseconds())) prometheusWriteNumberOfSentMsgs.WithLabelValues(cfg.Name).Add(float64(chunkSize)) // return if we are done with the gathered time series if i+1 == numTS { return } // reset chunk if we are not done yet chunk = make([]prompb.TimeSeries, 0, cfg.MaxTimeSeriesPerWrite) } } } // writeRequest marshals the supplied prompb.WriteRequest, // creates an HTTP request with the proper configured options (Authentication, Headers,...), // sends the request and checks the returned response status code. // It returns an error if the status code is >=300. func (p *promWriteOutput) writeRequest(ctx context.Context, wr *prompb.WriteRequest, cfg *config) error { httpReq, err := p.makeHTTPRequest(ctx, wr) if err != nil { return err } // send request with retries retries := 0 RETRY: httpClient := p.httpClient.Load() //cfg := p.cfg.Load() rsp, err := httpClient.Do(httpReq) if err != nil { retries++ err = fmt.Errorf("failed to write to remote: %w", err) p.logger.Print(err) if retries < cfg.MaxRetries { time.Sleep(backoff) goto RETRY } prometheusWriteNumberOfFailSendMsgs.WithLabelValues(cfg.Name, "client_failure").Inc() return err } defer rsp.Body.Close() if cfg.Debug { p.logger.Printf("got response from remote: status=%s", rsp.Status) } if rsp.StatusCode >= 300 { prometheusWriteNumberOfFailSendMsgs.WithLabelValues(cfg.Name, fmt.Sprintf("status_code=%d", rsp.StatusCode)).Inc() msg, err := io.ReadAll(rsp.Body) if err != nil { return err } return fmt.Errorf("write response failed, code=%d, body=%s", rsp.StatusCode, string(msg)) } return nil } // metadataWriter writes the cached metadata entries to the remote address each `metadata.interval` func (p *promWriteOutput) metadataWriter(ctx context.Context) { defer p.wg.Done() defer p.logger.Printf("metadata writer stopped") cfg := p.cfg.Load() if cfg.Metadata == nil || !cfg.Metadata.Include { return } p.writeMetadata(ctx) ticker := time.NewTicker(cfg.Metadata.Interval) defer ticker.Stop() for { select { case <-ctx.Done(): return case <-ticker.C: p.writeMetadata(ctx) } } } // writeMetadata writes the currently cached metadata entries to the remote address, // it will multiple prompb.WriteRequest with at most `metadata.max-entries` each until all entries are sent. func (p *promWriteOutput) writeMetadata(ctx context.Context) { cfg := p.cfg.Load() p.m.Lock() defer p.m.Unlock() if len(p.metadataCache) == 0 { return } mds := make([]prompb.MetricMetadata, 0, cfg.Metadata.MaxEntriesPerWrite) count := 0 // keep track of the number of entries in mds for _, md := range p.metadataCache { if count < cfg.Metadata.MaxEntriesPerWrite { count++ mds = append(mds, md) continue } // max entries reached, write accumulated entries if cfg.Debug { p.logger.Printf("writing %d metadata points", len(mds)) } start := time.Now() err := p.writeRequest(ctx, &prompb.WriteRequest{ Metadata: mds, }, cfg) if err != nil { prometheusWriteNumberOfFailSendMetadataMsgs.WithLabelValues(cfg.Name).Add(1) if cfg.Debug { p.logger.Print(err) } return } prometheusWriteMetadataSendDuration.WithLabelValues(cfg.Name).Set(float64(time.Since(start).Nanoseconds())) prometheusWriteNumberOfSentMetadataMsgs.WithLabelValues(cfg.Name).Add(float64(len(mds))) // reset counter and array then continue with the loop count = 0 mds = make([]prompb.MetricMetadata, 0, cfg.Metadata.MaxEntriesPerWrite) } // no metadata entries to write, return if len(mds) == 0 { return } // loop done with some metadata entries left to write if cfg.Debug { p.logger.Printf("writing %d metadata points", len(mds)) } start := time.Now() err := p.writeRequest(ctx, &prompb.WriteRequest{ Metadata: mds, }, cfg) if err != nil { if cfg.Debug { p.logger.Print(err) } return } prometheusWriteMetadataSendDuration.WithLabelValues(cfg.Name).Set(float64(time.Since(start).Nanoseconds())) prometheusWriteNumberOfSentMetadataMsgs.WithLabelValues(cfg.Name).Add(float64(len(mds))) } func (p *promWriteOutput) makeHTTPRequest(ctx context.Context, wr *prompb.WriteRequest) (*http.Request, error) { cfg := p.cfg.Load() b, err := gogoproto.Marshal(wr) if err != nil { prometheusWriteNumberOfFailSendMsgs.WithLabelValues(cfg.Name, "marshal_error").Inc() return nil, fmt.Errorf("marshal error: %w", err) } compBytes := snappy.Encode(nil, b) httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, cfg.URL, bytes.NewBuffer(compBytes)) if err != nil { return nil, fmt.Errorf("failed to create HTTP request: %v", err) } httpReq.Header.Set("X-Prometheus-Remote-Write-Version", "0.1.0") httpReq.Header.Set("Content-Encoding", "snappy") httpReq.Header.Set("User-Agent", userAgent) httpReq.Header.Set("Content-Type", "application/x-protobuf") if cfg.Authentication != nil { httpReq.SetBasicAuth(cfg.Authentication.Username, cfg.Authentication.Password) } if cfg.Authorization != nil && cfg.Authorization.Type != "" { httpReq.Header.Set("Authorization", fmt.Sprintf("%s %s", cfg.Authorization.Type, cfg.Authorization.Credentials)) } for k, v := range cfg.Headers { httpReq.Header.Add(k, v) } return httpReq, nil } ================================================ FILE: pkg/outputs/prometheus_output/prometheus_write_output/prometheus_write_metrics.go ================================================ // © 2023 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package prometheus_write_output import ( "sync" "github.com/prometheus/client_golang/prometheus" ) const ( namespace = "gnmic" subsystem = "prometheus_write_output" ) var registerMetricsOnce sync.Once var prometheusWriteNumberOfSentMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "number_of_prometheus_write_msgs_sent_success_total", Help: "Number of msgs successfully sent by gnmic prometheus_write output", }, []string{"name"}) var prometheusWriteNumberOfFailSendMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "number_of_prometheus_write_msgs_sent_fail_total", Help: "Number of failed msgs sent by gnmic prometheus_write output", }, []string{"name", "reason"}) var prometheusWriteSendDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "msg_send_duration_ns", Help: "gnmic prometheus_write output send duration in ns", }, []string{"name"}) var prometheusWriteNumberOfSentMetadataMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "number_of_prometheus_write_metadata_msgs_sent_success_total", Help: "Number of metadata msgs successfully sent by gnmic prometheus_write output", }, []string{"name"}) var prometheusWriteNumberOfFailSendMetadataMsgs = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: namespace, Subsystem: subsystem, Name: "number_of_prometheus_write_metadata_msgs_sent_fail_total", Help: "Number of failed metadata msgs sent by gnmic prometheus_write output", }, []string{"name", "reason"}) var prometheusWriteMetadataSendDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: "metadata_msg_send_duration_ns", Help: "gnmic prometheus_write output metadata send duration in ns", }, []string{"name"}) func initMetrics(name string) { // data msgs metrics prometheusWriteNumberOfSentMsgs.WithLabelValues(name).Add(0) prometheusWriteNumberOfFailSendMsgs.WithLabelValues(name, "").Add(0) prometheusWriteSendDuration.WithLabelValues(name).Set(0) // metadata msgs metrics prometheusWriteNumberOfSentMetadataMsgs.WithLabelValues(name).Add(0) prometheusWriteNumberOfFailSendMetadataMsgs.WithLabelValues(name, "").Add(0) prometheusWriteMetadataSendDuration.WithLabelValues(name).Set(0) } func (p *promWriteOutput) registerMetrics() error { cfg := p.cfg.Load() if cfg == nil { return nil } if !cfg.EnableMetrics { return nil } if p.reg == nil { return nil } var err error registerMetricsOnce.Do(func() { if err = p.reg.Register(prometheusWriteNumberOfSentMsgs); err != nil { p.logger.Printf("failed to register metric: %v", err) return } if err = p.reg.Register(prometheusWriteNumberOfFailSendMsgs); err != nil { p.logger.Printf("failed to register metric: %v", err) return } if err = p.reg.Register(prometheusWriteSendDuration); err != nil { p.logger.Printf("failed to register metric: %v", err) return } if err = p.reg.Register(prometheusWriteNumberOfSentMetadataMsgs); err != nil { p.logger.Printf("failed to register metric: %v", err) return } if err = p.reg.Register(prometheusWriteNumberOfFailSendMetadataMsgs); err != nil { p.logger.Printf("failed to register metric: %v", err) return } if err = p.reg.Register(prometheusWriteMetadataSendDuration); err != nil { p.logger.Printf("failed to register metric: %v", err) return } }) initMetrics(cfg.Name) return err } ================================================ FILE: pkg/outputs/prometheus_output/prometheus_write_output/prometheus_write_output.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project ("Work") made under the Google Software Grant and Corporate Contributor License Agreement ("CLA") and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose. // This code is provided on an "as is" basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package prometheus_write_output import ( "context" "encoding/json" "errors" "fmt" "io" "log" "net/http" "net/url" "slices" "sync" "sync/atomic" "text/template" "time" "github.com/openconfig/gnmi/proto/gnmi" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/prompb" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmic/pkg/api/types" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/gtemplate" "github.com/openconfig/gnmic/pkg/outputs" promcom "github.com/openconfig/gnmic/pkg/outputs/prometheus_output" gutils "github.com/openconfig/gnmic/pkg/utils" "github.com/zestor-dev/zestor/store" ) const ( outputType = "prometheus_write" loggingPrefix = "[prometheus_write_output:%s] " defaultTimeout = 10 * time.Second defaultWriteInterval = 10 * time.Second defaultMetadataWriteInterval = time.Minute defaultBufferSize = 1000 defaultMaxTSPerWrite = 500 defaultMaxMetaDataEntriesPerWrite = 500 defaultMetricHelp = "gNMIc generated metric" userAgent = "gNMIc prometheus write" defaultNumWorkers = 1 defaultNumWriters = 1 ) func init() { outputs.Register(outputType, func() outputs.Output { return &promWriteOutput{} }) } type promWriteOutput struct { outputs.BaseOutput cfg *atomic.Pointer[config] dynCfg *atomic.Pointer[dynConfig] httpClient *atomic.Pointer[http.Client] timeSeriesCh *atomic.Pointer[chan *prompb.TimeSeries] logger *log.Logger eventChan chan *formatters.EventMsg msgChan chan *outputs.ProtoMsg buffDrainCh chan struct{} m *sync.Mutex metadataCache map[string]prompb.MetricMetadata rootCtx context.Context cancelFn context.CancelFunc wg *sync.WaitGroup reg *prometheus.Registry store store.Store[any] } type config struct { Name string `mapstructure:"name,omitempty" json:"name,omitempty"` URL string `mapstructure:"url,omitempty" json:"url,omitempty"` Timeout time.Duration `mapstructure:"timeout,omitempty" json:"timeout,omitempty"` Headers map[string]string `mapstructure:"headers,omitempty" json:"headers,omitempty"` Authentication *auth `mapstructure:"authentication,omitempty" json:"authentication,omitempty"` Authorization *authorization `mapstructure:"authorization,omitempty" json:"authorization,omitempty"` TLS *types.TLSConfig `mapstructure:"tls,omitempty" json:"tls,omitempty"` Interval time.Duration `mapstructure:"interval,omitempty" json:"interval,omitempty"` BufferSize int `mapstructure:"buffer-size,omitempty" json:"buffer-size,omitempty"` MaxTimeSeriesPerWrite int `mapstructure:"max-time-series-per-write,omitempty" json:"max-time-series-per-write,omitempty"` MaxRetries int `mapstructure:"max-retries,omitempty" json:"max-retries,omitempty"` Metadata *metadata `mapstructure:"metadata,omitempty" json:"metadata,omitempty"` Debug bool `mapstructure:"debug,omitempty" json:"debug,omitempty"` // MetricPrefix string `mapstructure:"metric-prefix,omitempty" json:"metric-prefix,omitempty"` AppendSubscriptionName bool `mapstructure:"append-subscription-name,omitempty" json:"append-subscription-name,omitempty"` AddTarget string `mapstructure:"add-target,omitempty" json:"add-target,omitempty"` TargetTemplate string `mapstructure:"target-template,omitempty" json:"target-template,omitempty"` StringsAsLabels bool `mapstructure:"strings-as-labels,omitempty" json:"strings-as-labels,omitempty"` EventProcessors []string `mapstructure:"event-processors,omitempty" json:"event-processors,omitempty"` NumWorkers int `mapstructure:"num-workers,omitempty" json:"num-workers,omitempty"` NumWriters int `mapstructure:"num-writers,omitempty" json:"num-writers,omitempty"` EnableMetrics bool `mapstructure:"enable-metrics,omitempty" json:"enable-metrics,omitempty"` } type dynConfig struct { targetTpl *template.Template evps []formatters.EventProcessor mb *promcom.MetricBuilder } type auth struct { Username string `mapstructure:"username,omitempty" json:"username,omitempty"` Password string `mapstructure:"password,omitempty" json:"password,omitempty"` } type authorization struct { Type string `mapstructure:"type,omitempty" json:"type,omitempty"` Credentials string `mapstructure:"credentials,omitempty" json:"credentials,omitempty"` } type metadata struct { Include bool `mapstructure:"include,omitempty" json:"include,omitempty"` Interval time.Duration `mapstructure:"interval,omitempty" json:"interval,omitempty"` MaxEntriesPerWrite int `mapstructure:"max-entries-per-write,omitempty" json:"max-entries-per-write,omitempty"` } func (p *promWriteOutput) init() { p.cfg = new(atomic.Pointer[config]) p.dynCfg = new(atomic.Pointer[dynConfig]) p.httpClient = new(atomic.Pointer[http.Client]) p.timeSeriesCh = new(atomic.Pointer[chan *prompb.TimeSeries]) p.wg = new(sync.WaitGroup) p.m = new(sync.Mutex) p.logger = log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags) p.eventChan = make(chan *formatters.EventMsg) p.msgChan = make(chan *outputs.ProtoMsg) p.buffDrainCh = make(chan struct{}, 1) p.metadataCache = make(map[string]prompb.MetricMetadata) } func (p *promWriteOutput) buildEventProcessors(cfg *config) ([]formatters.EventProcessor, error) { tcs, ps, acts, err := gutils.GetConfigMaps(p.store) if err != nil { return nil, err } return formatters.MakeEventProcessors(p.logger, cfg.EventProcessors, ps, tcs, acts) } func (p *promWriteOutput) setLogger(logger *log.Logger) { if logger != nil && p.logger != nil { p.logger.SetOutput(logger.Writer()) p.logger.SetFlags(logger.Flags()) } } func (p *promWriteOutput) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error { p.init() ncfg := new(config) err := outputs.DecodeConfig(cfg, ncfg) if err != nil { return err } if ncfg.URL == "" { return errors.New("missing url field") } _, err = url.Parse(ncfg.URL) if err != nil { return err } if ncfg.Name == "" { ncfg.Name = name } p.logger.SetPrefix(fmt.Sprintf(loggingPrefix, ncfg.Name)) options := &outputs.OutputOptions{} for _, opt := range opts { if err := opt(options); err != nil { return err } } p.store = options.Store // apply logger p.setLogger(options.Logger) // set defaults p.setDefaultsFor(ncfg) p.cfg.Store(ncfg) // initialize registry p.reg = options.Registry err = p.registerMetrics() if err != nil { return err } // prep dynamic config dc := new(dynConfig) // initialize event processors dc.evps, err = p.buildEventProcessors(ncfg) if err != nil { return err } // initialize target template if ncfg.TargetTemplate == "" { dc.targetTpl = outputs.DefaultTargetTemplate } else if ncfg.AddTarget != "" { dc.targetTpl, err = gtemplate.CreateTemplate("target-template", ncfg.TargetTemplate) if err != nil { return err } dc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs) } dc.mb = &promcom.MetricBuilder{ Prefix: ncfg.MetricPrefix, AppendSubscriptionName: ncfg.AppendSubscriptionName, StringsAsLabels: ncfg.StringsAsLabels, } p.dynCfg.Store(dc) // initialize buffer chan timeSeriesCh := make(chan *prompb.TimeSeries, ncfg.BufferSize) p.timeSeriesCh.Store(&timeSeriesCh) cl, err := p.createHTTPClientFor(ncfg) if err != nil { return err } p.httpClient.Store(cl) p.rootCtx = ctx var wctx context.Context wctx, p.cancelFn = context.WithCancel(p.rootCtx) p.wg.Add(ncfg.NumWorkers) for i := 0; i < ncfg.NumWorkers; i++ { go p.worker(wctx) } p.wg.Add(ncfg.NumWriters) for i := 0; i < ncfg.NumWriters; i++ { go p.writer(wctx) } p.wg.Add(1) go p.metadataWriter(wctx) p.logger.Printf("initialized prometheus write output %s: %s", ncfg.Name, p.String()) return nil } func (p *promWriteOutput) Update(ctx context.Context, cfg map[string]any) error { newCfg := new(config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } if newCfg.URL == "" { return errors.New("missing url field") } if _, err := url.Parse(newCfg.URL); err != nil { return fmt.Errorf("invalid url: %w", err) } p.setDefaultsFor(newCfg) currCfg := p.cfg.Load() swapChannel := channelNeedsSwap(currCfg, newCfg) restartWorkers := needsWorkerRestart(currCfg, newCfg) rebuildClient := needsClientRebuild(currCfg, newCfg) rebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0 // Rebuild dynamic config dc := new(dynConfig) // target template if newCfg.TargetTemplate == "" { dc.targetTpl = outputs.DefaultTargetTemplate } else if newCfg.AddTarget != "" { t, err := gtemplate.CreateTemplate("target-template", newCfg.TargetTemplate) if err != nil { return err } dc.targetTpl = t.Funcs(outputs.TemplateFuncs) } else { dc.targetTpl = outputs.DefaultTargetTemplate } // metric builder dc.mb = &promcom.MetricBuilder{ Prefix: newCfg.MetricPrefix, AppendSubscriptionName: newCfg.AppendSubscriptionName, StringsAsLabels: newCfg.StringsAsLabels, } // rebuild processors ? prevDC := p.dynCfg.Load() if rebuildProcessors { dc.evps, err = p.buildEventProcessors(newCfg) if err != nil { return err } } else if prevDC != nil { dc.evps = prevDC.evps } // store new dynamic config p.dynCfg.Store(dc) // rebuild HTTP client if needed if rebuildClient { newClient, err := p.createHTTPClientFor(newCfg) if err != nil { return err } oldClient := p.httpClient.Swap(newClient) if oldClient != nil { oldClient.CloseIdleConnections() } } // store new config p.cfg.Store(newCfg) if swapChannel || restartWorkers { var newChan chan *prompb.TimeSeries if swapChannel { newChan = make(chan *prompb.TimeSeries, newCfg.BufferSize) } else { newChan = *p.timeSeriesCh.Load() } runCtx, cancel := context.WithCancel(p.rootCtx) newWG := new(sync.WaitGroup) // save old pointers oldCancel := p.cancelFn oldWG := p.wg oldTSCh := *p.timeSeriesCh.Load() // swap p.cancelFn = cancel p.wg = newWG p.timeSeriesCh.Store(&newChan) // restart workers p.wg.Add(newCfg.NumWorkers) for i := 0; i < newCfg.NumWorkers; i++ { go p.worker(runCtx) } p.wg.Add(newCfg.NumWriters) for i := 0; i < newCfg.NumWriters; i++ { go p.writer(runCtx) } p.wg.Add(1) go p.metadataWriter(runCtx) // cancel old workers if oldCancel != nil { oldCancel() } if oldWG != nil { oldWG.Wait() } if swapChannel { // best effort drain old channel OUTER_LOOP: for { select { case ts, ok := <-oldTSCh: if !ok { break } select { case newChan <- ts: default: // new channel full, drop message } default: break OUTER_LOOP } } } } p.logger.Printf("updated prometheus write output: %s", p.String()) return nil } func (p *promWriteOutput) Validate(cfg map[string]any) error { ncfg := new(config) err := outputs.DecodeConfig(cfg, ncfg) if err != nil { return err } if ncfg.URL == "" { return errors.New("missing url field") } if _, err := url.Parse(ncfg.URL); err != nil { return fmt.Errorf("invalid url: %w", err) } _, err = gtemplate.CreateTemplate("target-template", ncfg.TargetTemplate) if err != nil { return err } return nil } func (p *promWriteOutput) Write(ctx context.Context, rsp proto.Message, meta outputs.Meta) { if rsp == nil { return } cfg := p.cfg.Load() wctx, cancel := context.WithTimeout(ctx, cfg.Timeout) defer cancel() select { case <-ctx.Done(): return case p.msgChan <- outputs.NewProtoMsg(rsp, meta): case <-wctx.Done(): if cfg.Debug { p.logger.Printf("writing expired after %s", cfg.Timeout) } return } } func (p *promWriteOutput) WriteEvent(ctx context.Context, ev *formatters.EventMsg) { dc := p.dynCfg.Load() if dc == nil { return } select { case <-ctx.Done(): return default: var evs = []*formatters.EventMsg{ev} for _, proc := range dc.evps { evs = proc.Apply(evs...) } for _, pev := range evs { p.eventChan <- pev } } } func (p *promWriteOutput) Close() error { if p.cancelFn != nil { p.cancelFn() } p.wg.Wait() client := p.httpClient.Load() if client != nil { client.CloseIdleConnections() } p.logger.Printf("closed prometheus write output: %s", p.String()) return nil } func (p *promWriteOutput) String() string { cfg := p.cfg.Load() b, err := json.Marshal(cfg) if err != nil { return "" } return string(b) } func (p *promWriteOutput) worker(ctx context.Context) { defer p.wg.Done() defer p.logger.Printf("worker stopped") for { select { case <-ctx.Done(): return case ev := <-p.eventChan: p.workerHandleEvent(ev) case m := <-p.msgChan: p.workerHandleProto(ctx, m) } } } func (p *promWriteOutput) workerHandleProto(_ context.Context, m *outputs.ProtoMsg) { cfg := p.cfg.Load() dc := p.dynCfg.Load() pmsg := m.GetMsg() switch pmsg := pmsg.(type) { case *gnmi.SubscribeResponse: meta := m.GetMeta() measName := "default" if subName, ok := meta["subscription-name"]; ok { measName = subName } var err error pmsg, err = outputs.AddSubscriptionTarget(pmsg, m.GetMeta(), cfg.AddTarget, dc.targetTpl) if err != nil { p.logger.Printf("failed to add target to the response: %v", err) } events, err := formatters.ResponseToEventMsgs(measName, pmsg, meta, dc.evps...) if err != nil { p.logger.Printf("failed to convert message to event: %v", err) return } for _, ev := range events { p.workerHandleEvent(ev) } } } func (p *promWriteOutput) workerHandleEvent(ev *formatters.EventMsg) { cfg := p.cfg.Load() dc := p.dynCfg.Load() tsCh := p.timeSeriesCh.Load() if cfg.Debug { p.logger.Printf("got event to buffer: %+v", ev) } for _, pts := range dc.mb.TimeSeriesFromEvent(ev) { if len(*tsCh) >= cfg.BufferSize { if cfg.Debug { p.logger.Printf("buffer size reached, triggering write") } p.buffDrainCh <- struct{}{} } // populate metadata cache p.m.Lock() if cfg.Debug { p.logger.Printf("saving metrics metadata") } p.metadataCache[pts.Name] = prompb.MetricMetadata{ Type: prompb.MetricMetadata_COUNTER, MetricFamilyName: pts.Name, Help: defaultMetricHelp, } p.m.Unlock() // write time series to buffer if cfg.Debug { p.logger.Printf("writing TimeSeries to buffer") } *tsCh <- pts.TS } } func (p *promWriteOutput) setDefaultsFor(c *config) { if c.Timeout <= 0 { c.Timeout = defaultTimeout } if c.Interval <= 0 { c.Interval = defaultWriteInterval } if c.BufferSize <= 0 { c.BufferSize = defaultBufferSize } if c.NumWorkers <= 0 { c.NumWorkers = defaultNumWorkers } if c.NumWriters <= 0 { c.NumWriters = defaultNumWriters } if c.MaxTimeSeriesPerWrite <= 0 { c.MaxTimeSeriesPerWrite = defaultMaxTSPerWrite } if c.Metadata == nil { c.Metadata = &metadata{ Include: true, Interval: defaultMetadataWriteInterval, MaxEntriesPerWrite: defaultMaxMetaDataEntriesPerWrite, } return } if c.Metadata.Include { if c.Metadata.Interval <= 0 { c.Metadata.Interval = defaultMetadataWriteInterval } if c.Metadata.MaxEntriesPerWrite <= 0 { c.Metadata.MaxEntriesPerWrite = defaultMaxMetaDataEntriesPerWrite } } } // Helper functions func channelNeedsSwap(old, nw *config) bool { if old == nil || nw == nil { return true } return old.BufferSize != nw.BufferSize } func needsWorkerRestart(old, nw *config) bool { if old == nil || nw == nil { return true } return old.NumWorkers != nw.NumWorkers || old.NumWriters != nw.NumWriters || old.Interval != nw.Interval || metadataChanged(old.Metadata, nw.Metadata) } func metadataChanged(old, nw *metadata) bool { if old == nil || nw == nil { return true } return old.Include != nw.Include || old.Interval != nw.Interval || old.MaxEntriesPerWrite != nw.MaxEntriesPerWrite } func needsClientRebuild(old, nw *config) bool { if old == nil || nw == nil { return true } return old.URL != nw.URL || old.Timeout != nw.Timeout || !old.TLS.Equal(nw.TLS) || !authEq(old.Authentication, nw.Authentication) || !authzEq(old.Authorization, nw.Authorization) } func authEq(a, b *auth) bool { if a == nil && b == nil { return true } if a == nil || b == nil { return false } return a.Username == b.Username && a.Password == b.Password } func authzEq(a, b *authorization) bool { if a == nil && b == nil { return true } if a == nil || b == nil { return false } return a.Type == b.Type && a.Credentials == b.Credentials } ================================================ FILE: pkg/outputs/protometa.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package outputs import ( "google.golang.org/protobuf/proto" ) type ProtoMsg struct { m proto.Message meta Meta } func NewProtoMsg(m proto.Message, meta Meta) *ProtoMsg { return &ProtoMsg{ m: m, meta: meta, } } func (m *ProtoMsg) GetMsg() proto.Message { if m == nil { return nil } return m.m } func (m *ProtoMsg) GetMeta() Meta { if m == nil { return nil } return m.meta } ================================================ FILE: pkg/outputs/snmp_output/snmp_metrics.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package snmpoutput import ( "sync" "github.com/prometheus/client_golang/prometheus" ) var registerMetricsOnce sync.Once var snmpNumberOfSentTraps = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "snmp_output", Name: "number_of_snmp_traps_sent_total", Help: "Number of SNMP trap sent by gnmic SNMP output", }, []string{"name", "trap_index"}) var snmpNumberOfTrapSendFailureTraps = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "snmp_output", Name: "number_of_snmp_trap_sent_fail_total", Help: "Number of SNMP trap sending failures", }, []string{"name", "trap_index", "reason"}) var snmpNumberOfFailedTrapGeneration = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "gnmic", Subsystem: "snmp_output", Name: "number_of_snmp_trap_failed_generation", Help: "Number of failed trap generation", }, []string{"name", "trap_index", "reason"}) var snmpTrapGenerationDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "gnmic", Subsystem: "snmp_output", Name: "snmp_trap_generation_duration_ns", Help: "SNMP trap generation duration in ns", }, []string{"name", "trap_index"}) func (s *snmpOutput) initMetrics() { snmpNumberOfSentTraps.WithLabelValues(s.name, "0").Add(0) snmpNumberOfTrapSendFailureTraps.WithLabelValues(s.name, "0", "").Add(0) snmpNumberOfFailedTrapGeneration.WithLabelValues(s.name, "0", "").Add(0) snmpTrapGenerationDuration.WithLabelValues(s.name, "0").Set(0) } func (s *snmpOutput) registerMetrics() error { cfg := s.cfg.Load() if cfg == nil { return nil } if !cfg.EnableMetrics { return nil } if s.reg == nil { s.logger.Printf("ERR: metrics enabled but main registry is not initialized, enable main metrics under `api-server`") return nil } var err error registerMetricsOnce.Do(func() { if err = s.reg.Register(snmpNumberOfSentTraps); err != nil { return } if err = s.reg.Register(snmpNumberOfTrapSendFailureTraps); err != nil { return } if err = s.reg.Register(snmpNumberOfFailedTrapGeneration); err != nil { return } if err = s.reg.Register(snmpTrapGenerationDuration); err != nil { return } }) s.initMetrics() return err } ================================================ FILE: pkg/outputs/snmp_output/snmp_output.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package snmpoutput import ( "context" "encoding/json" "errors" "fmt" "io" "log" "slices" "strings" "sync" "sync/atomic" "text/template" "time" g "github.com/gosnmp/gosnmp" "github.com/itchyny/gojq" "github.com/openconfig/gnmi/proto/gnmi" "github.com/prometheus/client_golang/prometheus" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmic/pkg/api/path" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/cache" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/gtemplate" "github.com/openconfig/gnmic/pkg/outputs" gutils "github.com/openconfig/gnmic/pkg/utils" "github.com/zestor-dev/zestor/store" ) const ( loggingPrefix = "[snmp_output:%s] " defaultPort = 162 defaultCommunity = "public" minStartDelay = 5 * time.Second initialEventsBufferSize = 1000 // sysUpTimeInstanceOID = "1.3.6.1.2.1.1.3.0" ) func init() { outputs.Register("snmp", func() outputs.Output { return &snmpOutput{} }) } type snmpOutput struct { outputs.BaseOutput name string cfg *atomic.Pointer[Config] dynCfg *atomic.Pointer[dynConfig] snmpClient *atomic.Pointer[g.Handler] logger *log.Logger rootCtx context.Context cancelFn context.CancelFunc eventChan chan *formatters.EventMsg wg *sync.WaitGroup cache cache.Cache startTime time.Time reg *prometheus.Registry store store.Store[any] } type dynConfig struct { targetTpl *template.Template evps []formatters.EventProcessor } type Config struct { Address string `mapstructure:"address,omitempty" json:"address,omitempty"` Port uint16 `mapstructure:"port,omitempty" json:"port,omitempty"` Community string `mapstructure:"community,omitempty" json:"community,omitempty"` StartDelay time.Duration `mapstructure:"start-delay,omitempty" json:"start-delay,omitempty"` Traps []*trap `mapstructure:"traps,omitempty" json:"traps,omitempty"` AddTarget string `mapstructure:"add-target,omitempty" json:"add-target,omitempty"` TargetTemplate string `mapstructure:"target-template,omitempty" json:"target-template,omitempty"` EnableMetrics bool `mapstructure:"enable-metrics,omitempty" json:"enable-metrics,omitempty"` EventProcessors []string `mapstructure:"event-processors,omitempty" json:"event-processors,omitempty"` } type binding struct { Path string `mapstructure:"path,omitempty" json:"path,omitempty"` OID string `mapstructure:"oid,omitempty" json:"oid,omitempty"` Type string `mapstructure:"type,omitempty" json:"type,omitempty"` Value string `mapstructure:"value,omitempty" json:"value,omitempty"` pathTemplate *gojq.Code oidTemplate *gojq.Code valTemplate *gojq.Code } type trap struct { InformPDU bool `mapstructure:"inform,omitempty" json:"inform,omitempty"` Trigger *binding `mapstructure:"trigger,omitempty" json:"trigger,omitempty"` Bindings []*binding `mapstructure:"bindings,omitempty" json:"bindings,omitempty"` } func (s *snmpOutput) buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) { tcs, ps, acts, err := gutils.GetConfigMaps(s.store) if err != nil { return nil, err } evps, err := formatters.MakeEventProcessors( logger, eventProcessors, ps, tcs, acts, ) if err != nil { return nil, err } return evps, nil } func (s *snmpOutput) setLogger(logger *log.Logger) { if logger != nil && s.logger != nil { s.logger.SetOutput(logger.Writer()) s.logger.SetFlags(logger.Flags()) } } func (s *snmpOutput) init() { s.cfg = new(atomic.Pointer[Config]) s.dynCfg = new(atomic.Pointer[dynConfig]) s.logger = log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags) s.eventChan = make(chan *formatters.EventMsg, initialEventsBufferSize) s.snmpClient = new(atomic.Pointer[g.Handler]) s.wg = new(sync.WaitGroup) } func (s *snmpOutput) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error { s.init() // init struct fields newCfg := new(Config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } s.name = name //TODO: atomic ? s.logger.SetPrefix(fmt.Sprintf(loggingPrefix, name)) options := &outputs.OutputOptions{} for _, opt := range opts { if err := opt(options); err != nil { return err } } s.store = options.Store // apply logger s.setLogger(options.Logger) s.setDefaultsFor(newCfg) s.cfg.Store(newCfg) if len(newCfg.Traps) == 0 { return errors.New("missing traps definition") } dc := new(dynConfig) dc.evps, err = s.buildEventProcessors(options.Logger, newCfg.EventProcessors) if err != nil { return err } dc.targetTpl = outputs.DefaultTargetTemplate if newCfg.TargetTemplate != "" { dc.targetTpl, err = gtemplate.CreateTemplate("target-template", newCfg.TargetTemplate) if err != nil { return err } dc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs) } s.dynCfg.Store(dc) // initialize registry s.reg = options.Registry err = s.registerMetrics() if err != nil { return err } // initialize traps err = s.initializeTrapsFor(newCfg) if err != nil { return err } s.cache, err = cache.New(&cache.Config{Expiration: -1}, cache.WithLogger(s.logger)) if err != nil { return err } s.rootCtx = ctx ctx, s.cancelFn = context.WithCancel(s.rootCtx) s.startTime = time.Now() s.wg.Add(1) go s.start(ctx) s.logger.Printf("initialized SNMP output: %s", s.String()) return nil } func (s *snmpOutput) Validate(cfg map[string]any) error { newCfg := new(Config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } if len(newCfg.Traps) == 0 { return errors.New("missing traps definition") } return s.initializeTrapsFor(newCfg) } func (s *snmpOutput) Update(_ context.Context, cfg map[string]any) error { newCfg := new(Config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } s.setDefaultsFor(newCfg) err = s.initializeTrapsFor(newCfg) if err != nil { return err } currCfg := s.cfg.Load() prevDC := s.dynCfg.Load() dc := new(dynConfig) processorsChanged := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0 if processorsChanged { dc.evps, err = s.buildEventProcessors(s.logger, newCfg.EventProcessors) if err != nil { return err } } else if prevDC != nil { dc.evps = prevDC.evps } if newCfg.TargetTemplate == "" { dc.targetTpl = outputs.DefaultTargetTemplate } else if newCfg.AddTarget != "" { dc.targetTpl, err = gtemplate.CreateTemplate("target-template", newCfg.TargetTemplate) if err != nil { return err } dc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs) } else { dc.targetTpl = outputs.DefaultTargetTemplate } s.dynCfg.Store(dc) s.cfg.Store(newCfg) // cancel old context if running if s.cancelFn != nil { s.cancelFn() s.wg.Wait() } // create new context and start new loop var ctx context.Context ctx, s.cancelFn = context.WithCancel(s.rootCtx) s.wg.Add(1) go s.start(ctx) s.logger.Printf("updated SNMP output: %s", s.String()) return nil } func (s *snmpOutput) UpdateProcessor(name string, pcfg map[string]any) error { cfg := s.cfg.Load() dc := s.dynCfg.Load() newEvps, changed, err := outputs.UpdateProcessorInSlice( s.logger, s.store, cfg.EventProcessors, dc.evps, name, pcfg, ) if err != nil { return err } if changed { newDC := *dc newDC.evps = newEvps s.dynCfg.Store(&newDC) s.logger.Printf("updated event processor %s", name) } return nil } func (s *snmpOutput) initializeTrapsFor(cfg *Config) error { var err error for i, trap := range cfg.Traps { if trap.Trigger == nil { return fmt.Errorf("trap index %d missing \"trigger\"", i) } if trap.Trigger.Path == "" { return fmt.Errorf("trap index %d missing \"path\"", i) } // init trap and bindings trap.Trigger.oidTemplate, err = parseJQ(trap.Trigger.OID) if err != nil { return err } trap.Trigger.valTemplate, err = parseJQ(trap.Trigger.Value) if err != nil { return err } for _, bd := range trap.Bindings { bd.pathTemplate, err = parseJQ(bd.Path) if err != nil { return err } bd.oidTemplate, err = parseJQ(bd.OID) if err != nil { return err } bd.valTemplate, err = parseJQ(bd.Value) if err != nil { return err } } } return nil } func (s *snmpOutput) Write(ctx context.Context, m proto.Message, meta outputs.Meta) { if m == nil { return } cfg := s.cfg.Load() if cfg == nil { return } select { case <-ctx.Done(): return default: dc := s.dynCfg.Load() if dc == nil { return } rsp, err := outputs.AddSubscriptionTarget(m, meta, "if-not-present", dc.targetTpl) if err != nil { s.logger.Printf("failed to add target to the response: %v", err) return } measName := meta["subscription-name"] if measName == "" { measName = "default" } s.cache.Write(ctx, measName, rsp) events, err := formatters.ResponseToEventMsgs(measName, rsp, meta, dc.evps...) if err != nil { s.logger.Printf("failed to convert message to event: %v", err) return } for _, ev := range events { select { case <-ctx.Done(): return case s.eventChan <- ev: } } } } func (s *snmpOutput) WriteEvent(ctx context.Context, ev *formatters.EventMsg) {} func (s *snmpOutput) Close() error { s.cancelFn() s.wg.Wait() snmpClient := s.snmpClient.Load() if snmpClient != nil { return (*snmpClient).Close() } return nil } func (s *snmpOutput) String() string { cfg := s.cfg.Load() if cfg == nil { return "" } b, err := json.Marshal(cfg) if err != nil { return "" } return string(b) } func (s *snmpOutput) start(ctx context.Context) { defer s.wg.Done() s.createSNMPHandler() var init = true for { select { case <-ctx.Done(): return case ev := <-s.eventChan: if ev == nil { return } cfg := s.cfg.Load() if cfg == nil { continue } if init { <-time.After(cfg.StartDelay) init = false } for idx := range cfg.Traps { err := s.handleEvent(cfg, ev, idx) if err != nil { s.logger.Printf("failed to handle event %+v : %v", ev, err) } } } } } func (s *snmpOutput) setDefaultsFor(cfg *Config) { if cfg.Port <= 0 { cfg.Port = defaultPort } if cfg.Community == "" { cfg.Community = defaultCommunity } if cfg.StartDelay < minStartDelay { cfg.StartDelay = minStartDelay } } func (s *snmpOutput) createSNMPHandler() { cfg := s.cfg.Load() if cfg == nil { return } snmpClient := g.NewHandler() snmpClient.SetTarget(cfg.Address) snmpClient.SetCommunity(cfg.Community) snmpClient.SetPort(cfg.Port) snmpClient.SetVersion(g.Version2c) CONN: err := snmpClient.Connect() if err != nil { s.logger.Printf("failed to connect: %v", err) time.Sleep(time.Second) goto CONN } s.logger.Print("SNMP connected") s.snmpClient.Store(&snmpClient) } func pduType(typ string) g.Asn1BER { switch typ { case "bool": return g.Boolean case "int": return g.Integer case "bitString": return g.BitString case "octetString": return g.OctetString case "null": return g.Null case "objectID": return g.ObjectIdentifier case "objectDescription": return g.ObjectDescription case "ipAddress": return g.IPAddress case "counter32": return g.Counter32 case "gauge32": return g.Gauge32 case "timeTicks": return g.TimeTicks case "opaque": return g.Opaque case "nsapAddress": return g.NsapAddress case "counter64": return g.Counter64 case "uint32": return g.Uinteger32 case "opaqueFloat": return g.OpaqueFloat case "opaqueDouble": return g.OpaqueDouble } return g.UnknownType } func parseJQ(code string) (*gojq.Code, error) { q, err := gojq.Parse(strings.TrimSpace(code)) if err != nil { return nil, err } return gojq.Compile(q) } func (s *snmpOutput) runJQ(code *gojq.Code, ev map[string]interface{}) (interface{}, error) { iter := code.Run(ev) for { r, ok := iter.Next() if !ok { break } switch r := r.(type) { case error: return nil, r default: return r, nil } } return nil, nil } func (s *snmpOutput) handleEvent(cfg *Config, ev *formatters.EventMsg, idx int) error { trap := cfg.Traps[idx] // trigger ? if _, ok := ev.Values[trap.Trigger.Path]; !ok { return nil } start := time.Now() var err error var target string if tg, ok := ev.Tags["source"]; ok { target = tg } else if tg, ok := ev.Tags["target"]; ok { target = tg } else { err = errors.New("missing 'source' or 'target' field") snmpNumberOfFailedTrapGeneration.WithLabelValues(s.name, fmt.Sprintf("%d", idx), err.Error()).Inc() return err } // pdus := make([]g.SnmpPDU, 0, len(trap.Bindings)+2) // append systemUptime pdu pdus = append(pdus, g.SnmpPDU{ Name: sysUpTimeInstanceOID, Type: g.TimeTicks, Value: uint32(time.Since(s.startTime).Seconds()), }) pdu, err := s.buildTriggerPDU(trap.Trigger, target, ev) if err != nil { err = fmt.Errorf("failed to build PDU from trigger: %v", err) snmpNumberOfFailedTrapGeneration.WithLabelValues(s.name, fmt.Sprintf("%d", idx), err.Error()).Inc() return err } pdus = append(pdus, pdu) for i, bd := range trap.Bindings { pdu, err := s.buildPDUFromCache(bd, target, ev) if err != nil { err = fmt.Errorf("failed to build PDU from binding index %d: %v", i, err) s.logger.Printf("%v", err) snmpNumberOfFailedTrapGeneration.WithLabelValues(s.name, fmt.Sprintf("%d", idx), err.Error()).Inc() continue } pdus = append(pdus, pdu) } // snmpNumberOfSentTraps.WithLabelValues(s.name, fmt.Sprintf("%d", idx)).Add(1) snmpClient := s.snmpClient.Load() if snmpClient == nil { return fmt.Errorf("SNMP client not initialized") } _, err = (*snmpClient).SendTrap(g.SnmpTrap{ Variables: pdus, IsInform: trap.InformPDU, }) if err != nil { snmpNumberOfTrapSendFailureTraps.WithLabelValues(s.name, fmt.Sprintf("%d", idx), err.Error()).Inc() return fmt.Errorf("failed to send trap: %v", err) } snmpTrapGenerationDuration.WithLabelValues(s.name, fmt.Sprintf("%d", idx)).Set(float64(time.Since(start).Nanoseconds())) return nil } func (s *snmpOutput) buildTriggerPDU(bd *binding, targetName string, ev *formatters.EventMsg) (g.SnmpPDU, error) { var oid string var val any input := ev.ToMap() oidResult, err := s.runJQ(bd.oidTemplate, input) if err != nil { return g.SnmpPDU{}, fmt.Errorf("failed to run OID JQ: %v", err) } var ok bool oid, ok = oidResult.(string) if !ok { return g.SnmpPDU{}, fmt.Errorf("unexpected OID result type: %T", oidResult) } val, err = s.runJQ(bd.valTemplate, input) if err != nil { return g.SnmpPDU{}, fmt.Errorf("failed to run Value JQ: %v", err) } pdu := g.SnmpPDU{ Name: oid, Type: pduType(bd.Type), Value: val, } return pdu, nil } func (s *snmpOutput) buildPDUFromCache(bd *binding, targetName string, ev *formatters.EventMsg) (g.SnmpPDU, error) { input := ev.ToMap() pathResult, err := s.runJQ(bd.pathTemplate, input) if err != nil { return g.SnmpPDU{}, fmt.Errorf("failed to run path JQ: %v", err) } xpath, ok := pathResult.(string) if !ok { return g.SnmpPDU{}, fmt.Errorf("unexpected XPATH result type: %T", pathResult) } gp, err := path.ParsePath(xpath) if err != nil { return g.SnmpPDU{}, err } rsps, err := s.cache.Read("*", targetName, gp) if err != nil { return g.SnmpPDU{}, err } evs := make([]*formatters.EventMsg, 0) for subName, notifs := range rsps { for _, notif := range notifs { revs, err := formatters.ResponseToEventMsgs(ev.Name, &gnmi.SubscribeResponse{ Response: &gnmi.SubscribeResponse_Update{ Update: notif, }, }, map[string]string{"subscription-name": subName}) if err != nil { return g.SnmpPDU{}, err } evs = append(evs, revs...) } } if len(evs) != 1 { return g.SnmpPDU{}, errors.New("failed to build PDU, corresponding value not found or too many values found") } pduInput := evs[0].ToMap() oidResult, err := s.runJQ(bd.oidTemplate, pduInput) if err != nil { return g.SnmpPDU{}, fmt.Errorf("failed to run OID JQ: %v", err) } oid, ok := oidResult.(string) if !ok { return g.SnmpPDU{}, fmt.Errorf("unexpected OID result type: %T", oidResult) } val, err := s.runJQ(bd.valTemplate, pduInput) if err != nil { return g.SnmpPDU{}, fmt.Errorf("failed to run Value JQ: %v", err) } pdu := g.SnmpPDU{ Name: oid, Type: pduType(bd.Type), Value: val, } return pdu, nil } ================================================ FILE: pkg/outputs/tcp_output/tcp_output.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package tcp_output import ( "context" "encoding/json" "errors" "fmt" "io" "log" "net" "slices" "sync" "sync/atomic" "text/template" "time" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/gtemplate" "github.com/openconfig/gnmic/pkg/outputs" gutils "github.com/openconfig/gnmic/pkg/utils" "github.com/zestor-dev/zestor/store" ) const ( defaultRetryTimer = 2 * time.Second defaultNumWorkers = 1 loggingPrefix = "[tcp_output:%s] " ) func init() { outputs.Register("tcp", func() outputs.Output { return &tcpOutput{} }) } type tcpOutput struct { outputs.BaseOutput cfg *atomic.Pointer[config] dynCfg *atomic.Pointer[dynConfig] rootCtx context.Context cancelFn context.CancelFunc wg *sync.WaitGroup buffer *atomic.Pointer[chan []byte] logger *log.Logger store store.Store[any] } type dynConfig struct { targetTpl *template.Template evps []formatters.EventProcessor mo *formatters.MarshalOptions delimiter []byte limiter *time.Ticker } type config struct { Address string `mapstructure:"address,omitempty"` // ip:port Rate time.Duration `mapstructure:"rate,omitempty"` BufferSize uint `mapstructure:"buffer-size,omitempty"` Format string `mapstructure:"format,omitempty"` AddTarget string `mapstructure:"add-target,omitempty"` TargetTemplate string `mapstructure:"target-template,omitempty"` OverrideTimestamps bool `mapstructure:"override-timestamps,omitempty"` SplitEvents bool `mapstructure:"split-events,omitempty"` Delimiter string `mapstructure:"delimiter,omitempty"` KeepAlive time.Duration `mapstructure:"keep-alive,omitempty"` RetryInterval time.Duration `mapstructure:"retry-interval,omitempty"` NumWorkers int `mapstructure:"num-workers,omitempty"` EnableMetrics bool `mapstructure:"enable-metrics,omitempty"` EventProcessors []string `mapstructure:"event-processors,omitempty"` } func (t *tcpOutput) buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) { tcs, ps, acts, err := gutils.GetConfigMaps(t.store) if err != nil { return nil, err } evps, err := formatters.MakeEventProcessors( logger, eventProcessors, ps, tcs, acts, ) if err != nil { return nil, err } return evps, nil } func (t *tcpOutput) init() { t.cfg = new(atomic.Pointer[config]) t.dynCfg = new(atomic.Pointer[dynConfig]) t.buffer = new(atomic.Pointer[chan []byte]) t.wg = new(sync.WaitGroup) t.logger = log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags) } func (t *tcpOutput) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error { t.init() newCfg := new(config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } setDefaultsFor(newCfg) t.cfg.Store(newCfg) t.logger.SetPrefix(fmt.Sprintf(loggingPrefix, name)) options := &outputs.OutputOptions{} for _, opt := range opts { if err := opt(options); err != nil { return err } } t.store = options.Store // apply logger if options.Logger != nil && t.logger != nil { t.logger.SetOutput(options.Logger.Writer()) t.logger.SetFlags(options.Logger.Flags()) } dc := new(dynConfig) // initialize event processors dc.evps, err = t.buildEventProcessors(options.Logger, newCfg.EventProcessors) if err != nil { return err } dc.mo = &formatters.MarshalOptions{ Format: newCfg.Format, OverrideTS: newCfg.OverrideTimestamps, } if newCfg.TargetTemplate == "" { dc.targetTpl = outputs.DefaultTargetTemplate } else if newCfg.AddTarget != "" { dc.targetTpl, err = gtemplate.CreateTemplate("target-template", newCfg.TargetTemplate) if err != nil { return err } dc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs) } _, _, err = net.SplitHostPort(newCfg.Address) if err != nil { return fmt.Errorf("wrong address format: %v", err) } ch := make(chan []byte, newCfg.BufferSize) t.buffer.Store(&ch) if newCfg.Rate > 0 { dc.limiter = time.NewTicker(newCfg.Rate) } if len(newCfg.Delimiter) > 0 { dc.delimiter = []byte(newCfg.Delimiter) } t.dynCfg.Store(dc) t.cfg.Store(newCfg) t.rootCtx = ctx ctx, t.cancelFn = context.WithCancel(t.rootCtx) t.wg.Add(newCfg.NumWorkers) for i := 0; i < newCfg.NumWorkers; i++ { go t.start(ctx, i) } return nil } func setDefaultsFor(cfg *config) { if cfg.RetryInterval == 0 { cfg.RetryInterval = defaultRetryTimer } if cfg.NumWorkers < 1 { cfg.NumWorkers = defaultNumWorkers } } func validate(cfg *config) error { if cfg.Address == "" { return errors.New("address is required") } _, _, err := net.SplitHostPort(cfg.Address) if err != nil { return fmt.Errorf("wrong address format: %v", err) } if cfg.TargetTemplate == "" { return errors.New("target-template is required") } return nil } func (t *tcpOutput) Validate(cfg map[string]any) error { newCfg := new(config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } setDefaultsFor(newCfg) return validate(newCfg) } func (t *tcpOutput) Update(_ context.Context, cfg map[string]any) error { newCfg := new(config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } setDefaultsFor(newCfg) currCfg := t.cfg.Load() swapChannel := channelNeedsSwap(currCfg, newCfg) restartWorkers := needsWorkerRestart(currCfg, newCfg) rebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0 dc := new(dynConfig) prevDC := t.dynCfg.Load() if rebuildProcessors { dc.evps, err = t.buildEventProcessors(t.logger, newCfg.EventProcessors) if err != nil { return err } } else if prevDC != nil { dc.evps = prevDC.evps } dc.delimiter = []byte(newCfg.Delimiter) if newCfg.Rate > 0 { // if rate changed if currCfg.Rate != newCfg.Rate { if prevDC != nil && prevDC.limiter != nil { prevDC.limiter.Stop() } dc.limiter = time.NewTicker(newCfg.Rate) } else { dc.limiter = prevDC.limiter } } else if prevDC != nil && prevDC.limiter != nil { // stop old limiter if any prevDC.limiter.Stop() } dc.mo = &formatters.MarshalOptions{ Format: newCfg.Format, OverrideTS: newCfg.OverrideTimestamps, } if newCfg.TargetTemplate == "" { dc.targetTpl = outputs.DefaultTargetTemplate } else if newCfg.AddTarget != "" { dc.targetTpl, err = gtemplate.CreateTemplate("target-template", newCfg.TargetTemplate) if err != nil { return err } dc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs) } else { dc.targetTpl = outputs.DefaultTargetTemplate } t.dynCfg.Store(dc) t.cfg.Store(newCfg) oldChan := *t.buffer.Load() oldWg := t.wg t.wg = new(sync.WaitGroup) oldCancel := t.cancelFn if swapChannel || restartWorkers { var newChan chan []byte if swapChannel { newChan = make(chan []byte, newCfg.BufferSize) } else { newChan = oldChan } // swap channel t.buffer.Store(&newChan) var ctx context.Context ctx, t.cancelFn = context.WithCancel(t.rootCtx) t.wg.Add(newCfg.NumWorkers) for i := 0; i < newCfg.NumWorkers; i++ { go t.start(ctx, i) } if oldCancel != nil { oldCancel() } if oldWg != nil { oldWg.Wait() } if swapChannel { DRAIN_LOOP: for { select { case b, ok := <-oldChan: if !ok { break } select { case newChan <- b: default: // new channel full, drop message } default: break DRAIN_LOOP } } } t.logger.Printf("restarted TCP output workers") } else { t.logger.Printf("no changes to TCP output") } t.logger.Printf("updated TCP output: %s", t.String()) return nil } func (t *tcpOutput) UpdateProcessor(name string, pcfg map[string]any) error { cfg := t.cfg.Load() dc := t.dynCfg.Load() newEvps, changed, err := outputs.UpdateProcessorInSlice( t.logger, t.store, cfg.EventProcessors, dc.evps, name, pcfg, ) if err != nil { return err } if changed { newDC := *dc newDC.evps = newEvps t.dynCfg.Store(&newDC) t.logger.Printf("updated event processor %s", name) } return nil } func (t *tcpOutput) Write(ctx context.Context, m proto.Message, meta outputs.Meta) { if m == nil { return } select { case <-ctx.Done(): return default: cfg := t.cfg.Load() dc := t.dynCfg.Load() rsp, err := outputs.AddSubscriptionTarget(m, meta, cfg.AddTarget, dc.targetTpl) if err != nil { t.logger.Printf("failed to add target to the response: %v", err) } bb, err := outputs.Marshal(rsp, meta, dc.mo, cfg.SplitEvents, dc.evps...) if err != nil { t.logger.Printf("failed marshaling proto msg: %v", err) return } buffer := t.buffer.Load() for _, b := range bb { (*buffer) <- b } } } func (t *tcpOutput) WriteEvent(ctx context.Context, ev *formatters.EventMsg) {} func (t *tcpOutput) Close() error { t.cancelFn() t.wg.Wait() dc := t.dynCfg.Load() if dc != nil && dc.limiter != nil { dc.limiter.Stop() } return nil } func (t *tcpOutput) String() string { cfg := t.cfg.Load() b, err := json.Marshal(cfg) if err != nil { return "" } return string(b) } func (t *tcpOutput) start(ctx context.Context, idx int) { defer t.wg.Done() workerLogPrefix := fmt.Sprintf("worker-%d", idx) START: if ctx.Err() != nil { t.logger.Printf("context error: %v", ctx.Err()) return } cfg := t.cfg.Load() dc := t.dynCfg.Load() tcpAddr, err := net.ResolveTCPAddr("tcp", cfg.Address) if err != nil { t.logger.Printf("%s failed to resolve address: %v", workerLogPrefix, err) time.Sleep(cfg.RetryInterval) goto START } conn, err := net.DialTCP("tcp", nil, tcpAddr) if err != nil { t.logger.Printf("%s failed to dial TCP: %v", workerLogPrefix, err) time.Sleep(cfg.RetryInterval) goto START } defer conn.Close() if cfg.KeepAlive > 0 { conn.SetKeepAlive(true) conn.SetKeepAlivePeriod(cfg.KeepAlive) } buffer := *t.buffer.Load() for { select { case <-ctx.Done(): return case b := <-buffer: delimiter := dc.delimiter if dc.limiter != nil { <-dc.limiter.C } // append delimiter b = append(b, delimiter...) _, err = conn.Write(b) if err != nil { t.logger.Printf("%s failed sending tcp bytes: %v", workerLogPrefix, err) conn.Close() time.Sleep(cfg.RetryInterval) goto START } } } } func channelNeedsSwap(old, nw *config) bool { if old == nil || nw == nil { return true } return old.BufferSize != nw.BufferSize } func needsWorkerRestart(old, nw *config) bool { if old == nil || nw == nil { return true } return old.NumWorkers != nw.NumWorkers } ================================================ FILE: pkg/outputs/udp_output/udp_output.go ================================================ // © 2022 Nokia. // // This code is a Contribution to the gNMIc project ("Work") made under the Google Software Grant and Corporate Contributor License Agreement ("CLA") and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia's intellectual property are granted for any other purpose. // This code is provided on an "as is" basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package udp_output import ( "context" "encoding/json" "errors" "fmt" "io" "log" "net" "slices" "sync" "sync/atomic" "text/template" "time" "google.golang.org/protobuf/proto" "github.com/openconfig/gnmic/pkg/api/utils" "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/gtemplate" "github.com/openconfig/gnmic/pkg/outputs" gutils "github.com/openconfig/gnmic/pkg/utils" "github.com/zestor-dev/zestor/store" ) const ( defaultRetryTimer = 2 * time.Second loggingPrefix = "[udp_output:%s] " ) func init() { outputs.Register("udp", func() outputs.Output { return &udpSock{} }) } type udpSock struct { outputs.BaseOutput cfg *atomic.Pointer[Config] dynCfg *atomic.Pointer[dynConfig] buffer *atomic.Pointer[chan []byte] rootCtx context.Context cancelFn context.CancelFunc wg *sync.WaitGroup logger *log.Logger store store.Store[any] } type dynConfig struct { targetTpl *template.Template evps []formatters.EventProcessor mo *formatters.MarshalOptions limiter *time.Ticker } type Config struct { Address string `mapstructure:"address,omitempty"` // ip:port Rate time.Duration `mapstructure:"rate,omitempty"` BufferSize uint `mapstructure:"buffer-size,omitempty"` Format string `mapstructure:"format,omitempty"` AddTarget string `mapstructure:"add-target,omitempty"` TargetTemplate string `mapstructure:"target-template,omitempty"` OverrideTimestamps bool `mapstructure:"override-timestamps,omitempty"` SplitEvents bool `mapstructure:"split-events,omitempty"` RetryInterval time.Duration `mapstructure:"retry-interval,omitempty"` EnableMetrics bool `mapstructure:"enable-metrics,omitempty"` EventProcessors []string `mapstructure:"event-processors,omitempty"` } func (u *udpSock) buildEventProcessors(logger *log.Logger, eventProcessors []string) ([]formatters.EventProcessor, error) { tcs, ps, acts, err := gutils.GetConfigMaps(u.store) if err != nil { return nil, err } evps, err := formatters.MakeEventProcessors( logger, eventProcessors, ps, tcs, acts, ) if err != nil { return nil, err } return evps, nil } func (u *udpSock) init() { u.cfg = new(atomic.Pointer[Config]) u.dynCfg = new(atomic.Pointer[dynConfig]) u.buffer = new(atomic.Pointer[chan []byte]) u.wg = new(sync.WaitGroup) u.logger = log.New(io.Discard, loggingPrefix, utils.DefaultLoggingFlags) } func (u *udpSock) Init(ctx context.Context, name string, cfg map[string]interface{}, opts ...outputs.Option) error { u.init() newCfg := new(Config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } setDefaultsFor(newCfg) u.logger.SetPrefix(fmt.Sprintf(loggingPrefix, name)) options := &outputs.OutputOptions{} for _, opt := range opts { if err := opt(options); err != nil { return err } } u.store = options.Store // apply logger if options.Logger != nil && u.logger != nil { u.logger.SetOutput(options.Logger.Writer()) u.logger.SetFlags(options.Logger.Flags()) } dc := new(dynConfig) // initialize event processors dc.evps, err = u.buildEventProcessors(options.Logger, newCfg.EventProcessors) if err != nil { return err } _, _, err = net.SplitHostPort(newCfg.Address) if err != nil { return fmt.Errorf("wrong address format: %v", err) } if newCfg.Rate > 0 { dc.limiter = time.NewTicker(newCfg.Rate) } dc.mo = &formatters.MarshalOptions{ Format: newCfg.Format, OverrideTS: newCfg.OverrideTimestamps, } if newCfg.TargetTemplate == "" { dc.targetTpl = outputs.DefaultTargetTemplate } else if newCfg.AddTarget != "" { dc.targetTpl, err = gtemplate.CreateTemplate("target-template", newCfg.TargetTemplate) if err != nil { return err } dc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs) } ch := make(chan []byte, newCfg.BufferSize) u.buffer.Store(&ch) u.dynCfg.Store(dc) u.cfg.Store(newCfg) u.rootCtx = ctx u.rootCtx, u.cancelFn = context.WithCancel(u.rootCtx) u.wg.Add(1) go u.start(u.rootCtx) u.logger.Printf("initialized UDP output: %s", u.String()) return nil } func setDefaultsFor(cfg *Config) { if cfg.RetryInterval == 0 { cfg.RetryInterval = defaultRetryTimer } } func validate(cfg *Config) error { if cfg.Address == "" { return errors.New("address is required") } _, _, err := net.SplitHostPort(cfg.Address) if err != nil { return fmt.Errorf("wrong address format: %v", err) } return nil } func (u *udpSock) Validate(cfg map[string]any) error { newCfg := new(Config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } setDefaultsFor(newCfg) return validate(newCfg) } func (u *udpSock) Update(_ context.Context, cfg map[string]any) error { newCfg := new(Config) err := outputs.DecodeConfig(cfg, newCfg) if err != nil { return err } setDefaultsFor(newCfg) currCfg := u.cfg.Load() swapChannel := channelNeedsSwap(currCfg, newCfg) restartWorker := needsWorkerRestart(currCfg, newCfg) rebuildProcessors := slices.Compare(currCfg.EventProcessors, newCfg.EventProcessors) != 0 dc := new(dynConfig) prevDC := u.dynCfg.Load() if rebuildProcessors { dc.evps, err = u.buildEventProcessors(u.logger, newCfg.EventProcessors) if err != nil { return err } } else if prevDC != nil { dc.evps = prevDC.evps } // handle rate limiter changes if newCfg.Rate > 0 { if currCfg.Rate != newCfg.Rate { // rate changed, stop old limiter and create new one if prevDC != nil && prevDC.limiter != nil { prevDC.limiter.Stop() } dc.limiter = time.NewTicker(newCfg.Rate) } else { // rate unchanged, copy old limiter if prevDC != nil { dc.limiter = prevDC.limiter } } } else { // no rate limiting, stop old limiter if any if prevDC != nil && prevDC.limiter != nil { prevDC.limiter.Stop() } dc.limiter = nil } dc.mo = &formatters.MarshalOptions{ Format: newCfg.Format, OverrideTS: newCfg.OverrideTimestamps, } if newCfg.TargetTemplate == "" { dc.targetTpl = outputs.DefaultTargetTemplate } else if newCfg.AddTarget != "" { dc.targetTpl, err = gtemplate.CreateTemplate("target-template", newCfg.TargetTemplate) if err != nil { return err } dc.targetTpl = dc.targetTpl.Funcs(outputs.TemplateFuncs) } else { dc.targetTpl = outputs.DefaultTargetTemplate } // store new configs u.dynCfg.Store(dc) u.cfg.Store(newCfg) if swapChannel || restartWorker { var newChan chan []byte oldChan := *u.buffer.Load() if swapChannel { newChan = make(chan []byte, newCfg.BufferSize) } else { newChan = oldChan } // create new context and WaitGroup runCtx, cancel := context.WithCancel(u.rootCtx) newWG := new(sync.WaitGroup) // Save old pointers oldCancel := u.cancelFn oldWG := u.wg // swap u.cancelFn = cancel u.wg = newWG u.buffer.Store(&newChan) // start new worker u.wg.Add(1) go u.start(runCtx) // cancel old worker and wait if oldCancel != nil { oldCancel() } if oldWG != nil { oldWG.Wait() } // drain old channel if we swapped if swapChannel { DRAIN_LOOP: for { select { case b, ok := <-oldChan: if !ok { break } select { case newChan <- b: default: // new channel is full, drop message } default: break DRAIN_LOOP } } } u.logger.Printf("restarted UDP output worker") } u.logger.Printf("updated UDP output: %s", u.String()) return nil } func (u *udpSock) UpdateProcessor(name string, pcfg map[string]any) error { cfg := u.cfg.Load() dc := u.dynCfg.Load() newEvps, changed, err := outputs.UpdateProcessorInSlice( u.logger, u.store, cfg.EventProcessors, dc.evps, name, pcfg, ) if err != nil { return err } if changed { newDC := *dc newDC.evps = newEvps u.dynCfg.Store(&newDC) u.logger.Printf("updated event processor %s", name) } return nil } func (u *udpSock) Write(ctx context.Context, m proto.Message, meta outputs.Meta) { if m == nil { return } select { case <-ctx.Done(): return default: cfg := u.cfg.Load() dc := u.dynCfg.Load() if cfg == nil || dc == nil { return } rsp, err := outputs.AddSubscriptionTarget(m, meta, cfg.AddTarget, dc.targetTpl) if err != nil { u.logger.Printf("failed to add target to the response: %v", err) } bb, err := outputs.Marshal(rsp, meta, dc.mo, cfg.SplitEvents, dc.evps...) if err != nil { u.logger.Printf("failed marshaling proto msg: %v", err) return } buffer := u.buffer.Load() if buffer == nil { return } for _, b := range bb { select { case <-ctx.Done(): return case (*buffer) <- b: } } } } func (u *udpSock) WriteEvent(ctx context.Context, ev *formatters.EventMsg) {} func (u *udpSock) Close() error { if u.cancelFn != nil { u.cancelFn() } u.wg.Wait() // Stop limiter if exists dc := u.dynCfg.Load() if dc != nil && dc.limiter != nil { dc.limiter.Stop() } return nil } func (u *udpSock) String() string { cfg := u.cfg.Load() if cfg == nil { return "" } b, err := json.Marshal(cfg) if err != nil { return "" } return string(b) } func (u *udpSock) start(ctx context.Context) { defer u.wg.Done() DIAL: if ctx.Err() != nil { u.logger.Printf("context error: %v", ctx.Err()) return } cfg := u.cfg.Load() dc := u.dynCfg.Load() if cfg == nil || dc == nil { u.logger.Printf("config not loaded") return } udpAddr, err := net.ResolveUDPAddr("udp", cfg.Address) if err != nil { u.logger.Printf("failed to resolve UDP address: %v", err) time.Sleep(cfg.RetryInterval) goto DIAL } conn, err := net.DialUDP("udp", nil, udpAddr) if err != nil { u.logger.Printf("failed to dial UDP: %v", err) time.Sleep(cfg.RetryInterval) goto DIAL } defer conn.Close() u.logger.Printf("connected to %s", cfg.Address) // Snapshot buffer and limiter at connection time buffer := *u.buffer.Load() limiter := dc.limiter for { select { case <-ctx.Done(): u.logger.Printf("UDP worker shutting down") return case b, ok := <-buffer: if !ok { u.logger.Printf("buffer channel closed") return } if limiter != nil { select { case <-ctx.Done(): return case <-limiter.C: } } _, err = conn.Write(b) if err != nil { u.logger.Printf("failed sending UDP bytes: %v", err) conn.Close() time.Sleep(cfg.RetryInterval) goto DIAL } } } } func channelNeedsSwap(old, nw *Config) bool { if old == nil || nw == nil { return true } return old.BufferSize != nw.BufferSize } func needsWorkerRestart(old, nw *Config) bool { if old == nil || nw == nil { return true } return old.Address != nw.Address } ================================================ FILE: pkg/pipeline/pipeline.go ================================================ package pipeline import ( "github.com/openconfig/gnmic/pkg/formatters" "github.com/openconfig/gnmic/pkg/outputs" "google.golang.org/protobuf/proto" ) // Msg contains the data to be passed from targets or inputs to outputs. type Msg struct { Msg proto.Message Meta outputs.Meta Events []*formatters.EventMsg Outputs map[string]struct{} } func NewMsg(msg proto.Message, meta outputs.Meta, events []*formatters.EventMsg, outputs map[string]struct{}) *Msg { return &Msg{ Msg: msg, Meta: meta, Events: events, Outputs: outputs, } } ================================================ FILE: pkg/utils/authbrearer.go ================================================ // originally from: https://github.com/damiannolan/sasl // © 2024 Nokia. // // This code is a Contribution to the gNMIc project (“Work”) made under the Google Software Grant and Corporate Contributor License Agreement (“CLA”) and governed by the Apache License 2.0. // No other rights or licenses in or to any of Nokia’s intellectual property are granted for any other purpose. // This code is provided on an “as is” basis without any warranties of any kind. // // SPDX-License-Identifier: Apache-2.0 package utils import ( "context" "github.com/IBM/sarama" "golang.org/x/oauth2" "golang.org/x/oauth2/clientcredentials" ) // TokenProvider is a simple struct that implements sarama.AccessTokenProvider. type TokenProvider struct { tokenSource oauth2.TokenSource } func NewTokenProvider(clientID, clientSecret, tokenURL string) sarama.AccessTokenProvider { cfg := clientcredentials.Config{ ClientID: clientID, ClientSecret: clientSecret, TokenURL: tokenURL, } return &TokenProvider{ tokenSource: cfg.TokenSource(context.Background()), } } func (t *TokenProvider) Token() (*sarama.AccessToken, error) { token, err := t.tokenSource.Token() if err != nil { return nil, err } return &sarama.AccessToken{Token: token.AccessToken}, nil } ================================================ FILE: pkg/utils/gnmi.go ================================================ package utils import ( "errors" "fmt" "strconv" "strings" "github.com/AlekSi/pointer" "github.com/openconfig/gnmi/proto/gnmi" "github.com/openconfig/gnmic/pkg/api" "github.com/openconfig/gnmic/pkg/api/types" ) const ( SubscriptionMode_STREAM = "STREAM" SubscriptionMode_ONCE = "ONCE" SubscriptionMode_POLL = "POLL" SubscriptionStreamMode_TARGET_DEFINED = "TARGET_DEFINED" SubscriptionStreamMode_ON_CHANGE = "ON_CHANGE" SubscriptionStreamMode_SAMPLE = "SAMPLE" ) const ( subscriptionDefaultMode = SubscriptionMode_STREAM subscriptionDefaultStreamMode = SubscriptionStreamMode_TARGET_DEFINED subscriptionDefaultEncoding = "JSON" ) var ErrConfig = errors.New("config error") func CreateSubscribeRequest(cfg *types.SubscriptionConfig, tc *types.TargetConfig, defaultEncoding string) (*gnmi.SubscribeRequest, error) { if err := validateAndSetDefaults(cfg); err != nil { return nil, err } gnmiOpts, err := SubscriptionOpts(cfg, tc, defaultEncoding) if err != nil { return nil, err } return api.NewSubscribeRequest(gnmiOpts...) } func validateAndSetDefaults(sc *types.SubscriptionConfig) error { numPaths := len(sc.Paths) numStreamSubs := len(sc.StreamSubscriptions) if sc.Prefix == "" && numPaths == 0 && numStreamSubs == 0 { return fmt.Errorf("%w: missing path(s) in subscription %q", ErrConfig, sc.Name) } if numPaths > 0 && numStreamSubs > 0 { return fmt.Errorf("%w: subscription %q: cannot set 'paths' and 'stream-subscriptions' at the same time", ErrConfig, sc.Name) } // validate subscription Mode switch strings.ToUpper(sc.Mode) { case "": sc.Mode = subscriptionDefaultMode case "ONCE", "POLL": if numStreamSubs > 0 { return fmt.Errorf("%w: subscription %q: cannot set 'stream-subscriptions' and 'mode'", ErrConfig, sc.Name) } case "STREAM": default: return fmt.Errorf("%w: subscription %s: unknown subscription mode %q", ErrConfig, sc.Name, sc.Mode) } // validate encoding if sc.Encoding != nil { switch strings.ToUpper(strings.ReplaceAll(*sc.Encoding, "-", "_")) { case "": sc.Encoding = pointer.ToString(subscriptionDefaultEncoding) case "JSON": case "BYTES": case "PROTO": case "ASCII": case "JSON_IETF": default: // allow integer encoding values _, err := strconv.Atoi(*sc.Encoding) if err != nil { return fmt.Errorf("%w: subscription %s: unknown encoding type %q", ErrConfig, sc.Name, *sc.Encoding) } } } // validate subscription stream mode if strings.ToUpper(sc.Mode) == "STREAM" { if len(sc.StreamSubscriptions) == 0 { switch strings.ToUpper(strings.ReplaceAll(sc.StreamMode, "-", "_")) { case "": sc.StreamMode = subscriptionDefaultStreamMode case "TARGET_DEFINED": case "SAMPLE": case "ON_CHANGE": default: return fmt.Errorf("%w: subscription %s: unknown stream-mode type %q", ErrConfig, sc.Name, sc.StreamMode) } return nil } // stream subscriptions for i, scs := range sc.StreamSubscriptions { if scs.Mode != "" { return fmt.Errorf("%w: subscription %s/%d: 'mode' attribute cannot be set", ErrConfig, sc.Name, i) } if scs.Prefix != "" { return fmt.Errorf("%w: subscription %s/%d: 'prefix' attribute cannot be set", ErrConfig, sc.Name, i) } if scs.Target != "" { return fmt.Errorf("%w: subscription %s/%d: 'target' attribute cannot be set", ErrConfig, sc.Name, i) } if scs.SetTarget { return fmt.Errorf("%w: subscription %s/%d: 'set-target' attribute cannot be set", ErrConfig, sc.Name, i) } if scs.Encoding != nil { return fmt.Errorf("%w: subscription %s/%d: 'encoding' attribute cannot be set", ErrConfig, sc.Name, i) } if scs.History != nil { return fmt.Errorf("%w: subscription %s/%d: 'history' attribute cannot be set", ErrConfig, sc.Name, i) } if scs.Models != nil { return fmt.Errorf("%w: subscription %s/%d: 'models' attribute cannot be set", ErrConfig, sc.Name, i) } if scs.UpdatesOnly { return fmt.Errorf("%w: subscription %s/%d: 'updates-only' attribute cannot be set", ErrConfig, sc.Name, i) } if scs.StreamSubscriptions != nil { return fmt.Errorf("%w: subscription %s/%d: 'subscriptions' attribute cannot be set", ErrConfig, sc.Name, i) } if scs.Qos != nil { return fmt.Errorf("%w: subscription %s/%d: 'qos' attribute cannot be set", ErrConfig, sc.Name, i) } switch strings.ReplaceAll(strings.ToUpper(scs.StreamMode), "-", "_") { case "": scs.StreamMode = subscriptionDefaultStreamMode case "TARGET_DEFINED": case "SAMPLE": case "ON_CHANGE": default: return fmt.Errorf("%w: subscription %s/%d: unknown subscription stream mode %q", ErrConfig, sc.Name, i, scs.StreamMode) } } } return nil } func SubscriptionOpts(sc *types.SubscriptionConfig, tc *types.TargetConfig, defaultEncoding string) ([]api.GNMIOption, error) { gnmiOpts := make([]api.GNMIOption, 0, 4) gnmiOpts = append(gnmiOpts, api.Prefix(sc.Prefix), api.SubscriptionListMode(sc.Mode), api.UpdatesOnly(sc.UpdatesOnly), ) // encoding switch { case sc.Encoding != nil: gnmiOpts = append(gnmiOpts, api.Encoding(*sc.Encoding)) case tc != nil && tc.Encoding != nil: gnmiOpts = append(gnmiOpts, api.Encoding(*tc.Encoding)) default: gnmiOpts = append(gnmiOpts, api.Encoding(defaultEncoding)) } // history extension if sc.History != nil { if !sc.History.Snapshot.IsZero() { gnmiOpts = append(gnmiOpts, api.Extension_HistorySnapshotTime(sc.History.Snapshot)) } if !sc.History.Start.IsZero() && !sc.History.End.IsZero() { gnmiOpts = append(gnmiOpts, api.Extension_HistoryRange(sc.History.Start, sc.History.End)) } } // QoS if sc.Qos != nil { gnmiOpts = append(gnmiOpts, api.Qos(*sc.Qos)) } // add models for _, m := range sc.Models { gnmiOpts = append(gnmiOpts, api.UseModel(m, "", "")) } // add target opt if sc.Target != "" { gnmiOpts = append(gnmiOpts, api.Target(sc.Target)) } else if sc.SetTarget { gnmiOpts = append(gnmiOpts, api.Target(tc.Name)) } // add gNMI subscriptions // multiple stream subscriptions if len(sc.StreamSubscriptions) > 0 { for _, ssc := range sc.StreamSubscriptions { streamGNMIOpts, err := streamSubscriptionOpts(ssc) if err != nil { return nil, err } gnmiOpts = append(gnmiOpts, streamGNMIOpts...) } } for _, p := range sc.Paths { subGnmiOpts := make([]api.GNMIOption, 0, 2) switch gnmi.SubscriptionList_Mode(gnmi.SubscriptionList_Mode_value[strings.ToUpper(sc.Mode)]) { case gnmi.SubscriptionList_STREAM: switch gnmi.SubscriptionMode(gnmi.SubscriptionMode_value[strings.Replace(strings.ToUpper(sc.StreamMode), "-", "_", -1)]) { case gnmi.SubscriptionMode_ON_CHANGE: if sc.HeartbeatInterval != nil { subGnmiOpts = append(subGnmiOpts, api.HeartbeatInterval(*sc.HeartbeatInterval)) } subGnmiOpts = append(subGnmiOpts, api.SubscriptionMode(sc.StreamMode)) case gnmi.SubscriptionMode_TARGET_DEFINED: if sc.HeartbeatInterval != nil { subGnmiOpts = append(subGnmiOpts, api.HeartbeatInterval(*sc.HeartbeatInterval)) } if sc.SampleInterval != nil { subGnmiOpts = append(subGnmiOpts, api.SampleInterval(*sc.SampleInterval)) } subGnmiOpts = append(subGnmiOpts, api.SuppressRedundant(sc.SuppressRedundant)) if sc.SuppressRedundant && sc.HeartbeatInterval != nil { subGnmiOpts = append(subGnmiOpts, api.HeartbeatInterval(*sc.HeartbeatInterval)) } subGnmiOpts = append(subGnmiOpts, api.SubscriptionMode(sc.StreamMode)) case gnmi.SubscriptionMode_SAMPLE: if sc.SampleInterval != nil { subGnmiOpts = append(subGnmiOpts, api.SampleInterval(*sc.SampleInterval)) } subGnmiOpts = append(subGnmiOpts, api.SuppressRedundant(sc.SuppressRedundant)) if sc.SuppressRedundant && sc.HeartbeatInterval != nil { subGnmiOpts = append(subGnmiOpts, api.HeartbeatInterval(*sc.HeartbeatInterval)) } subGnmiOpts = append(subGnmiOpts, api.SubscriptionMode(sc.StreamMode)) default: return nil, fmt.Errorf("%w: subscription %s unknown stream subscription mode %s", ErrConfig, sc.Name, sc.StreamMode) } default: // poll and once subscription modes } // subGnmiOpts = append(subGnmiOpts, api.Path(p)) gnmiOpts = append(gnmiOpts, api.Subscription(subGnmiOpts...), ) } // Depth extension if sc.Depth > 0 { gnmiOpts = append(gnmiOpts, api.Extension_Depth(sc.Depth)) } return gnmiOpts, nil } func streamSubscriptionOpts(sc *types.SubscriptionConfig) ([]api.GNMIOption, error) { gnmiOpts := make([]api.GNMIOption, 0) for _, p := range sc.Paths { subGnmiOpts := make([]api.GNMIOption, 0, 2) switch gnmi.SubscriptionMode(gnmi.SubscriptionMode_value[strings.Replace(strings.ToUpper(sc.StreamMode), "-", "_", -1)]) { case gnmi.SubscriptionMode_ON_CHANGE: if sc.HeartbeatInterval != nil { subGnmiOpts = append(subGnmiOpts, api.HeartbeatInterval(*sc.HeartbeatInterval)) } subGnmiOpts = append(subGnmiOpts, api.SubscriptionMode(sc.StreamMode)) case gnmi.SubscriptionMode_SAMPLE, gnmi.SubscriptionMode_TARGET_DEFINED: if sc.SampleInterval != nil { subGnmiOpts = append(subGnmiOpts, api.SampleInterval(*sc.SampleInterval)) } subGnmiOpts = append(subGnmiOpts, api.SuppressRedundant(sc.SuppressRedundant)) if sc.SuppressRedundant && sc.HeartbeatInterval != nil { subGnmiOpts = append(subGnmiOpts, api.HeartbeatInterval(*sc.HeartbeatInterval)) } subGnmiOpts = append(subGnmiOpts, api.SubscriptionMode(sc.StreamMode)) default: return nil, fmt.Errorf("%w: subscription %s unknown stream subscription mode %s", ErrConfig, sc.Name, sc.StreamMode) } subGnmiOpts = append(subGnmiOpts, api.Path(p)) gnmiOpts = append(gnmiOpts, api.Subscription(subGnmiOpts...), ) } return gnmiOpts, nil } ================================================ FILE: pkg/utils/gnmiext.go ================================================ package utils import ( "fmt" "strconv" "strings" ) type RegisteredExtensions map[int32]string func ParseRegisteredExtensions(pairs []string) (RegisteredExtensions, error) { res := RegisteredExtensions{} for _, p := range pairs { idMsg := strings.Split(p, ":") if len(idMsg) < 2 { return nil, fmt.Errorf("'%s' registered extension has invalid format, 123:package.Message format is expected", p) } id, err := strconv.ParseInt(idMsg[0], 10, 32) if err != nil { return nil, err } res[int32(id)] = idMsg[1] } return res, nil } ================================================ FILE: pkg/utils/store.go ================================================ package utils import ( "github.com/openconfig/gnmic/pkg/api/types" "github.com/zestor-dev/zestor/store" ) func GetConfigMaps(s store.Store[any]) (map[string]*types.TargetConfig, map[string]map[string]any, map[string]map[string]any, error) { tgm, err := s.List("targets") if err != nil { return nil, nil, nil, err } tcs := make(map[string]*types.TargetConfig) for n, t := range tgm { if tc, ok := t.(*types.TargetConfig); ok { tcs[n] = tc } } egm, err := s.List("processors") if err != nil { return nil, nil, nil, err } eps := make(map[string]map[string]any) for n, e := range egm { if ep, ok := e.(map[string]any); ok { eps[n] = ep } } agm, err := s.List("actions") if err != nil { return nil, nil, nil, err } acts := make(map[string]map[string]any) for n, a := range agm { if act, ok := a.(map[string]any); ok { acts[n] = act } } return tcs, eps, acts, nil } ================================================ FILE: pkg/version/version.go ================================================ package version var ( Version = "dev" Commit = "none" Date = "unknown" GitURL = "" ) ================================================ FILE: tests/api.sh ================================================ #!/bin/bash trap 'failure ${LINENO} "$BASH_COMMAND"' ERR curl -sS http://$1/api/v1/config | yq eval -P curl -sS http://$1/api/v1/config/targets | yq eval -P for i in $(curl -sS http://$1/api/v1/config/targets | jq -r 'keys[]'); do curl -sS http://$1/api/v1/config/targets/$i |yq eval -P done curl -sS http://$1/api/v1/config/subscriptions | yq eval -P curl -sS http://$1/api/v1/config/outputs | yq eval -P curl -sS http://$1/api/v1/config/inputs | yq eval -P curl -sS http://$1/api/v1/config/processors | yq eval -P curl -sS http://$1/api/v1/config/clustering | yq eval -P curl -sS http://$1/api/v1/config/api-server | yq eval -P curl -sS http://$1/api/v1/config/gnmi-server | yq eval -P curl -sS http://$1/api/v1/targets | yq eval -P for i in $(curl -sS http://$1/api/v1/targets | jq -r 'keys[]'); do curl -sS http://$1/api/v1/targets/$i | yq eval -P done curl -sS http://$1/api/v1/cluster | yq eval -P curl -sS http://$1/api/v1/cluster/members | yq eval -P curl -sS http://$1/api/v1/cluster/leader | yq eval -P ================================================ FILE: tests/capabilities_cmd.sh ================================================ #!/bin/bash gnmic_base_cmd="./gnmic-rc1 -u admin -p NokiaSrl1! --skip-verify -d" trap 'failure ${LINENO} "$BASH_COMMAND"' ERR # capabilities $gnmic_base_cmd -a clab-test1-srl1 capabilities $gnmic_base_cmd -a clab-test1-srl2 capabilities $gnmic_base_cmd -a clab-test1-srl3 capabilities # capabilities multi host $gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 capabilities $gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 capabilities --format json $gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 capabilities $gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 capabilities --format json printf "capabilities with config file\n" # capabilities using config file ./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl1 capabilities ./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl2 capabilities ./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl3 capabilities ./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl1 capabilities --no-prefix ./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl2 capabilities --no-prefix ./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl3 capabilities --no-prefix ./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl1 capabilities --format json ./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl2 capabilities --format json ./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl3 capabilities --format json ./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl1 capabilities --format json --no-prefix ./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl2 capabilities --format json --no-prefix ./gnmic-rc1 --config configs/gnmic1.yaml -a clab-test1-srl3 capabilities --format json --no-prefix # multi host ./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 --config configs/gnmic1.yaml capabilities ./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 --config configs/gnmic1.yaml capabilities --no-prefix ./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 --config configs/gnmic1.yaml capabilities --format json ./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 --config configs/gnmic1.yaml capabilities --format json --no-prefix ./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 --config configs/gnmic1.yaml capabilities ./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 --config configs/gnmic1.yaml capabilities --no-prefix ./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 --config configs/gnmic1.yaml capabilities --format json ./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 --config configs/gnmic1.yaml capabilities --format json --no-prefix ## hosts from file ### target nodes in address field ./gnmic-rc1 --config configs/gnmic2.yaml capabilities ./gnmic-rc1 --config configs/gnmic2.yaml capabilities --format json ./gnmic-rc1 --config configs/gnmic2.yaml capabilities --format json --no-prefix ### target nodes in targets field ./gnmic-rc1 --config configs/gnmic3.yaml capabilities ./gnmic-rc1 --config configs/gnmic2.yaml capabilities --format json ./gnmic-rc1 --config configs/gnmic3.yaml capabilities --format json --no-prefix # set skip-verify value to false in the config file sed -i 's/^skip-verify: true/skip-verify: false/g' configs/gnmic1.yaml ./gnmic-rc1 -a clab-test1-srl1 --config configs/gnmic1.yaml --skip-verify capabilities ./gnmic-rc1 -a clab-test1-srl2 --config configs/gnmic1.yaml --skip-verify capabilities ./gnmic-rc1 -a clab-test1-srl3 --config configs/gnmic1.yaml --skip-verify capabilities ./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 --config configs/gnmic1.yaml --skip-verify capabilities ./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 --config configs/gnmic1.yaml --skip-verify capabilities --format json ./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 --config configs/gnmic1.yaml --skip-verify capabilities ./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 --config configs/gnmic1.yaml --skip-verify capabilities --format json # comment out skip-verify value in the config file and change it to true sed -i 's/^skip-verify: false/#skip-verify: true/g' configs/gnmic1.yaml ./gnmic-rc1 -a clab-test1-srl1 --config configs/gnmic1.yaml --skip-verify capabilities ./gnmic-rc1 -a clab-test1-srl2 --config configs/gnmic1.yaml --skip-verify capabilities ./gnmic-rc1 -a clab-test1-srl3 --config configs/gnmic1.yaml --skip-verify capabilities ./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 --config configs/gnmic1.yaml --skip-verify capabilities ./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 --config configs/gnmic1.yaml --skip-verify capabilities --format json ./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 --config configs/gnmic1.yaml --skip-verify capabilities ./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 --config configs/gnmic1.yaml --skip-verify capabilities --format json # use --tls-ca ./gnmic-rc1 -a clab-test1-srl1 --config configs/gnmic1.yaml \ --tls-ca clab/clab-test1/.tls/ca/ca.pem \ capabilities ./gnmic-rc1 -a clab-test1-srl2 --config configs/gnmic1.yaml \ --tls-ca clab/clab-test1/.tls/ca/ca.pem \ capabilities ./gnmic-rc1 -a clab-test1-srl3 --config configs/gnmic1.yaml \ --tls-ca clab/clab-test1/.tls/ca/ca.pem \ capabilities # use --tls-server-name s ./gnmic-rc1 -a clab-test1-srl1 --config configs/gnmic1.yaml --tls-server-name srl1 \ --tls-ca clab/clab-test1/.tls/ca/ca.pem \ capabilities ./gnmic-rc1 -a clab-test1-srl2 --config configs/gnmic1.yaml --tls-server-name srl2 \ --tls-ca clab/clab-test1/.tls/ca/ca.pem \ capabilities ./gnmic-rc1 -a clab-test1-srl3 --config configs/gnmic1.yaml --tls-server-name srl3 \ --tls-ca clab/clab-test1/.tls/ca/ca.pem \ capabilities ./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 --config configs/gnmic1.yaml \ --tls-ca clab/clab-test1/.tls/ca/ca.pem \ capabilities # revert back skip-verify value to true sed -i 's/^#skip-verify: true/skip-verify: true/g' configs/gnmic1.yaml ================================================ FILE: tests/clab/labN.clab.yaml ================================================ name: lab{{ (ds "data").ID }} topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux:23.10.3 labels: test: telemetry nodes: super-spine1: super-spine2: spine1: spine2: spine3: spine4: leaf1: leaf2: leaf3: leaf4: leaf5: leaf6: leaf7: leaf8: links: # super-spine1 links - endpoints: ["super-spine1:e1-1", "spine1:e1-1"] - endpoints: ["super-spine1:e1-2", "spine2:e1-1"] - endpoints: ["super-spine1:e1-3", "spine3:e1-1"] - endpoints: ["super-spine1:e1-4", "spine4:e1-1"] # super-spine2 links - endpoints: ["super-spine2:e1-1", "spine1:e1-2"] - endpoints: ["super-spine2:e1-2", "spine2:e1-2"] - endpoints: ["super-spine2:e1-3", "spine3:e1-2"] - endpoints: ["super-spine2:e1-4", "spine4:e1-2"] # spine1 links - endpoints: ["spine1:e1-3", "leaf1:e1-1"] - endpoints: ["spine1:e1-4", "leaf2:e1-1"] - endpoints: ["spine1:e1-5", "leaf3:e1-1"] - endpoints: ["spine1:e1-6", "leaf4:e1-1"] # spine2 links - endpoints: ["spine2:e1-3", "leaf1:e1-2"] - endpoints: ["spine2:e1-4", "leaf2:e1-2"] - endpoints: ["spine2:e1-5", "leaf3:e1-2"] - endpoints: ["spine2:e1-6", "leaf4:e1-2"] # spine3 links - endpoints: ["spine3:e1-3", "leaf5:e1-1"] - endpoints: ["spine3:e1-4", "leaf6:e1-1"] - endpoints: ["spine3:e1-5", "leaf7:e1-1"] - endpoints: ["spine3:e1-6", "leaf8:e1-1"] # spine4 links - endpoints: ["spine4:e1-3", "leaf5:e1-2"] - endpoints: ["spine4:e1-4", "leaf6:e1-2"] - endpoints: ["spine4:e1-5", "leaf7:e1-2"] - endpoints: ["spine4:e1-6", "leaf8:e1-2"] ================================================ FILE: tests/clab/loaders/gnmic-agg.yaml ================================================ log: true insecure: true loader: type: consul address: clab-loaders-consul-agent:8500 debug: true on-add: - query services: - name: collectors-gnmi-server subscriptions: collectors: paths: - / stream-mode: on-change api-server: enable-metrics: true # clustering config clustering: cluster-name: aggregators targets-watch-timer: 10s locker: type: consul address: clab-loaders-consul-agent:8500 outputs: aggregator: type: prometheus service-registration: address: clab-loaders-consul-agent:8500 actions: query: name: query type: http url: http://cht.sh debug: true ================================================ FILE: tests/clab/loaders/gnmic-docker-loader.yaml ================================================ username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf log: true loader: type: docker debug: true filters: - containers: - label=clab-node-kind: srl on-add: - interfaces - enable_interfaces subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s sub2: paths: - /interface/subinterface/statistics stream-mode: sample sample-interval: 10s api-server: enable-metrics: true # clustering config clustering: cluster-name: collectors targets-watch-timer: 10s locker: type: consul address: clab-loaders-consul-agent:8500 gnmi-server: enable-metrics: true service-registration: address: clab-loaders-consul-agent:8500 outputs: collector: type: prometheus service-registration: address: clab-loaders-consul-agent:8500 event-processors: - trim-prefixes processors: trim-prefixes: event-strings: value-names: - ".*" transforms: - path-base: apply-on: "name" actions: interfaces: name: interfaces type: template debug: true template: | {{- if .Input | strings.Contains "srl1"}}ethernet-1/1,ethernet-1/2 {{- else -}}ethernet-1/1 {{- end -}} enable_interfaces: name: enable_interfaces type: gnmi target: '{{ .Input }}' rpc: set encoding: json_ietf debug: true paths: - / values: - | {{- $ifaces := coll.Slice -}} {{- range $iface := .Env.interfaces | strings.Split "," -}} {{- $ifaces = $ifaces | coll.Append (coll.Dict "name" $iface "admin-state" "enable") -}} {{- end -}} {{- ( coll.Dict "interface" $ifaces ) | data.ToJSON -}} ================================================ FILE: tests/clab/loaders/gnmic-file-loader.yaml ================================================ username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf log: true loader: type: file debug: true path: ./targets/targets.yaml on-add: - interfaces - enable_interfaces subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s sub2: paths: - /interface/subinterface/statistics stream-mode: sample sample-interval: 10s api-server: enable-metrics: true # clustering config clustering: cluster-name: collectors targets-watch-timer: 10s locker: type: consul address: clab-loaders-consul-agent:8500 gnmi-server: enable-metrics: true service-registration: address: clab-loaders-consul-agent:8500 outputs: collector: type: prometheus service-registration: address: clab-loaders-consul-agent:8500 event-processors: - trim-prefixes processors: trim-prefixes: event-strings: value-names: - ".*" transforms: - path-base: apply-on: "name" actions: interfaces: name: interfaces type: template debug: true template: | {{- if .Input | strings.Contains "srl1"}}ethernet-1/1,ethernet-1/2 {{- else -}}ethernet-1/1 {{- end -}} enable_interfaces: name: enable_interfaces type: gnmi target: '{{ .Input }}' rpc: set encoding: json_ietf debug: true paths: - / values: - | {{- $ifaces := coll.Slice -}} {{- range $iface := .Env.interfaces | strings.Split "," -}} {{- $ifaces = $ifaces | coll.Append (coll.Dict "name" $iface "admin-state" "enable") -}} {{- end -}} {{- ( coll.Dict "interface" $ifaces ) | data.ToJSON -}} ================================================ FILE: tests/clab/loaders/loaders.clab.yaml ================================================ name: loaders topology: defaults: kind: linux image: gnmic:0.0.0-rc1 kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux:23.10.3 nodes: srl1: kind: nokia_srlinux srl2: kind: nokia_srlinux srl3: kind: nokia_srlinux consul-agent: image: consul:1.15.4 ports: - 8500:8500 - 8600:8600/udp cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0' gnmic1: binds: - {{ .gnmic_config_file }}:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock - targets/targets.yaml:/app/targets/targets.yaml cmd: '--config /app/gnmic.yaml subscribe' ports: - 7890:7890 - 9804:9804 env: GNMIC_API: :7890 GNMIC_CLUSTERING_INSTANCE_NAME: clab-loaders-gnmic1 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-loaders-gnmic1 GNMIC_GNMI_SERVER_ADDRESS: clab-loaders-gnmic1:57400 GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-loaders-gnmic1 GNMIC_OUTPUTS_COLLECTOR_LISTEN: clab-loaders-gnmic1:9804 gnmic2: binds: - {{ .gnmic_config_file }}:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock - targets/targets.yaml:/app/targets/targets.yaml cmd: '--config /app/gnmic.yaml subscribe' ports: - 7891:7891 - 9805:9805 env: GNMIC_API: :7891 GNMIC_CLUSTERING_INSTANCE_NAME: clab-loaders-gnmic2 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-loaders-gnmic2 GNMIC_GNMI_SERVER_ADDRESS: clab-loaders-gnmic2:57400 GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-loaders-gnmic2 GNMIC_OUTPUTS_COLLECTOR_LISTEN: clab-loaders-gnmic2:9805 gnmic3: binds: - {{ .gnmic_config_file }}:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock - targets/targets.yaml:/app/targets/targets.yaml cmd: '--config /app/gnmic.yaml subscribe' ports: - 7892:7892 - 9806:9806 env: GNMIC_API: :7892 GNMIC_CLUSTERING_INSTANCE_NAME: clab-loaders-gnmic3 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-loaders-gnmic3 GNMIC_GNMI_SERVER_ADDRESS: clab-loaders-gnmic3:57400 GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-loaders-gnmic3 GNMIC_OUTPUTS_COLLECTOR_LISTEN: clab-loaders-gnmic3:9806 agg-gnmic1: binds: - gnmic-agg.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml subscribe' ports: - 7893:7893 - 9807:9807 env: GNMIC_API: :7893 GNMIC_CLUSTERING_INSTANCE_NAME: clab-loaders-agg-gnmic1 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-loaders-agg-gnmic1 GNMIC_OUTPUTS_AGGREGATOR_LISTEN: clab-loaders-agg-gnmic1:9807 agg-gnmic2: binds: - gnmic-agg.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml subscribe' ports: - 7894:7894 - 9808:9808 env: GNMIC_API: :7894 GNMIC_CLUSTERING_INSTANCE_NAME: clab-loaders-agg-gnmic2 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-loaders-agg-gnmic2 GNMIC_OUTPUTS_AGGREGATOR_LISTEN: clab-loaders-agg-gnmic2:9808 agg-gnmic3: binds: - gnmic-agg.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml subscribe' ports: - 7895:7895 - 9809:9809 env: GNMIC_API: :7895 GNMIC_CLUSTERING_INSTANCE_NAME: clab-loaders-agg-gnmic3 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-loaders-agg-gnmic3 GNMIC_OUTPUTS_AGGREGATOR_LISTEN: clab-loaders-agg-gnmic3:9809 links: - endpoints: ["srl1:e1-1", "srl2:e1-1"] - endpoints: ["srl1:e1-2", "srl3:e1-1"] ================================================ FILE: tests/clab/loaders/loaders.clab_vars.yaml ================================================ gnmic_config_file: gnmic-docker-loader.yaml ================================================ FILE: tests/clab/loaders/targets/targets.yaml ================================================ clab-loaders-srl1: clab-loaders-srl3: ================================================ FILE: tests/clab/telemetry/gnmic-agg.yaml ================================================ log: true insecure: true loader: type: consul address: clab-telemetry-consul-agent:8500 debug: true services: - name: collectors-gnmi-server subscriptions: collectors: paths: - / stream-mode: on-change api-server: enable-metrics: true # clustering config clustering: cluster-name: aggregators targets-watch-timer: 60s locker: type: consul address: clab-telemetry-consul-agent:8500 outputs: aggregator: type: prometheus service-registration: address: clab-telemetry-consul-agent:8500 ================================================ FILE: tests/clab/telemetry/gnmic.yaml ================================================ username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf log: true loader: type: docker debug: true enable-metrics: true filters: - containers: - label=clab-node-kind: srl label=containerlab: lab1 - containers: - label=clab-node-kind: srl label=containerlab: lab2 - containers: - label=clab-node-kind: srl label=containerlab: lab3 - containers: - label=clab-node-kind: srl label=containerlab: lab4 - containers: - label=clab-node-kind: srl label=containerlab: lab5 on-add: - interfaces - enable_interfaces subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s sub2: paths: - /interface/subinterface/statistics stream-mode: sample sample-interval: 10s api-server: enable-metrics: true # clustering config clustering: cluster-name: collectors targets-watch-timer: 60s locker: type: consul address: clab-telemetry-consul-agent:8500 gnmi-server: enable-metrics: true service-registration: address: clab-telemetry-consul-agent:8500 outputs: collector: type: prometheus service-registration: address: clab-telemetry-consul-agent:8500 event-processors: - trim-prefixes influxdb-output: type: influxdb url: http://clab-telemetry-influxdb:8086 bucket: telemetry # db name token: gnmic:gnmic # username:password batch-size: 1000 flush-timer: 10s event-processors: - trim-prefixes kafka-output: type: kafka address: clab-telemetry-kafka-server:9092 topic: telemetry event-processors: - trim-prefixes nats-output: type: nats address: clab-telemetry-nats:4222 subject: telemetry event-processors: - trim-prefixes processors: trim-prefixes: event-strings: value-names: - ".*" transforms: - path-base: apply-on: "name" actions: interfaces: name: interfaces type: template debug: true template: | {{- if .Input | strings.Contains "super-spine"}}ethernet-1/1,ethernet-1/2,ethernet-1/3,ethernet-1/4 {{- else if .Input | strings.Contains "spine"}}ethernet-1/1,ethernet-1/2,ethernet-1/3,ethernet-1/4,ethernet-1/5,ethernet-1/6 {{- else if .Input | strings.Contains "leaf"}}ethernet-1/1,ethernet-1/2{{- end -}} enable_interfaces: name: enable_interfaces type: gnmi target: '{{ .Input }}' rpc: set encoding: json_ietf debug: true paths: - / values: - | {{- $ifaces := coll.Slice -}} {{- range $iface := .Env.interfaces | strings.Split "," -}} {{- $ifaces = $ifaces | coll.Append (coll.Dict "name" $iface "admin-state" "enable") -}} {{- end -}} {{- ( coll.Dict "interface" $ifaces ) | data.ToJSON -}} ================================================ FILE: tests/clab/telemetry/grafana/dashboards.yaml ================================================ apiVersion: 1 providers: - name: 'gNMIc Internal Metrics' orgId: 1 folder: '' type: file disableDeletion: false editable: true options: path: /var/lib/grafana/dashboards foldersFromFilesStructure: true ================================================ FILE: tests/clab/telemetry/grafana/datasources/datasource.yaml ================================================ apiVersion: 1 deleteDatasources: - name: Prometheus orgId: 1 datasources: - name: Prometheus type: prometheus orgId: 1 url: http://clab-telemetry-prometheus:9090 password: user: database: basicAuth: false basicAuthUser: basicAuthPassword: withCredentials: isDefault: true version: 1 editable: true ================================================ FILE: tests/clab/telemetry/prometheus/prometheus.yaml ================================================ global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: clab-telemetry-consul-agent:8500 services: - collectors-gnmic-api - aggregators-gnmic-api - prometheus-cluster - prometheus-collector ================================================ FILE: tests/clab/telemetry/telemetry.clab.yaml ================================================ name: telemetry topology: defaults: kind: linux image: gnmic:0.0.0-rc1 nodes: gnmic1: binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml subscribe' ports: - 7890:7890 - 9804:9804 env: GNMIC_API: :7890 GNMIC_CLUSTERING_INSTANCE_NAME: clab-telemetry-gnmic1 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-telemetry-gnmic1 GNMIC_GNMI_SERVER_ADDRESS: clab-telemetry-gnmic1:57400 GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-telemetry-gnmic1 GNMIC_OUTPUTS_COLLECTOR_LISTEN: clab-telemetry-gnmic1:9804 gnmic2: binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml subscribe' ports: - 7891:7891 - 9805:9805 env: GNMIC_API: :7891 GNMIC_CLUSTERING_INSTANCE_NAME: clab-telemetry-gnmic2 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-telemetry-gnmic2 GNMIC_GNMI_SERVER_ADDRESS: clab-telemetry-gnmic2:57400 GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-telemetry-gnmic2 GNMIC_OUTPUTS_COLLECTOR_LISTEN: clab-telemetry-gnmic2:9805 gnmic3: binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml subscribe' ports: - 7892:7892 - 9806:9806 env: GNMIC_API: :7892 GNMIC_CLUSTERING_INSTANCE_NAME: clab-telemetry-gnmic3 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-telemetry-gnmic3 GNMIC_GNMI_SERVER_ADDRESS: clab-telemetry-gnmic3:57400 GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-telemetry-gnmic3 GNMIC_OUTPUTS_COLLECTOR_LISTEN: clab-telemetry-gnmic3:9806 agg-gnmic1: binds: - ./gnmic-agg.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml subscribe' ports: - 7893:7893 - 9807:9807 env: GNMIC_API: :7893 GNMIC_CLUSTERING_INSTANCE_NAME: clab-telemetry-agg-gnmic1 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-telemetry-agg-gnmic1 GNMIC_OUTPUTS_AGGREGATOR_LISTEN: clab-telemetry-agg-gnmic1:9807 agg-gnmic2: binds: - ./gnmic-agg.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml subscribe' ports: - 7894:7894 - 9808:9808 env: GNMIC_API: :7894 GNMIC_CLUSTERING_INSTANCE_NAME: clab-telemetry-agg-gnmic2 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-telemetry-agg-gnmic2 GNMIC_OUTPUTS_AGGREGATOR_LISTEN: clab-telemetry-agg-gnmic2:9808 agg-gnmic3: binds: - ./gnmic-agg.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml subscribe' ports: - 7895:7895 - 9809:9809 env: GNMIC_API: :7895 GNMIC_CLUSTERING_INSTANCE_NAME: clab-telemetry-agg-gnmic3 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-telemetry-agg-gnmic3 GNMIC_OUTPUTS_AGGREGATOR_LISTEN: clab-telemetry-agg-gnmic3:9809 nats: kind: linux image: nats:latest ports: - 4222:4222 kafka-server: kind: linux image: bitnami/kafka:latest ports: - 9092:9092 env: KAFKA_CFG_ZOOKEEPER_CONNECT: clab-telemetry-zookeeper-server:2181 KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://clab-telemetry-kafka-server:9092 ALLOW_PLAINTEXT_LISTENER: "yes" JMX_PORT: 9000 zookeeper-server: kind: linux image: bitnami/zookeeper:latest ports: - 2181:2181 env: ALLOW_ANONYMOUS_LOGIN: "yes" influxdb: kind: linux image: influxdb:1.8.10 ports: - 8086:8086 env: INFLUXDB_DATA_ENGINE: tsm1 INFLUXDB_REPORTING_DISABLED: "false" INFLUXDB_USER: gnmic INFLUXDB_USER_PASSWORD: gnmic INFLUXDB_DB: telemetry consul-agent: image: consul:latest ports: - 8500:8500 - 8600:8600/udp cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0' prometheus: image: prom/prometheus:latest user: 65534:65534 ports: - 9090:9090 binds: - ./prometheus/:/etc/prometheus/ cmd: | --config.file=/etc/prometheus/prometheus.yaml --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles --log.level=debug grafana: image: grafana/grafana:latest binds: - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro - grafana/dashboards.yaml:/etc/grafana/provisioning/dashboards/dashboards.yaml:ro - ../../dashboards/:/var/lib/grafana/dashboards ports: - 3000:3000 env: GF_AUTH_DISABLE_LOGIN_FORM: "true" GF_AUTH_ANONYMOUS_ENABLED: "true" GF_AUTH_ANONYMOUS_ORG_NAME: Main Org. GF_AUTH_ANONYMOUS_ORG_ROLE: Admin GF_USERS_ALLOW_SIGN_UP: "false" ================================================ FILE: tests/clab/test_lab1.clab.yaml ================================================ name: test1 topology: defaults: kind: nokia_srlinux kinds: nokia_srlinux: image: ghcr.io/nokia/srlinux:23.10.3 nodes: srl1: srl2: srl3: links: - endpoints: ["srl1:e1-1", "srl2:e1-1"] - endpoints: ["srl2:e1-2", "srl3:e1-1"] - endpoints: ["srl3:e1-2", "srl1:e1-2"] ================================================ FILE: tests/cleanup.sh ================================================ #!/bin/bash # cleanup rm -f gnmic-rc1 # delete downloaded yang files sudo rm -rf srl-latest-yang-models # destroy lab sudo clab destroy -t clab/$1.clab.yaml --cleanup ================================================ FILE: tests/cluster_checks.sh ================================================ #!/bin/bash source ./cluster_funcs.sh print_clusters ================================================ FILE: tests/cluster_funcs.sh ================================================ #!/bin/bash function check_num_locked_targets() { ## check number of locked targets locked_count=$(consul kv get -recurse gnmic/collectors/targets | wc -l) expected_node_count=$1 if [[ $locked_count -ne $expected_node_count ]] then printf "Number of locked nodes is not %s, it's %s... time to panic\n" $expected_node_count $locked_count exit 1 fi printf "Number of locked nodes : %s\n" $locked_count printf "Expected number of locked nodes : %s\n" $expected_node_count print_clusters } function print_clusters() { printf "Clusters:\n" consul kv get -recurse gnmic | awk -F: '{print $1}' | awk -F/ '{print $2}' | uniq | nl -w1 -s') ' printf "\n" print_single_cluster aggregators print_single_cluster collectors } function get_instance_api_endpoint() { service_instance=$1"-api" res=$(curl -s http://127.0.0.1:8500/v1/agent/services | jq --arg si "$service_instance" '.[$si]' | jq -r '(.Address+ ":" + (.Port|tostring))') protocol="http://" for t in $(curl -s http://127.0.0.1:8500/v1/agent/services | jq --arg si "$service_instance" '.[$si]' | jq -r .Tags[] ) do if [[ "$t" = protocol=* ]] then protocol=$(echo $t | awk -F= '{print $2}') fi done echo $protocol"://"$res } function print_single_cluster() { cluster_name=$1 printf "Cluster name : %s\n" $cluster_name printf "Number of locked nodes : %s\n" $(consul kv get -recurse gnmic/$cluster_name/targets | wc -l) printf "gNMIc cluster leader : %s\n" $(consul kv get -recurse gnmic/$cluster_name/leader | awk -F: '{print $2}') for instance in $(consul kv get -recurse gnmic/$cluster_name/targets | awk -F: '{print $2}' | sort | uniq) do api_endpoint=$(get_instance_api_endpoint $instance) printf "%s:\n" $instance printf "\t API endpoint : %s\n" $api_endpoint printf "\t locked nodes : %s\n" $(get_number_of_locked_nodes $cluster_name $instance) printf "\t nodes in config : %s\n" $(get_number_of_configured_nodes $api_endpoint) printf "\t handled nodes : %s\n" $(get_number_of_handled_nodes $api_endpoint) done printf "Instance to target mapping :\n" consul kv get -recurse gnmic/$cluster_name/targets | awk -F/ '{print $4}' | awk -F: '{print "\t"$2":\t"$1}' | sort | nl -w2 -s')' printf "\n" } function get_number_of_locked_nodes() { cluster_name=$1 instance=$2 echo $(consul kv get -recurse gnmic/$cluster_name/targets | grep $instance | wc -l) } function get_number_of_configured_nodes() { api_endpoint=$1 echo $(curl -s $api_endpoint/api/v1/config/targets | jq -r 'keys[]' | wc -l) } function get_number_of_handled_nodes() { api_endpoint=$1 echo $(curl -s $api_endpoint/api/v1/targets | jq -r 'keys[]' | wc -l) } ================================================ FILE: tests/configs/gnmic1.yaml ================================================ username: admin password: NokiaSrl1! skip-verify: true debug: true subscriptions: sub1: paths: - /system mode: once ================================================ FILE: tests/configs/gnmic2.yaml ================================================ username: admin password: NokiaSrl1! skip-verify: true debug: true address: - clab-test1-srl1 - clab-test1-srl2 - clab-test1-srl3 subscriptions: sub1: paths: - /system mode: once ================================================ FILE: tests/configs/gnmic3.yaml ================================================ username: admin password: NokiaSrl1! skip-verify: true debug: true targets: clab-test1-srl1: clab-test1-srl2: clab-test1-srl3: subscriptions: sub1: paths: - /system mode: once ================================================ FILE: tests/configs/gnmic4.yaml ================================================ username: admin password: NokiaSrl1! skip-verify: true debug: true targets: clab-test1-srl1: clab-test1-srl2: clab-test1-srl3: subscriptions: sub1: paths: - /system mode: once sub2: paths: - /acl mode: once ================================================ FILE: tests/configs/gnmic_env.yaml ================================================ address: $CUSTOM_ADDR skip-verify: $SKIPVER ================================================ FILE: tests/configs/node/interface.json ================================================ { "admin-state": "enable", "description": "dummy_description" } ================================================ FILE: tests/configs/node/interface.yaml ================================================ admin-state: enable description: "dummy description2" ================================================ FILE: tests/configs/node/replace_request_file.yaml ================================================ replaces: - path: /interface[name=ethernet-1/1] value: admin-state: enable description: dummy_description1 encoding: json_ietf - path: /interface[name=ethernet-1/2] value: admin-state: enable description: dummy_description2 encoding: json_ietf ================================================ FILE: tests/configs/node/update_request_file.yaml ================================================ updates: - path: /interface[name=ethernet-1/1] value: admin-state: enable description: dummy_description1 encoding: json_ietf - path: /interface[name=ethernet-1/2] value: admin-state: enable description: dummy_description2 encoding: json_ietf ================================================ FILE: tests/consul_templates/all_services.tpl ================================================ {{ range services -}} {{ .Name }}: {{- range service .Name }} {{ .Address }} {{- end }} {{ end -}} ================================================ FILE: tests/dashboards/gNMIc/gnmic_compute_metrics.json ================================================ { "annotations": { "list": [ { "builtIn": 1, "datasource": "-- Grafana --", "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", "target": { "limit": 100, "matchAny": false, "tags": [], "type": "dashboard" }, "type": "dashboard" } ] }, "editable": true, "gnetId": null, "graphTooltip": 0, "links": [], "panels": [ { "datasource": null, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 10, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineStyle": { "fill": "solid" }, "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "always", "spanNulls": true, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] }, "unit": "short" }, "overrides": [] }, "gridPos": { "h": 10, "w": 12, "x": 0, "y": 0 }, "id": 16, "options": { "legend": { "calcs": [ "mean", "lastNotNull", "max", "min" ], "displayMode": "table", "placement": "bottom" }, "tooltip": { "mode": "single" } }, "pluginVersion": "8.1.3", "targets": [ { "expr": "process_open_fds", "interval": "", "legendFormat": "{{instance}}", "refId": "A" } ], "timeFrom": null, "timeShift": null, "title": "Open File Descriptors (#)", "type": "timeseries" }, { "datasource": null, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 10, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "always", "spanNulls": true, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] }, "unit": "short" }, "overrides": [] }, "gridPos": { "h": 10, "w": 12, "x": 12, "y": 0 }, "id": 4, "options": { "legend": { "calcs": [ "mean", "lastNotNull", "max", "min" ], "displayMode": "table", "placement": "bottom" }, "tooltip": { "mode": "single" } }, "pluginVersion": "8.1.3", "targets": [ { "expr": "go_goroutines", "interval": "", "legendFormat": "{{instance}}", "refId": "A" } ], "timeFrom": null, "timeShift": null, "title": "Go Routines (#)", "type": "timeseries" }, { "datasource": null, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 10, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "always", "spanNulls": true, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] }, "unit": "short" }, "overrides": [] }, "gridPos": { "h": 10, "w": 12, "x": 0, "y": 10 }, "id": 14, "options": { "legend": { "calcs": [ "mean", "lastNotNull", "max", "min" ], "displayMode": "table", "placement": "bottom" }, "tooltip": { "mode": "single" } }, "pluginVersion": "8.1.3", "targets": [ { "expr": "go_memstats_stack_inuse_bytes/1000000", "interval": "", "legendFormat": "{{instance}}", "refId": "A" } ], "timeFrom": null, "timeShift": null, "title": "Memory - Stack In Use (MB)", "type": "timeseries" }, { "datasource": null, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 10, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "always", "spanNulls": true, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] }, "unit": "short" }, "overrides": [] }, "gridPos": { "h": 10, "w": 12, "x": 12, "y": 10 }, "id": 6, "options": { "legend": { "calcs": [ "mean", "lastNotNull", "max", "min" ], "displayMode": "table", "placement": "bottom" }, "tooltip": { "mode": "single" } }, "pluginVersion": "8.1.3", "targets": [ { "expr": "go_memstats_alloc_bytes/1000000", "interval": "", "legendFormat": "{{instance}} mem alloc", "refId": "A" } ], "timeFrom": null, "timeShift": null, "title": "Memory Alloc (MB)", "type": "timeseries" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fill": 1, "fillGradient": 0, "gridPos": { "h": 10, "w": 12, "x": 0, "y": 20 }, "hiddenSeries": false, "id": 10, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "show": true, "total": false, "values": true }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "8.1.3", "pointradius": 2, "points": true, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "go_memstats_heap_inuse_bytes/1000000", "interval": "", "legendFormat": "{{instance}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Memory - Heap inUse (MB)", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "datasource": null, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 10, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "always", "spanNulls": true, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] }, "unit": "short" }, "overrides": [] }, "gridPos": { "h": 10, "w": 12, "x": 12, "y": 20 }, "id": 2, "options": { "legend": { "calcs": [ "mean", "lastNotNull", "max", "min" ], "displayMode": "table", "placement": "bottom" }, "tooltip": { "mode": "single" } }, "pluginVersion": "8.1.3", "targets": [ { "expr": "go_gc_duration_seconds*1000", "interval": "", "legendFormat": "{{instance}} quantile={{quantile}}", "refId": "A" } ], "timeFrom": null, "timeShift": null, "title": "Go GC duration (ms)", "type": "timeseries" }, { "datasource": null, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 10, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "always", "spanNulls": true, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] }, "unit": "short" }, "overrides": [] }, "gridPos": { "h": 10, "w": 12, "x": 0, "y": 30 }, "id": 12, "options": { "legend": { "calcs": [ "mean", "lastNotNull", "max", "min" ], "displayMode": "table", "placement": "bottom" }, "tooltip": { "mode": "single" } }, "pluginVersion": "8.1.3", "targets": [ { "expr": "rate(go_memstats_mallocs_total[1m])/1000000", "interval": "", "legendFormat": "", "refId": "A" } ], "timeFrom": null, "timeShift": null, "title": "Memory malloc MB/s", "type": "timeseries" }, { "datasource": null, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 10, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "always", "spanNulls": true, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] }, "unit": "short" }, "overrides": [] }, "gridPos": { "h": 10, "w": 12, "x": 12, "y": 30 }, "id": 8, "options": { "legend": { "calcs": [ "mean", "lastNotNull", "max", "min" ], "displayMode": "table", "placement": "bottom" }, "tooltip": { "mode": "single" } }, "pluginVersion": "8.1.3", "targets": [ { "expr": "rate(go_memstats_alloc_bytes_total[1m])/1000000", "interval": "", "legendFormat": "{{instance}}", "refId": "A" } ], "timeFrom": null, "timeShift": null, "title": "Memory - alloc MB/s ", "type": "timeseries" } ], "refresh": "10s", "schemaVersion": 30, "style": "dark", "tags": [], "templating": { "list": [] }, "time": { "from": "now-30m", "to": "now" }, "timepicker": {}, "timezone": "", "title": "gNMIc Compute metrics", "uid": "EYxvhi77k", "version": 1 } ================================================ FILE: tests/dashboards/gNMIc/gnmic_grpc_metrics.json ================================================ { "annotations": { "list": [ { "builtIn": 1, "datasource": "-- Grafana --", "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", "name": "Annotations & Alerts", "target": { "limit": 100, "matchAny": false, "tags": [], "type": "dashboard" }, "type": "dashboard" } ] }, "editable": true, "gnetId": null, "graphTooltip": 0, "links": [], "panels": [ { "datasource": null, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 10, "w": 12, "x": 0, "y": 0 }, "id": 16, "options": { "legend": { "calcs": [ "lastNotNull", "first", "min", "max" ], "displayMode": "table", "placement": "bottom" }, "tooltip": { "mode": "single" } }, "targets": [ { "exemplar": true, "expr": "gnmic_docker_loader_number_of_loaded_targets", "interval": "", "legendFormat": "", "refId": "A" } ], "title": "Docker loader - number of loaded targets", "type": "timeseries" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fill": 1, "fillGradient": 0, "gridPos": { "h": 10, "w": 12, "x": 12, "y": 0 }, "hiddenSeries": false, "id": 2, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "show": true, "total": false, "values": true }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "8.1.3", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "rate(grpc_client_msg_received_total[1m])", "interval": "", "legendFormat": "{{instance}} {{grpc_method}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "gRPC Client Msg Rcv/second", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "datasource": null, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 10, "w": 12, "x": 0, "y": 10 }, "id": 14, "options": { "legend": { "calcs": [ "last" ], "displayMode": "table", "placement": "bottom" }, "tooltip": { "mode": "single" } }, "targets": [ { "exemplar": true, "expr": "rate(gnmic_docker_loader_number_of_docker_list_total[30s])", "interval": "", "legendFormat": "", "refId": "A" } ], "title": "Number of docker loader runs per second", "type": "timeseries" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fill": 1, "fillGradient": 0, "gridPos": { "h": 10, "w": 12, "x": 12, "y": 10 }, "hiddenSeries": false, "id": 4, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "show": true, "total": false, "values": true }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "8.1.3", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "grpc_client_started_total", "interval": "", "legendFormat": "{{instance}} {{grpc_method}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "gRPC Client started", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "datasource": null, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 10, "w": 12, "x": 0, "y": 20 }, "id": 12, "options": { "legend": { "calcs": [], "displayMode": "table", "placement": "bottom" }, "tooltip": { "mode": "single" } }, "targets": [ { "exemplar": true, "expr": "rate(gnmic_subscribe_number_of_received_subscribe_response_messages_total[1m])", "interval": "", "legendFormat": "", "refId": "A" } ], "title": "number of received subscribe response (msg/s)", "type": "timeseries" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fill": 1, "fillGradient": 0, "gridPos": { "h": 10, "w": 12, "x": 12, "y": 20 }, "hiddenSeries": false, "id": 6, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "show": true, "total": false, "values": true }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "8.1.3", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "rate(grpc_client_msg_sent_total[1m])", "interval": "", "legendFormat": "{{instance}} {{grpc_method}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "gRPC Client Msg Sent/s", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } }, { "datasource": null, "fieldConfig": { "defaults": { "color": { "mode": "palette-classic" }, "custom": { "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false }, "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "always", "spanNulls": false, "stacking": { "group": "A", "mode": "none" }, "thresholdsStyle": { "mode": "off" } }, "mappings": [], "thresholds": { "mode": "absolute", "steps": [ { "color": "green", "value": null }, { "color": "red", "value": 80 } ] } }, "overrides": [] }, "gridPos": { "h": 10, "w": 12, "x": 0, "y": 30 }, "id": 10, "options": { "legend": { "calcs": [ "lastNotNull", "first", "min", "max" ], "displayMode": "table", "placement": "bottom" }, "tooltip": { "mode": "single" } }, "targets": [ { "exemplar": true, "expr": "gnmic_cluster_number_of_locked_targets", "interval": "", "legendFormat": "", "refId": "A" } ], "title": "Cluster - number of locked targets (#)", "type": "timeseries" }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": null, "fill": 1, "fillGradient": 0, "gridPos": { "h": 10, "w": 12, "x": 12, "y": 30 }, "hiddenSeries": false, "id": 8, "legend": { "alignAsTable": true, "avg": true, "current": true, "max": true, "min": true, "show": true, "total": false, "values": true }, "lines": true, "linewidth": 1, "nullPointMode": "null", "options": { "alertThreshold": true }, "percentage": false, "pluginVersion": "8.1.3", "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { "expr": "grpc_server_started_total", "interval": "", "legendFormat": "{{instance}} {{grpc_method}}", "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "gRPC Server Started", "tooltip": { "shared": true, "sort": 0, "value_type": "individual" }, "type": "graph", "xaxis": { "buckets": null, "mode": "time", "name": null, "show": true, "values": [] }, "yaxes": [ { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { "format": "short", "label": null, "logBase": 1, "max": null, "min": null, "show": true } ], "yaxis": { "align": false, "alignLevel": null } } ], "schemaVersion": 30, "style": "dark", "tags": [], "templating": { "list": [] }, "time": { "from": "now-30m", "to": "now" }, "timepicker": {}, "timezone": "", "title": "gNMIc gRPC metrics", "uid": "9W_Qzi7nz", "version": 1 } ================================================ FILE: tests/deploy.sh ================================================ #!/bin/bash trap 'failure ${LINENO} "$BASH_COMMAND"' ERR # printf "Installing containerlab...\n" # bash -c "$(curl -sL https://get-clab.srlinux.dev)" sudo clab version printf "\n" printf "Deploying lab $1\n" sudo clab deploy -t clab/$1.clab.yaml --reconfigure ================================================ FILE: tests/env_vars.sh ================================================ #!/bin/bash trap 'failure ${LINENO} "$BASH_COMMAND"' ERR targets=clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 # create read only role ./gnmic-rc1 -u admin -p NokiaSrl1! --skip-verify --debug -a $targets -e json_ietf \ set \ --update-path /system/aaa/authorization \ --update-value '{"role": {"rolename":"readonly"}}' # craete readonly role ./gnmic-rc1 -u admin -p NokiaSrl1! --skip-verify --debug -a $targets -e json_ietf \ set \ --update-path /system/configuration/role[name=readonly]/rule[path-reference="/"]/action \ --update-value "read" \ --update-path /system/aaa/authorization/role[rolename=readonly] \ --update-value '{"services": ["gnmi"]}' # create a new user ./gnmic-rc1 -u admin -p NokiaSrl1! --skip-verify --debug -a $targets -e json_ietf \ set \ --update-path /system/aaa/authentication/user[username=user1]/password \ --update-value "|Bo|Z%TYe*&\$P33~" # assign readonly role to the new user ./gnmic-rc1 -u admin -p NokiaSrl1! --skip-verify --debug -a $targets -e json_ietf \ set \ --update-path /system/aaa/authentication/user[username=user1] \ --update-value '{"role": ["readonly"]}' # check user1 has access ./gnmic-rc1 -u user1 -p '|Bo|Z%TYe*&$P33~' --skip-verify --debug -a $targets -e json_ietf \ get \ --path /system/name # password from ENV GNMIC_PASSWORD="|Bo|Z%TYe*&\$P33~" ./gnmic-rc1 -u user1 --skip-verify --debug -a $targets -e json_ietf \ get \ --path /system/name # Username from ENV GNMIC_USERNAME=user1 ./gnmic-rc1 -p '|Bo|Z%TYe*&$P33~' --skip-verify --debug -a $targets -e json_ietf \ get \ --path /system/name # both username and password from env GNMIC_USERNAME=user1 GNMIC_PASSWORD='|Bo|Z%TYe*&$P33~' ./gnmic-rc1 --skip-verify --debug -a $targets -e json_ietf \ get \ --path /system/name # username, password and debug from env GNMIC_USERNAME=user1 GNMIC_PASSWORD='|Bo|Z%TYe*&$P33~' GNMIC_DEBUG=true ./gnmic-rc1 --skip-verify -a $targets -e json_ietf \ get \ --path /system/name # all global flags from env GNMIC_USERNAME=user1 GNMIC_PASSWORD='|Bo|Z%TYe*&$P33~' GNMIC_DEBUG=true GNMIC_SKIP_VERIFY=true GNMIC_ENCODING=json_ietf GNMIC_ADDRESS=$targets ./gnmic-rc1 \ get \ --path /system/name ## config file expansion CUSTOM_ADDR=$targets GNMIC_USERNAME=user1 GNMIC_PASSWORD='|Bo|Z%TYe*&$P33~' GNMIC_SKIP_VERIFY=true GNMIC_ENCODING=json_ietf ./gnmic-rc1 --config configs/gnmic_env.yaml --debug \ get \ --path /system/name CUSTOM_ADDR=$targets GNMIC_USERNAME=user1 GNMIC_PASSWORD='|Bo|Z%TYe*&$P33~' GNMIC_SKIP_VERIFY=true SKIPVER=false GNMIC_ENCODING=json_ietf ./gnmic-rc1 --config configs/gnmic_env.yaml --debug \ get \ --path /system/dns ================================================ FILE: tests/generate_cmd.sh ================================================ #!/bin/bash trap 'failure ${LINENO} "$BASH_COMMAND"' ERR DIR_NAME="$(pwd)/srl-latest-yang-models" docker pull ghcr.io/nokia/srlinux id=$(docker create ghcr.io/nokia/srlinux) mkdir -p $DIR_NAME sudo docker cp $id:/opt/srlinux/models/. $DIR_NAME sudo docker rm $id ls -l srl-latest-yang-models sudo sed -i 's|modifier "invert-match";|//modifier "invert-match";|g' srl-latest-yang-models/srl_nokia/models/common/srl_nokia-common.yang ./gnmic-rc1 generate --path /interface/subinterface --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --exclude ".tools." ./gnmic-rc1 generate --path /interface/subinterface --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --exclude ".tools." --camel-case ./gnmic-rc1 generate --path /interface/subinterface --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --exclude ".tools." --snake-case ./gnmic-rc1 generate --path /network-instance/protocols/bgp --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --exclude ".tools." #./gnmic-rc1 generate --path /network-instance/protocols/bgp --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models/ietf --exclude ".tools." --camel-case ./gnmic-rc1 generate --path /network-instance/protocols/bgp --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --exclude ".tools." --snake-case ./gnmic-rc1 generate --path /interface/subinterface --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --exclude ".tools." --config-only ./gnmic-rc1 generate --path /interface/subinterface --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --exclude ".tools." --config-only --camel-case ./gnmic-rc1 generate --path /interface/subinterface --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --exclude ".tools." --config-only --snake-case ================================================ FILE: tests/generate_path_cmd.sh ================================================ #!/bin/bash trap 'failure ${LINENO} "$BASH_COMMAND"' ERR ./gnmic-rc1 generate path --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models ./gnmic-rc1 generate path --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --path-type gnmi ./gnmic-rc1 generate path --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --config-only ./gnmic-rc1 generate path --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --with-prefix ./gnmic-rc1 generate path --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --types ./gnmic-rc1 generate path --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --json ./gnmic-rc1 generate path --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --json --config-only ./gnmic-rc1 generate path --file srl-latest-yang-models/srl_nokia/models --dir srl-latest-yang-models --json --path-type gnmi ================================================ FILE: tests/get_cmd.sh ================================================ #!/bin/bash trap 'failure ${LINENO} "$BASH_COMMAND"' ERR # get $gnmic_base_cmd -a clab-test1-srl1 -e json_ietf get \ --path /system/name/host-name $gnmic_base_cmd -a clab-test1-srl2 -e json_ietf get \ --path /system/name/host-name $gnmic_base_cmd -a clab-test1-srl3 -e json_ietf get \ --path /system/name/host-name # get multi paths $gnmic_base_cmd -a clab-test1-srl1 -e json_ietf get \ --path /system/name/host-name \ --path /system/gnmi-server $gnmic_base_cmd -a clab-test1-srl2 -e json_ietf get \ --path /system/name/host-name \ --path /system/gnmi-server $gnmic_base_cmd -a clab-test1-srl3 -e json_ietf get \ --path /system/name/host-name \ --path /system/gnmi-server # comma separated paths $gnmic_base_cmd -a clab-test1-srl1 -e json_ietf get \ --path /system/name/host-name \ --path /system/gnmi-server $gnmic_base_cmd -a clab-test1-srl2 -e json_ietf get \ --path /system/name/host-name \ --path /system/gnmi-server $gnmic_base_cmd -a clab-test1-srl3 -e json_ietf get \ --path /system/name/host-name \ --path /system/gnmi-server # get multi hosts $gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -e json_ietf get \ --path /system/name/host-name $gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -e json_ietf get \ --path /system/name/host-name # get multi hosts and paths $gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -e json_ietf get \ --path /system/name/host-name \ --path /system/gnmi-server $gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -e json_ietf get \ --path /system/name/host-name \ --path /system/gnmi-server $gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -e json_ietf get \ --path /system/name/host-name \ --path /system/gnmi-server $gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -e json_ietf get \ --path /system/name/host-name \ --path /system/gnmi-server ================================================ FILE: tests/loaders.sh ================================================ #!/bin/bash export SHELLOPTS set -eET failure() { local lineno=$1 local msg=$2 echo "Failed at line $lineno: $msg" } export -f failure function cleanup() { echo "gnmic_config_file: gnmic-docker-loader.yaml" > clab/loaders/loaders.clab_vars.yaml sudo clab des --cleanup -t clab/loaders/loaders.clab.yaml docker image prune -f } trap 'failure ${LINENO} "$BASH_COMMAND"' ERR trap cleanup EXIT trap cleanup SIGINT # build docker image docker build -t gnmic:0.0.0-rc1 ../ start=`date +%s` # docker loader echo "gnmic_config_file: gnmic-docker-loader.yaml" > clab/loaders/loaders.clab_vars.yaml sudo clab dep -t clab/loaders/loaders.clab.yaml --reconfigure sleep 45 sudo clab des -t clab/loaders/loaders.clab.yaml --cleanup # file loader # change gnmic config file echo "gnmic_config_file: gnmic-file-loader.yaml" > clab/loaders/loaders.clab_vars.yaml # deploy lab with file loader echo "clab-loaders-srl1:" > ./clab/loaders/targets/targets.yaml echo "clab-loaders-srl2:" >> ./clab/loaders/targets/targets.yaml echo "clab-loaders-srl3:" >> ./clab/loaders/targets/targets.yaml sudo clab dep -t clab/loaders/loaders.clab.yaml --reconfigure sleep 45 ./api.sh clab-loaders-gnmic1:7890 ./api.sh clab-loaders-gnmic2:7891 ./api.sh clab-loaders-gnmic3:7892 ./api.sh clab-loaders-agg-gnmic1:7893 ./api.sh clab-loaders-agg-gnmic2:7894 ./api.sh clab-loaders-agg-gnmic3:7895 echo "clab-loaders-srl1:" > ./clab/loaders/targets/targets.yaml echo "clab-loaders-srl2:" >> ./clab/loaders/targets/targets.yaml sleep 45 ./api.sh clab-loaders-gnmic1:7890 ./api.sh clab-loaders-gnmic2:7891 ./api.sh clab-loaders-gnmic3:7892 ./api.sh clab-loaders-agg-gnmic1:7893 ./api.sh clab-loaders-agg-gnmic2:7894 ./api.sh clab-loaders-agg-gnmic3:7895 echo "clab-loaders-srl1:" > ./clab/loaders/targets/targets.yaml echo "clab-loaders-srl3:" >> ./clab/loaders/targets/targets.yaml sleep 45 ./api.sh clab-loaders-gnmic1:7890 ./api.sh clab-loaders-gnmic2:7891 ./api.sh clab-loaders-gnmic3:7892 ./api.sh clab-loaders-agg-gnmic1:7893 ./api.sh clab-loaders-agg-gnmic2:7894 ./api.sh clab-loaders-agg-gnmic3:7895 sudo clab des -t clab/loaders/loaders.clab.yaml --cleanup ================================================ FILE: tests/metrics/gnmic.yaml ================================================ username: admin password: NokiaSrl1! skip-verify: true encoding: json_ietf log: true loader: type: docker debug: true enable-metrics: true filters: - containers: - label=clab-node-kind: srl label=containerlab: metrics subscriptions: # Add subscriptions configuration here # e.g: sub1: paths: - /interface/statistics stream-mode: sample sample-interval: 10s sub2: paths: - /interface/subinterface/statistics stream-mode: sample sample-interval: 10s api-server: enable-metrics: true # clustering config clustering: cluster-name: collectors targets-watch-timer: 60s locker: type: consul address: clab-metrics-consul-agent:8500 outputs: collector: type: prometheus service-registration: address: clab-metrics-consul-agent:8500 event-processors: - trim-prefixes processors: trim-prefixes: event-strings: value-names: - ".*" transforms: - path-base: apply-on: "name" ================================================ FILE: tests/metrics/grafana/dashboards.yaml ================================================ apiVersion: 1 providers: - name: 'gNMIc Internal Metrics' orgId: 1 folder: '' type: file disableDeletion: false editable: true options: path: /var/lib/grafana/dashboards foldersFromFilesStructure: true ================================================ FILE: tests/metrics/grafana/datasources/datasource.yaml ================================================ apiVersion: 1 deleteDatasources: - name: Prometheus orgId: 1 datasources: - name: Prometheus type: prometheus orgId: 1 url: http://clab-metrics-prometheus:9090 password: user: database: basicAuth: false basicAuthUser: basicAuthPassword: withCredentials: isDefault: true version: 1 editable: true ================================================ FILE: tests/metrics/metrics.clab.yaml ================================================ name: metrics topology: defaults: kind: linux image: gnmic:0.0.0-rc1 kinds: srl: image: ghcr.io/nokia/srlinux nodes: {{- range $id := seq 1 9}} srl{{ $id }}: kind: srl {{- end }} gnmic1: binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml subscribe' ports: - 7890:7890 - 9804:9804 env: GNMIC_API: :7890 GNMIC_CLUSTERING_INSTANCE_NAME: clab-metrics-gnmic1 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-metrics-gnmic1 GNMIC_GNMI_SERVER_ADDRESS: clab-metrics-gnmic1:57400 GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-metrics-gnmic1 GNMIC_OUTPUTS_COLLECTOR_LISTEN: clab-metrics-gnmic1:9804 gnmic2: binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml subscribe' ports: - 7891:7891 - 9805:9805 env: GNMIC_API: :7891 GNMIC_CLUSTERING_INSTANCE_NAME: clab-metrics-gnmic2 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-metrics-gnmic2 GNMIC_GNMI_SERVER_ADDRESS: clab-metrics-gnmic2:57400 GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-metrics-gnmic2 GNMIC_OUTPUTS_COLLECTOR_LISTEN: clab-metrics-gnmic2:9805 gnmic3: binds: - ./gnmic.yaml:/app/gnmic.yaml:ro - /var/run/docker.sock:/var/run/docker.sock cmd: '--config /app/gnmic.yaml subscribe' ports: - 7892:7892 - 9806:9806 env: GNMIC_API: :7892 GNMIC_CLUSTERING_INSTANCE_NAME: clab-metrics-gnmic3 GNMIC_CLUSTERING_SERVICE_ADDRESS: clab-metrics-gnmic3 GNMIC_GNMI_SERVER_ADDRESS: clab-metrics-gnmic3:57400 GNMIC_GNMI_SERVER_SERVICE_REGISTRATION_NAME: clab-metrics-gnmic3 GNMIC_OUTPUTS_COLLECTOR_LISTEN: clab-metrics-gnmic3:9806 consul-agent: image: consul:latest ports: - 8500:8500 - 8600:8600/udp cmd: 'agent -server -ui -bind=127.0.0.1 -node=server-1 -bootstrap-expect=1 -client=0.0.0.0' prometheus: image: prom/prometheus:latest user: 65534:65534 ports: - 9090:9090 binds: - ./prometheus/:/etc/prometheus/ cmd: | --config.file=/etc/prometheus/prometheus.yaml --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles --log.level=debug grafana: image: grafana/grafana:latest binds: - grafana/datasources/datasource.yaml:/etc/grafana/provisioning/datasources/datasource.yaml:ro - grafana/dashboards.yaml:/etc/grafana/provisioning/dashboards/dashboards.yaml:ro - ../dashboards/:/var/lib/grafana/dashboards ports: - 3000:3000 ================================================ FILE: tests/metrics/prometheus/prometheus.yaml ================================================ global: scrape_interval: 10s evaluation_interval: 10s scrape_configs: - job_name: 'gnmic' scrape_interval: 10s consul_sd_configs: - server: clab-metrics-consul-agent:8500 services: - collectors-gnmic-api - prometheus-collector ================================================ FILE: tests/metrics/run.sh ================================================ #!/bin/bash case "$1" in "build") docker build -t gnmic:0.0.0-rc1 ../../ esac sudo clab dep -t metrics.clab.yaml --reconfigure sleep 60 curl http://clab-metrics-gnmic1:7890/metrics curl http://clab-metrics-gnmic2:7891/metrics curl http://clab-metrics-gnmic3:7892/metrics ================================================ FILE: tests/run.sh ================================================ #!/bin/bash export SHELLOPTS set -eET failure() { local lineno=$1 local msg=$2 echo "Failed at line $lineno: $msg" } export -f failure function cleanup() { printf "cleaning up...\n" ./cleanup.sh test_lab1 } export -f cleanup function contains() { if [[ "$1" != *"$2"* ]]; then exit 1 fi } export -f contains trap 'failure ${LINENO} "$BASH_COMMAND"' ERR trap cleanup EXIT trap cleanup SIGINT export gnmic_base_cmd="./gnmic-rc1 -u admin -p NokiaSrl1! --skip-verify --debug" function buildgNMIc() { printf "Building gnmic...\n" CGO_ENABLED=0 go build -o gnmic-rc1 -ldflags="-s -w -X 'github.com/openconfig/gnmic/pkg/version.Commit=$(git rev-parse --short HEAD)' -X 'github.com/openconfig/gnmic/pkg/version.Date=$(date)'" ../ } start=`date +%s` case "$1" in "all") # build gnmic buildgNMIc # run version cmd ./version_cmd.sh # run generate cmd ./generate_cmd.sh # run generate path cmd ./generate_path_cmd.sh # deploy basic 3 nodes lab ./deploy.sh test_lab1 # run capabilities cmd tests ./capabilities_cmd.sh # run get cmd tests ./get_cmd.sh # redeploy to avoid getting error: rpc error: code = ResourceExhausted desc = Max number of operations per minute (rate-limit) reached (max: 60) ./deploy.sh test_lab1 # run set md tests ./set_cmd.sh # redeploy to avoid getting error: rpc error: code = ResourceExhausted desc = Max number of operations per minute (rate-limit) reached (max: 60) ./deploy.sh test_lab1 # run subscribe once cmd tests ./subscribe_once_cmd.sh # cleanup test_lab1 cleanup test_lab1 # run loaders tests ./loaders.sh ;; "version") # build gnmic buildgNMIc # run version cmd ./version_cmd.sh ;; "generate") # build gnmic buildgNMIc # run generate cmd ./generate_cmd.sh ./generate_path_cmd.sh ;; "cap") # build gnmic buildgNMIc # deploy basic 3 nodes lab ./deploy.sh test_lab1 # run capabilities cmd tests ./capabilities_cmd.sh ;; "get") # build gnmic buildgNMIc # deploy basic 3 nodes lab ./deploy.sh test_lab1 # run get cmd tests ./get_cmd.sh ;; "set") # build gnmic buildgNMIc # deploy basic 3 nodes lab ./deploy.sh test_lab1 # run set md tests ./set_cmd.sh ;; "sub") # build gnmic buildgNMIc # deploy basic 3 nodes lab ./deploy.sh test_lab1 # run sub tests ./subscribe_once_cmd.sh ;; "env") # build gnmic buildgNMIc # deploy basic 3 nodes lab ./deploy.sh test_lab1 # run sub tests ./env_vars.sh ;; "loaders") ./loaders.sh ;; *) echo "./run.sh [ all | version | generate | cap | get | set | sub | loaders ]" exit 1 ;; esac # calculate runtime end=`date +%s` runtime=$((end-start)) printf "runtime=%ss\n" $runtime ================================================ FILE: tests/run_tests.sh ================================================ #!/bin/bash set -e function testmodule { cd $1 go test -cover ./... -v -count=1 cd $SCRIPTPATH/.. } declare -a modules=("." "pkg/api" "pkg/cache") SCRIPTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" cd $SCRIPTPATH/.. for i in "${modules[@]}" do echo "Running tests for module $i" testmodule "$i" done ================================================ FILE: tests/set_cmd.sh ================================================ #!/bin/bash trap 'failure ${LINENO} "$BASH_COMMAND"' ERR ######### ## SET ## ######### ################# ## SET REPLACE ## ################# ### set replace with value #### single host ######################## gnmic_base_cmd="./gnmic-rc1 -u admin -p NokiaSrl1! --skip-verify" $gnmic_base_cmd -a clab-test1-srl1 set \ -e json_ietf \ --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 out=$($gnmic_base_cmd -a clab-test1-srl1 get -e json_ietf \ --path /interface[name=ethernet-1/1]/description | jq -r '.[].updates[].values."srl_nokia-interfaces:interface/description"') contains $out "e1-1_dummy_desc1" ######################## $gnmic_base_cmd -a clab-test1-srl1 set \ --delimiter "CUSTOM_DELIMITER" \ --replace /interface[name=ethernet-1/1]/descriptionCUSTOM_DELIMITERjson_ietfCUSTOM_DELIMITERe1-1_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl1 set -e json_ietf \ --replace-path /interface[name=ethernet-1/1]/description \ --replace-value e1-1_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl2 set \ --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl2 set \ --delimiter "CUSTOM_DELIMITER" \ --replace /interface[name=ethernet-1/1]/descriptionCUSTOM_DELIMITERjson_ietfCUSTOM_DELIMITERe1-1_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl2 set -e json_ietf \ --replace-path /interface[name=ethernet-1/1]/description \ --replace-value e1-1_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl3 set \ --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl3 set \ --delimiter "CUSTOM_DELIMITER" \ --replace /interface[name=ethernet-1/1]/descriptionCUSTOM_DELIMITERjson_ietfCUSTOM_DELIMITERe1-1_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl3 set -e json_ietf \ --replace-path /interface[name=ethernet-1/1]/description \ --replace-value e1-1_dummy_desc1 #### multi hosts $gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 set \ --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -e json_ietf set \ --replace-path /interface[name=ethernet-1/1]/description \ --replace-value e1-1_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 set \ --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 set \ --delimiter "CUSTOM_DELIMITER" \ --replace /interface[name=ethernet-1/1]/descriptionCUSTOM_DELIMITERjson_ietfCUSTOM_DELIMITERe1-1_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -e json_ietf set \ --replace-path /interface[name=ethernet-1/1]/description \ --replace-value e1-1_dummy_desc1 ### set replace with multiple values #### single host $gnmic_base_cmd -a clab-test1-srl1 set \ --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 \ --replace /interface[name=ethernet-1/2]/description:::json_ietf:::e1-2_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl1 set \ --delimiter "CUSTOM_DELIMITER" \ --replace /interface[name=ethernet-1/1]/descriptionCUSTOM_DELIMITERjson_ietfCUSTOM_DELIMITERe1-1_dummy_desc1 \ --replace /interface[name=ethernet-1/2]/descriptionCUSTOM_DELIMITERjson_ietfCUSTOM_DELIMITERe1-2_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl1 -e json_ietf set \ --replace-path /interface[name=ethernet-1/1]/description \ --replace-value e1-1_dummy_desc1 \ --replace-path /interface[name=ethernet-1/2]/description \ --replace-value e1-2_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl2 set \ --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 \ --replace /interface[name=ethernet-1/2]/description:::json_ietf:::e1-2_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl2 set \ --delimiter "CUSTOM_DELIMITER" \ --replace /interface[name=ethernet-1/1]/descriptionCUSTOM_DELIMITERjson_ietfCUSTOM_DELIMITERe1-1_dummy_desc1 \ --replace /interface[name=ethernet-1/2]/descriptionCUSTOM_DELIMITERjson_ietfCUSTOM_DELIMITERe1-2_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl2 -e json_ietf set \ --replace-path /interface[name=ethernet-1/1]/description \ --replace-value e1-1_dummy_desc1 \ --replace-path /interface[name=ethernet-1/2]/description \ --replace-value e1-2_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl3 set \ --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 \ --replace /interface[name=ethernet-1/2]/description:::json_ietf:::e1-2_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl3 set \ --delimiter "CUSTOM_DELIMITER" \ --replace /interface[name=ethernet-1/1]/descriptionCUSTOM_DELIMITERjson_ietfCUSTOM_DELIMITERe1-1_dummy_desc1 \ --replace /interface[name=ethernet-1/2]/descriptionCUSTOM_DELIMITERjson_ietfCUSTOM_DELIMITERe1-1_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl3 -e json_ietf set \ --replace-path /interface[name=ethernet-1/1]/description \ --replace-value e1-1_dummy_desc1 \ --replace-path /interface[name=ethernet-1/2]/description \ --replace-value e1-2_dummy_desc1 #### multi hosts $gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 set \ --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -e json_ietf set \ --replace-path /interface[name=ethernet-1/1]/description \ --replace-value e1-1_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 set \ --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -e json_ietf set \ --replace-path /interface[name=ethernet-1/1]/description \ --replace-value e1-1_dummy_desc1 ### set replace with file #### JSON file ##### single host cat configs/node/interface.json $gnmic_base_cmd -a clab-test1-srl1 -e json_ietf -d set \ --replace-path /interface[name=ethernet-1/1] \ --replace-file configs/node/interface.json $gnmic_base_cmd -a clab-test1-srl2 -e json_ietf -d set \ --replace-path /interface[name=ethernet-1/1] \ --replace-file configs/node/interface.json $gnmic_base_cmd -a clab-test1-srl3 -e json_ietf -d set \ --replace-path /interface[name=ethernet-1/1] \ --replace-file configs/node/interface.json ##### multi hosts $gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -e json_ietf -d set \ --replace-path /interface[name=ethernet-1/1] \ --replace-file configs/node/interface.json $gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -e json_ietf -d set \ --replace-path /interface[name=ethernet-1/1] \ --replace-file configs/node/interface.json #### YAML file ##### single host cat configs/node/interface.yaml $gnmic_base_cmd -a clab-test1-srl1 -e json_ietf -d set \ --replace-path /interface[name=ethernet-1/1] \ --replace-file configs/node/interface.yaml $gnmic_base_cmd -a clab-test1-srl2 -e json_ietf -d set \ --replace-path /interface[name=ethernet-1/1] \ --replace-file configs/node/interface.yaml $gnmic_base_cmd -a clab-test1-srl3 -e json_ietf -d set \ --replace-path /interface[name=ethernet-1/1] \ --replace-file configs/node/interface.yaml ##### multi hosts $gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -e json_ietf -d set \ --replace-path /interface[name=ethernet-1/1] \ --replace-file configs/node/interface.yaml $gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -e json_ietf -d set \ --replace-path /interface[name=ethernet-1/1] \ --replace-file configs/node/interface.yaml ### set replace with request file $gnmic_base_cmd -a clab-test1-srl1 set --request-file configs/node/replace_request_file.yaml $gnmic_base_cmd -a clab-test1-srl2 set --request-file configs/node/replace_request_file.yaml $gnmic_base_cmd -a clab-test1-srl3 set --request-file configs/node/replace_request_file.yaml $gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 set --request-file configs/node/replace_request_file.yaml $gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 set --request-file configs/node/replace_request_file.yaml ################ ## SET UPDATE ## ################ ### set update with value #### single host $gnmic_base_cmd -a clab-test1-srl1 set --update /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl1 set --update-path /interface[name=ethernet-1/1]/description --update-value e1-1_dummy_desc2 -e json_ietf $gnmic_base_cmd -a clab-test1-srl2 set --update /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl2 set --update-path /interface[name=ethernet-1/1]/description --update-value e1-1_dummy_desc2 -e json_ietf $gnmic_base_cmd -a clab-test1-srl3 set --update /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl3 set --update-path /interface[name=ethernet-1/1]/description --update-value e1-1_dummy_desc2 -e json_ietf #### multi hosts $gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 set --update /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 set --update-path /interface[name=ethernet-1/1]/description --update-value e1-1_dummy_desc1 -e json_ietf $gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 set --update /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 $gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 set --update-path /interface[name=ethernet-1/1]/description --update-value e1-1_dummy_desc1 -e json_ietf ### set update with file #### JSON file ##### single host cat configs/node/interface.json $gnmic_base_cmd -a clab-test1-srl1 set --update-path /interface[name=ethernet-1/1] --update-file configs/node/interface.json -e json_ietf $gnmic_base_cmd -a clab-test1-srl2 set --update-path /interface[name=ethernet-1/1] --update-file configs/node/interface.json -e json_ietf $gnmic_base_cmd -a clab-test1-srl3 set --update-path /interface[name=ethernet-1/1] --update-file configs/node/interface.json -e json_ietf ##### multi hosts $gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 set --update-path /interface[name=ethernet-1/1] --update-file configs/node/interface.json -e json_ietf $gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 set --update-path /interface[name=ethernet-1/1] --update-file configs/node/interface.json -e json_ietf #### YAML file ##### single host cat configs/node/interface.yaml $gnmic_base_cmd -a clab-test1-srl1 set --update-path /interface[name=ethernet-1/1] --update-file configs/node/interface.yaml -e json_ietf $gnmic_base_cmd -a clab-test1-srl2 set --update-path /interface[name=ethernet-1/1] --update-file configs/node/interface.yaml -e json_ietf $gnmic_base_cmd -a clab-test1-srl3 set --update-path /interface[name=ethernet-1/1] --update-file configs/node/interface.yaml -e json_ietf ##### multi hosts $gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 set --update-path /interface[name=ethernet-1/1] --update-file configs/node/interface.yaml -e json_ietf $gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 set --update-path /interface[name=ethernet-1/1] --update-file configs/node/interface.yaml -e json_ietf ### set update with request file $gnmic_base_cmd -a clab-test1-srl1 set --request-file configs/node/update_request_file.yaml $gnmic_base_cmd -a clab-test1-srl2 set --request-file configs/node/update_request_file.yaml $gnmic_base_cmd -a clab-test1-srl3 set --request-file configs/node/update_request_file.yaml $gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 set --request-file configs/node/replace_request_file.yaml $gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 set --request-file configs/node/replace_request_file.yaml ## delete ### single host $gnmic_base_cmd -a clab-test1-srl1 set --delete /interface[name=ethernet-1/1]/description $gnmic_base_cmd -a clab-test1-srl2 set --delete /interface[name=ethernet-1/1]/description $gnmic_base_cmd -a clab-test1-srl3 set --delete /interface[name=ethernet-1/1]/description ### multi hosts $gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 set --delete /interface[name=ethernet-1/1]/description $gnmic_base_cmd -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 set --delete /interface[name=ethernet-1/1]/description ## combined update, replace and delete ### combined set with value $gnmic_base_cmd -a clab-test1-srl1 set \ --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 \ --update /interface[name=ethernet-1/2]/description:::json_ietf:::e1-2_dummy_desc1 \ --delete /interface[name=ethernet-1/1]/admin-state $gnmic_base_cmd -a clab-test1-srl2 set \ --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 \ --update /interface[name=ethernet-1/2]/description:::json_ietf:::e1-2_dummy_desc1 \ --delete /interface[name=ethernet-1/1]/admin-state $gnmic_base_cmd -a clab-test1-srl3 set \ --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 \ --update /interface[name=ethernet-1/2]/description:::json_ietf:::e1-2_dummy_desc1 \ --delete /interface[name=ethernet-1/1]/admin-state # reset $gnmic_base_cmd -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 set \ --delete /interface[name=ethernet-1/1]/description \ --delete /interface[name=ethernet-1/2]/description $gnmic_base_cmd -a clab-test1-srl1 set \ --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 \ --replace /interface[name=ethernet-1/1]/subinterface[index=0]/description:::json_ietf:::e1-1.0_dummy_desc1 \ --update /interface[name=ethernet-1/2]/description:::json_ietf:::e1-2_dummy_desc1 \ --update /interface[name=ethernet-1/2]/subinterface[index=0]/description:::json_ietf:::e1-2._dummy_desc1 \ --delete /interface[name=ethernet-1/1]/admin-state \ --delete /interface[name=ethernet-1/2]/admin-state $gnmic_base_cmd -a clab-test1-srl2 set \ --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 \ --replace /interface[name=ethernet-1/1]/subinterface[index=0]/description:::json_ietf:::e1-1.0_dummy_desc1 \ --update /interface[name=ethernet-1/2]/description:::json_ietf:::e1-2_dummy_desc1 \ --update /interface[name=ethernet-1/2]/subinterface[index=0]/description:::json_ietf:::e1-2._dummy_desc1 \ --delete /interface[name=ethernet-1/1]/admin-state \ --delete /interface[name=ethernet-1/2]/admin-state $gnmic_base_cmd -a clab-test1-srl3 set \ --replace /interface[name=ethernet-1/1]/description:::json_ietf:::e1-1_dummy_desc1 \ --replace /interface[name=ethernet-1/1]/subinterface[index=0]/description:::json_ietf:::e1-1.0_dummy_desc1 \ --update /interface[name=ethernet-1/2]/description:::json_ietf:::e1-2_dummy_desc1 \ --update /interface[name=ethernet-1/2]/subinterface[index=0]/description:::json_ietf:::e1-2._dummy_desc1 \ --delete /interface[name=ethernet-1/1]/admin-state \ --delete /interface[name=ethernet-1/2]/admin-state $gnmic_base_cmd -a clab-test1-srl1 set -e json_ietf \ --replace-path /interface[name=ethernet-1/1]/description \ --replace-value e1-1_dummy_desc1 \ --replace-path /interface[name=ethernet-1/1]/subinterface[index=0]/description \ --replace-value e1-1.0_dummy_desc1 \ --update-path /interface[name=ethernet-1/2]/description \ --update-value e1-2_dummy_desc1 \ --update-path /interface[name=ethernet-1/2]/subinterface[index=0]/description \ --update-value e1-2.0_dummy_desc1 \ --delete /interface[name=ethernet-1/1]/admin-state \ --delete /interface[name=ethernet-1/2]/admin-state $gnmic_base_cmd -a clab-test1-srl2 set -e json_ietf \ --replace-path /interface[name=ethernet-1/1]/description \ --replace-value e1-1_dummy_desc1 \ --replace-path /interface[name=ethernet-1/1]/subinterface[index=0]/description \ --replace-value e1-1.0_dummy_desc1 \ --update-path /interface[name=ethernet-1/2]/description \ --update-value e1-2_dummy_desc1 \ --update-path /interface[name=ethernet-1/2]/subinterface[index=0]/description \ --update-value e1-2.0_dummy_desc1 \ --delete /interface[name=ethernet-1/1]/admin-state \ --delete /interface[name=ethernet-1/2]/admin-state $gnmic_base_cmd -a clab-test1-srl3 set -e json_ietf \ --replace-path /interface[name=ethernet-1/1]/description \ --replace-value e1-1_dummy_desc1 \ --replace-path /interface[name=ethernet-1/1]/subinterface[index=0]/description \ --replace-value e1-1.0_dummy_desc1 \ --update-path /interface[name=ethernet-1/2]/description \ --update-value e1-2_dummy_desc1 \ --update-path /interface[name=ethernet-1/2]/subinterface[index=0]/description \ --update-value e1-2.0_dummy_desc1 \ --delete /interface[name=ethernet-1/1]/admin-state \ --delete /interface[name=ethernet-1/2]/admin-state \ --dry-run $gnmic_base_cmd -a clab-test1-srl3 set -e json_ietf \ --replace-path /interface[name=ethernet-1/1]/description \ --replace-value e1-1_dummy_desc1 \ --replace-path /interface[name=ethernet-1/1]/subinterface[index=0]/description \ --replace-value e1-1.0_dummy_desc1 \ --update-path /interface[name=ethernet-1/2]/description \ --update-value e1-2_dummy_desc1 \ --update-path /interface[name=ethernet-1/2]/subinterface[index=0]/description \ --update-value e1-2.0_dummy_desc1 \ --delete /interface[name=ethernet-1/1]/admin-state \ --delete /interface[name=ethernet-1/2]/admin-state ================================================ FILE: tests/subscribe_once_cmd.sh ================================================ #!/bin/bash trap 'failure ${LINENO} "$BASH_COMMAND"' ERR # single host, single path ./gnmic-rc1 -a clab-test1-srl1 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \ --mode once \ --path /system/name ./gnmic-rc1 -a clab-test1-srl2 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \ --mode once \ --path /system/name ./gnmic-rc1 -a clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \ --mode once \ --path /system/name # multiple hosts, single path ./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \ --mode once \ --path /system/name ./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \ --mode once \ --path /system/name ./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \ --mode once \ --path /system/name # ./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \ --mode once \ --path /system/name ./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \ --mode once \ --path /system/name ./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \ --mode once \ --path /system/name # multiple hosts, multiple paths ./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \ --mode once \ --path /system/name \ --path /interface[name=*] ./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \ --mode once \ --path /system/name \ --path /interface[name=*] ./gnmic-rc1 -a clab-test1-srl1 -a clab-test1-srl2 -a clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \ --mode once \ --path /system/name \ --path /interface[name=*] # ./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \ --mode once \ --path /system/name \ --path /interface[name=*] ./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \ --mode once \ --path /system/name \ --path /interface[name=*] ./gnmic-rc1 -a clab-test1-srl1,clab-test1-srl2,clab-test1-srl3 -u admin -p NokiaSrl1! --skip-verify -d -e ascii subscribe \ --mode once \ --path /system/name \ --path /interface[name=*] # subscription config from file ./gnmic-rc1 -a clab-test1-srl1 --config configs/gnmic1.yaml subscribe ./gnmic-rc1 -a clab-test1-srl2 --config configs/gnmic1.yaml subscribe ./gnmic-rc1 -a clab-test1-srl3 --config configs/gnmic1.yaml subscribe ./gnmic-rc1 -a clab-test1-srl1 --config configs/gnmic1.yaml subscribe --format prototext ./gnmic-rc1 -a clab-test1-srl1 --config configs/gnmic1.yaml subscribe --format protojson ./gnmic-rc1 -a clab-test1-srl1 --config configs/gnmic1.yaml subscribe --format event # hosts and config from file ./gnmic-rc1 --config configs/gnmic2.yaml subscribe ./gnmic-rc1 --config configs/gnmic2.yaml subscribe --format prototext ./gnmic-rc1 --config configs/gnmic2.yaml subscribe --format protojson ./gnmic-rc1 --config configs/gnmic2.yaml subscribe --format event # nodes from targets field ./gnmic-rc1 --config configs/gnmic3.yaml subscribe ./gnmic-rc1 --config configs/gnmic3.yaml subscribe --format prototext ./gnmic-rc1 --config configs/gnmic3.yaml subscribe --format protojson ./gnmic-rc1 --config configs/gnmic3.yaml subscribe --format event # multiple once subscriptions ./gnmic-rc1 --config configs/gnmic4.yaml subscribe ./gnmic-rc1 --config configs/gnmic4.yaml subscribe --format prototext ./gnmic-rc1 --config configs/gnmic4.yaml subscribe --format protojson ./gnmic-rc1 --config configs/gnmic4.yaml subscribe --format event ================================================ FILE: tests/telemetry_labs.sh ================================================ #!/bin/bash export SHELLOPTS set -eET failure() { local lineno=$1 local msg=$2 echo "Failed at line $lineno: $msg" } NUM_LABS=5 NUM_NODES_PER_LAB=14 export -f failure function cleanup() { printf "cleaning up...\n" cd clab/telemetry sudo clab destroy -t telemetry.clab.yaml --cleanup cd ../.. # for i in `seq 1 $NUM_LABS` do printf "destroying lab clab/lab%s.clab.yaml\n" $i sudo clab destroy -t clab/lab$i.clab.yaml --cleanup rm clab/lab$i.clab.yaml rm -rf .lab$i.clab.yaml done } source ./cluster_funcs.sh trap 'failure ${LINENO} "$BASH_COMMAND"' ERR trap cleanup EXIT trap cleanup SIGINT start=`date +%s` # generate lab files for i in `seq 1 $NUM_LABS` do echo 'ID: ' $i | gomplate -f clab/labN.clab.yaml -d data=stdin:///id.yaml -o clab/lab${i}.clab.yaml done # destroy labs if they are still up. for i in `seq 1 $NUM_LABS` do sudo clab destroy -t clab/lab${i}.clab.yaml --cleanup done # build docker image docker build -t gnmic:0.0.0-rc1 ../ # deploy telemetry lab echo "" cd clab/telemetry sudo clab deploy -t telemetry.clab.yaml --reconfigure cd ../.. echo "" # check all containers are running container_count=$(docker ps -f label=containerlab=telemetry -q | wc -l) if [ $container_count -ne 13 ] then printf "Number of telemetry containers is not 13, it's %s... time to panic\n" $container_count exit 1 fi printf "Found %s running containers\n" $container_count echo "" echo "Waiting for services to register..." sleep 30 printf "Consul services:\n" consul catalog services -tags echo "" printf "Consul services to instances:\n" consul-template -template="consul_templates/all_services.tpl:all_services.txt" -once cat all_services.txt rm all_services.txt ################################## # Deploying labs with SRL nodes # ################################## while true do printf "Waiting a bit before deploying the nodes\n" echo "" sleep 10 for i in `seq 1 $NUM_LABS` do echo "Deploying lab" $i sudo clab deploy -t clab/lab${i}.clab.yaml --reconfigure done echo "" sleep 30 nodes_count=$(docker ps -f label=clab-node-kind=srl -f label=test=telemetry -q | wc -l) printf "Found %s running SRL nodes\n" $nodes_count printf "Waiting for the next docker loader run before checking the number of locked targets...\n" sleep 30 check_num_locked_targets $(($NUM_NODES_PER_LAB * $NUM_LABS)) sleep 60 echo "Running API calls..." ./api.sh clab-telemetry-gnmic1:7890 ./api.sh clab-telemetry-gnmic2:7891 ./api.sh clab-telemetry-gnmic3:7892 ./api.sh clab-telemetry-agg-gnmic1:7893 ./api.sh clab-telemetry-agg-gnmic2:7894 ./api.sh clab-telemetry-agg-gnmic3:7895 echo "" #start adding and removing labs echo "Waiting a bit before starting to add and remove labs..." sleep 10 ## remove 2 labs sudo clab destroy -t clab/lab1.clab.yaml --cleanup sudo clab destroy -t clab/lab5.clab.yaml --cleanup sleep 60 check_num_locked_targets $(($NUM_NODES_PER_LAB * ((${NUM_LABS} - 2)))) sleep 60 ## add 1 lab echo "Re Deploying lab1" sudo clab deploy -t clab/lab1.clab.yaml --reconfigure sleep 60 check_num_locked_targets $(($NUM_NODES_PER_LAB * ((${NUM_LABS} - 1)))) sleep 60 ## add 1 lab and remove 1 echo "Destroying lab2, Adding back lab5" sudo clab deploy -t clab/lab5.clab.yaml --reconfigure sudo clab destroy -t clab/lab2.clab.yaml --cleanup sleep 60 check_num_locked_targets $(($NUM_NODES_PER_LAB * ((${NUM_LABS} - 1)))) sleep 60 echo "Running API calls..." ./api.sh clab-telemetry-gnmic1:7890 ./api.sh clab-telemetry-gnmic2:7891 ./api.sh clab-telemetry-gnmic3:7892 ./api.sh clab-telemetry-agg-gnmic1:7893 ./api.sh clab-telemetry-agg-gnmic2:7894 ./api.sh clab-telemetry-agg-gnmic3:7895 echo "Re Deploying lab2" sudo clab deploy -t clab/lab2.clab.yaml --reconfigure sleep 60 check_num_locked_targets $(($NUM_NODES_PER_LAB * $NUM_LABS)) for i in `seq 1 $NUM_LABS` do printf "destroying lab clab/lab%s.clab.yaml\n" $i sudo clab destroy -t clab/lab$i.clab.yaml --cleanup # rm clab/lab$i.clab.yaml # rm -rf .lab$i.clab.yaml done sleep 60 done ####### # END # ####### # calculate runtime end=`date +%s` runtime=$((end-start)) printf "runtime=%ss\n" $runtime ================================================ FILE: tests/version_cmd.sh ================================================ #!/bin/bash trap 'failure ${LINENO} "$BASH_COMMAND"' ERR # version ./gnmic-rc1 version ./gnmic-rc1 version --format json ./gnmic-rc1 version upgrade ./gnmic-rc1 version upgrade --use-pkg