Showing preview only (620K chars total). Download the full file or copy to clipboard to get everything.
Repository: Dabz/kafka-security-playbook
Branch: master
Commit: 24ee035b30cc
Files: 476
Total size: 515.5 KB
Directory structure:
gitextract_67i_esfb/
├── .gitignore
├── KerberosCheatsheet.md
├── README.md
├── TlsCheatsheet.md
├── acls/
│ ├── docker-compose.yaml
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── admin.conf
│ │ ├── consumer.conf
│ │ ├── kafka.conf
│ │ ├── kafka.sasl.jaas.conf
│ │ ├── kafkacat.conf
│ │ ├── log4j.properties.template
│ │ └── producer.conf
│ ├── up
│ └── zookeeper.sasl.jaas.conf
├── apache-kafka-with-zk3.5-and-tls/
│ ├── .gitignore
│ ├── README.md
│ ├── docker-compose.yml
│ ├── kafka/
│ │ ├── Dockerfile
│ │ └── server.properties
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── tlsZkCli.sh
│ └── zoo.cfg
├── auditlog/
│ ├── README.md
│ ├── config/
│ │ └── delete-records.json
│ ├── data/
│ │ └── my_msgs.txt
│ ├── docker-compose.yml
│ ├── example-config.json
│ ├── kafka/
│ │ ├── consumer-user.properties
│ │ ├── kafka-user.properties
│ │ ├── kafka.properties
│ │ ├── kafka.sasl.jaas.config
│ │ ├── log4j.properties
│ │ ├── producer-user.properties
│ │ └── tools-log4j.properties
│ ├── scripts/
│ │ ├── create-topics.sh
│ │ ├── delete-records.sh
│ │ ├── describe-topics.sh
│ │ ├── explore-audit-topic.sh
│ │ └── write-msg.sh
│ ├── up
│ └── zookeeper/
│ ├── log4j.properties
│ ├── tools-log4j.properties
│ ├── zookeeper.properties
│ └── zookeeper.sasl.jaas.config
├── ca-builder-scripts/
│ ├── .gitignore
│ ├── README.md
│ ├── build-a-batch-of-certs.sh
│ ├── build-a-batch-of-stores.sh
│ ├── configs/
│ │ ├── batch-of-certs.txt
│ │ ├── batch-of-stores.txt
│ │ ├── ca-config-vars
│ │ ├── ca.config
│ │ └── intermediate-ca.config
│ ├── create-crl.sh
│ ├── create-pair-certs.sh
│ ├── del-cert.sh
│ ├── revoke-cert.sh
│ ├── setup-ca-with-intermediate-ca.sh
│ ├── support-scripts/
│ │ ├── build-ca.sh
│ │ └── create-cert.sh
│ └── utils/
│ ├── build-ca.sh
│ ├── build-intermediate-ca.sh
│ └── functions.sh
├── delegation_tokens/
│ ├── .gitignore
│ ├── ca.cnf
│ ├── client.cnf
│ ├── docker-compose.yml
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── confluent.repo
│ │ ├── consumer.properties
│ │ ├── create_client_properties.sh
│ │ ├── kafka_server_jaas.conf
│ │ └── server.properties
│ ├── server.cnf
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── confluent.repo
│ ├── zookeeper.properties
│ └── zookeeper.sasl.jaas.config
├── kafka-connect-mtls/
│ ├── .gitignore
│ ├── README.md
│ ├── check-ssl-client-auth.sh
│ ├── connect/
│ │ ├── config/
│ │ │ ├── ca.cnf
│ │ │ └── client.cnf
│ │ └── secrets/
│ │ ├── ca-chain.cert.pem
│ │ ├── connect.cert.pem
│ │ ├── connect.key.pem
│ │ ├── server.keystore
│ │ └── server.truststore
│ ├── docker-compose.yml
│ └── up
├── kerberos/
│ ├── README.md
│ ├── client/
│ │ ├── Dockerfile
│ │ ├── client.sasl.jaas.config
│ │ ├── command.properties
│ │ ├── confluent.repo
│ │ ├── consumer.properties
│ │ └── producer.properties
│ ├── docker-compose.yml
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── confluent.repo
│ │ ├── kafka.sasl.jaas.config
│ │ └── server.properties
│ ├── kdc/
│ │ ├── Dockerfile
│ │ └── krb5.conf
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── confluent.repo
│ ├── zookeeper.properties
│ └── zookeeper.sasl.jaas.config
├── kerberos-multi-node/
│ ├── README.md
│ ├── docker-compose.yml
│ ├── down
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── confluent.repo
│ │ ├── consumer.properties
│ │ ├── kafka.sasl.jaas.config
│ │ └── server.properties
│ ├── kafka1/
│ │ ├── Dockerfile
│ │ ├── confluent.repo
│ │ ├── consumer.properties
│ │ ├── kafka.sasl.jaas.config
│ │ └── server.properties
│ ├── kdc/
│ │ ├── Dockerfile
│ │ └── krb5.conf
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── confluent.repo
│ ├── zookeeper.properties
│ └── zookeeper.sasl.jaas.config
├── kerberos-multi-sasl/
│ ├── README.md
│ ├── client/
│ │ ├── Dockerfile
│ │ ├── client.sasl.jaas.config
│ │ ├── command.properties
│ │ ├── confluent.repo
│ │ ├── consumer.properties
│ │ ├── producer.properties
│ │ └── scram.properties
│ ├── docker-compose.yml
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── confluent.repo
│ │ ├── kafka.sasl.jaas.config
│ │ └── server.properties
│ ├── kdc/
│ │ ├── Dockerfile
│ │ └── krb5.conf
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── confluent.repo
│ ├── zookeeper.properties
│ └── zookeeper.sasl.jaas.config
├── kraft/
│ └── none/
│ ├── docker-compose.yml
│ ├── image/
│ │ └── kafka-images/
│ │ └── kafka/
│ │ ├── Dockerfile
│ │ ├── Dockerfile.ubi8
│ │ ├── README.md
│ │ ├── include/
│ │ │ └── etc/
│ │ │ └── confluent/
│ │ │ └── docker/
│ │ │ ├── configure
│ │ │ ├── ensure
│ │ │ ├── kafka.properties.template
│ │ │ ├── launch
│ │ │ ├── log4j.properties.template
│ │ │ ├── run
│ │ │ └── tools-log4j.properties.template
│ │ ├── pom.xml
│ │ ├── requirements.txt
│ │ ├── setup.py
│ │ ├── test/
│ │ │ ├── fixtures/
│ │ │ │ ├── cluster-bridged-plain.yml
│ │ │ │ ├── cluster-bridged-sasl.yml
│ │ │ │ ├── cluster-bridged-ssl.yml
│ │ │ │ ├── cluster-host-plain.yml
│ │ │ │ ├── cluster-host-sasl.yml
│ │ │ │ ├── cluster-host-ssl.yml
│ │ │ │ ├── secrets/
│ │ │ │ │ ├── bridged.consumer.ssl.config
│ │ │ │ │ ├── bridged.consumer.ssl.sasl.config
│ │ │ │ │ ├── bridged.producer.ssl.config
│ │ │ │ │ ├── bridged.producer.ssl.sasl.config
│ │ │ │ │ ├── bridged_broker1_jaas.conf
│ │ │ │ │ ├── bridged_broker2_jaas.conf
│ │ │ │ │ ├── bridged_broker3_jaas.conf
│ │ │ │ │ ├── bridged_consumer_jaas.conf
│ │ │ │ │ ├── bridged_krb.conf
│ │ │ │ │ ├── bridged_producer_jaas.conf
│ │ │ │ │ ├── broker1-ca1-signed.crt
│ │ │ │ │ ├── broker1_keystore_creds
│ │ │ │ │ ├── broker1_sslkey_creds
│ │ │ │ │ ├── broker1_truststore_creds
│ │ │ │ │ ├── broker2-ca1-signed.crt
│ │ │ │ │ ├── broker2_keystore_creds
│ │ │ │ │ ├── broker2_sslkey_creds
│ │ │ │ │ ├── broker2_truststore_creds
│ │ │ │ │ ├── broker3-ca1-signed.crt
│ │ │ │ │ ├── broker3_keystore_creds
│ │ │ │ │ ├── broker3_sslkey_creds
│ │ │ │ │ ├── broker3_truststore_creds
│ │ │ │ │ ├── client-plain.config
│ │ │ │ │ ├── config_krb.conf
│ │ │ │ │ ├── config_server1_jaas.conf
│ │ │ │ │ ├── consumer-ca1-signed.crt
│ │ │ │ │ ├── consumer_keystore_creds
│ │ │ │ │ ├── consumer_sslkey_creds
│ │ │ │ │ ├── consumer_truststore_creds
│ │ │ │ │ ├── create-certs.sh
│ │ │ │ │ ├── host.consumer.ssl.config
│ │ │ │ │ ├── host.consumer.ssl.sasl.config
│ │ │ │ │ ├── host.producer.ssl.config
│ │ │ │ │ ├── host.producer.ssl.sasl.config
│ │ │ │ │ ├── host_broker1_jaas.conf
│ │ │ │ │ ├── host_broker2_jaas.conf
│ │ │ │ │ ├── host_broker3_jaas.conf
│ │ │ │ │ ├── host_consumer_jaas.conf
│ │ │ │ │ ├── host_krb.conf
│ │ │ │ │ ├── host_producer_jaas.conf
│ │ │ │ │ ├── host_zookeeper_1_jaas.conf
│ │ │ │ │ ├── host_zookeeper_2_jaas.conf
│ │ │ │ │ ├── host_zookeeper_3_jaas.conf
│ │ │ │ │ ├── kafka.broker1.keystore.jks
│ │ │ │ │ ├── kafka.broker1.truststore.jks
│ │ │ │ │ ├── kafka.broker2.keystore.jks
│ │ │ │ │ ├── kafka.broker2.truststore.jks
│ │ │ │ │ ├── kafka.broker3.keystore.jks
│ │ │ │ │ ├── kafka.broker3.truststore.jks
│ │ │ │ │ ├── kafka.consumer.keystore.jks
│ │ │ │ │ ├── kafka.consumer.truststore.jks
│ │ │ │ │ ├── kafka.producer.keystore.jks
│ │ │ │ │ ├── kafka.producer.truststore.jks
│ │ │ │ │ ├── kafkacat-ca1-signed.pem
│ │ │ │ │ ├── kafkacat.client.key
│ │ │ │ │ ├── krb_server.conf
│ │ │ │ │ ├── producer-ca1-signed.crt
│ │ │ │ │ ├── producer-ssl.config
│ │ │ │ │ ├── producer_keystore_creds
│ │ │ │ │ ├── producer_sslkey_creds
│ │ │ │ │ ├── producer_truststore_creds
│ │ │ │ │ ├── snakeoil-ca-1.crt
│ │ │ │ │ └── snakeoil-ca-1.key
│ │ │ │ ├── standalone-config.yml
│ │ │ │ └── standalone-network.yml
│ │ │ └── test_kafka.py
│ │ └── tox.ini
│ └── up
├── ldap/
│ ├── acls/
│ │ └── acls.csv
│ ├── add-user
│ ├── custom/
│ │ ├── 01_base.ldif
│ │ ├── 02_KafkaDevelopers.ldif
│ │ ├── 10_alice.ldif
│ │ ├── 11_barnie.ldif
│ │ ├── 12_charlie.ldif
│ │ └── 20_group_add.ldif
│ ├── docker-compose-with-ssl.yaml
│ ├── docker-compose.yaml
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── alice.properties
│ │ ├── barnie.properties
│ │ ├── charlie.properties
│ │ ├── confluent.repo
│ │ ├── consumer.properties
│ │ ├── jks/
│ │ │ └── .gitignore
│ │ ├── kafka.jaas.config
│ │ ├── kafka.properties
│ │ ├── log4j.properties
│ │ ├── server-with-ssl.properties
│ │ ├── server.properties
│ │ └── users/
│ │ └── purbon.properties
│ ├── ldap/
│ │ ├── certs/
│ │ │ └── .gitignore
│ │ └── custom/
│ │ ├── 01_base.ldif
│ │ ├── 02_KafkaDevelopers.ldif
│ │ ├── 10_alice.ldif
│ │ ├── 11_barnie.ldif
│ │ ├── 12_charlie.ldif
│ │ └── 20_group_add.ldif
│ ├── scripts/
│ │ ├── .gitignore
│ │ └── certs-create.sh
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── confluent.repo
│ ├── zookeeper.properties
│ └── zookeeper.sasl.jaas.config
├── ldap-auth/
│ ├── docker-compose.yaml
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── alice.properties
│ │ ├── barnie.properties
│ │ ├── charlie.properties
│ │ ├── confluent.repo
│ │ ├── kafka.jaas.config
│ │ ├── kafka.properties
│ │ ├── log4j.properties
│ │ └── server.properties
│ ├── ldap/
│ │ └── custom/
│ │ ├── 01_base.ldif
│ │ ├── 02_KafkaDevelopers.ldif
│ │ ├── 03_ProjectA.ldif
│ │ ├── 04_ProjectB.ldif
│ │ ├── 10_alice.ldif
│ │ ├── 11_barnie.ldif
│ │ ├── 12_charlie.ldif
│ │ ├── 13_donald.ldif
│ │ ├── 14_eva.ldif
│ │ ├── 15_fritz.ldif
│ │ ├── 16_greta.ldif
│ │ ├── 17_kafka.ldif
│ │ └── 20_group_add.ldif
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── confluent.repo
│ ├── zookeeper.properties
│ └── zookeeper.sasl.jaas.config
├── multi-sasl/
│ ├── docker-compose.yml
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── confluent.repo
│ │ ├── consumer.plain.properties
│ │ ├── consumer.properties
│ │ ├── kafka.sasl.jaas.config
│ │ └── server.properties
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── confluent.repo
│ ├── zookeeper.properties
│ └── zookeeper.sasl.jaas.config
├── none/
│ ├── docker-compose.yml
│ └── up
├── oauth/
│ ├── .gitignore
│ ├── ca.cnf
│ ├── docker-compose.yml
│ ├── generate_certs.sh
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── client.properties
│ │ ├── confluent.repo
│ │ ├── kafka_server_jaas.conf
│ │ ├── oauthcallbackhandlers/
│ │ │ ├── .gitignore
│ │ │ ├── pom.xml
│ │ │ └── src/
│ │ │ ├── main/
│ │ │ │ └── java/
│ │ │ │ └── io/
│ │ │ │ └── confluent/
│ │ │ │ └── examples/
│ │ │ │ └── authentication/
│ │ │ │ └── oauth/
│ │ │ │ ├── JwtHelper.java
│ │ │ │ ├── MyOauthBearerToken.java
│ │ │ │ ├── OauthBearerLoginCallbackHandler.java
│ │ │ │ └── OauthBearerValidatorCallbackHandler.java
│ │ │ └── test/
│ │ │ └── java/
│ │ │ └── io/
│ │ │ └── confluent/
│ │ │ └── examples/
│ │ │ └── authentication/
│ │ │ └── oauth/
│ │ │ ├── JwtHelperTest.java
│ │ │ └── ProduceDataTest.java
│ │ ├── server.properties
│ │ └── test_produce_and_consume.sh
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── confluent.repo
│ └── zookeeper.properties
├── plain/
│ ├── consumer.properties
│ ├── docker-compose.yml
│ ├── producer.properties
│ └── up
├── quotas/
│ ├── Client/
│ │ ├── Dockerfile
│ │ └── confluent.repo
│ ├── Grafana/
│ │ └── provisioning/
│ │ ├── dashboards/
│ │ │ ├── grafana-dashboard.json
│ │ │ └── one-quota.yaml
│ │ └── datasources/
│ │ └── prometheus.yaml
│ ├── JMX_Exporter/
│ │ ├── jmx_prometheus_javaagent-0.11.0.jar
│ │ ├── kafka_config.yml
│ │ └── zookeeper_config.yml
│ ├── Prometheus/
│ │ └── prometheus.yml
│ ├── docker-compose.yml
│ ├── secrets/
│ │ ├── admin.properties
│ │ ├── kafka_server_jaas.conf
│ │ ├── noquota.properties
│ │ └── quota.properties
│ └── up
├── rbac/
│ ├── README.md
│ ├── client-configs/
│ │ ├── alice.properties
│ │ ├── barnie.properties
│ │ ├── charlie.properties
│ │ ├── copy-props.sh
│ │ ├── donald.properties
│ │ ├── eva.properties
│ │ ├── fritz.properties
│ │ └── greta.properties
│ ├── conf/
│ │ ├── keypair.pem
│ │ └── public.pem
│ ├── create-role-bindings.sh
│ ├── docker-compose.yml
│ ├── functions.sh
│ ├── kafka-registered.sh
│ ├── ldap/
│ │ └── custom/
│ │ ├── 01_base.ldif
│ │ ├── 02_KafkaDevelopers.ldif
│ │ ├── 03_ProjectA.ldif
│ │ ├── 04_ProjectB.ldif
│ │ ├── 10_alice.ldif
│ │ ├── 11_barnie.ldif
│ │ ├── 12_charlie.ldif
│ │ ├── 13_donald.ldif
│ │ ├── 14_eva.ldif
│ │ ├── 15_fritz.ldif
│ │ ├── 16_greta.ldif
│ │ └── 20_group_add.ldif
│ └── up
├── schema-registry/
│ ├── with-basic-auth/
│ │ ├── docker-compose.yml
│ │ ├── jaas_config.file
│ │ ├── password-file
│ │ └── up
│ ├── with-basic-auth-and-ccloud/
│ │ ├── README.md
│ │ ├── docker-compose.yml
│ │ ├── jaas_config.file
│ │ ├── password-file
│ │ └── up
│ └── with-http_and_https/
│ ├── .gitignore
│ ├── README.md
│ ├── docker-compose.yml
│ ├── schema-registry/
│ │ ├── config/
│ │ │ ├── ca.cnf
│ │ │ └── client.cnf
│ │ └── secrets/
│ │ ├── ca-chain.cert.pem
│ │ ├── schema-registry.cert.pem
│ │ ├── schema-registry.key.pem
│ │ ├── schema-registry.keystore
│ │ └── schema-registry.truststore
│ ├── up
│ └── verify.sh
├── scram/
│ ├── admin.properties
│ ├── consumer.properties
│ ├── docker-compose.yml
│ ├── jline-2.14.6.jar
│ ├── kafka.sasl.jaas.config
│ ├── producer.properties
│ ├── up
│ └── zookeeper.sasl.jaas.config
├── secure-jmx/
│ ├── README.md
│ ├── docker-compose.yml
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── confluent.repo
│ │ ├── consumer.properties
│ │ └── server.properties
│ ├── pull-jmx-kafka.sh
│ ├── pull-jmx-zookeeper.sh
│ ├── secrets/
│ │ ├── client.keystore
│ │ ├── client.truststore
│ │ ├── jmxremote.access
│ │ ├── jmxremote.password
│ │ ├── jmxremote.properties
│ │ ├── kafka.keystore
│ │ └── kafka.truststore
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── confluent.repo
│ ├── jmxremote.access
│ ├── jmxremote.password
│ ├── jmxremote.properties
│ └── zookeeper.properties
├── tls/
│ ├── .gitignore
│ ├── ca.cnf
│ ├── client.cnf
│ ├── docker-compose.yml
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── confluent.repo
│ │ ├── consumer.properties
│ │ ├── kafkacat
│ │ ├── kafkacat.conf
│ │ └── server.properties
│ ├── kafkacat.conf
│ ├── local-client.cnf
│ ├── schema-registry/
│ │ ├── Dockerfile
│ │ ├── confluent.repo
│ │ └── schema-registry.properties
│ ├── schema-registry-client.cnf
│ ├── server.cnf
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── confluent.repo
│ └── zookeeper.properties
└── tls-with-ocrl/
├── .gitignore
├── README.md
├── certs/
│ ├── broker.keystore
│ ├── broker.truststore
│ ├── client.keystore
│ └── client.truststore
├── docker-compose.yml
├── kafka/
│ ├── Dockerfile
│ ├── confluent.repo
│ ├── consumer.properties
│ └── server.properties
├── up
├── web/
│ └── crls.pem
└── zookeeper/
├── Dockerfile
├── confluent.repo
└── zookeeper.properties
================================================
FILE CONTENTS
================================================
================================================
FILE: .gitignore
================================================
.envrc
.java-version
kerberos-multi-node/TODO
.idea
================================================
FILE: KerberosCheatsheet.md
================================================
# Kerberos Cheat Sheet
## Introduction
This cheat sheet contains common commands regarding Kerberos administration and troubleshooting.
## User commands
### List current principal and ticket held in credential cache
```bash
$> klist
Ticket cache: FILE:/tmp/krb5cc_0
Default principal: kafka_producer/producer@TEST.CONFLUENT.IO
Valid starting Expires Service principal
05/23/18 08:56:59 05/24/18 08:56:59 krbtgt/TEST.CONFLUENT.IO@TEST.CONFLUENT.IO
```
### Obtain and cache a ticket for a principal
```bash
$> kinit kafka/admin
Password for kafka/admin@TEST.CONFLUENT.IO:
```
### Obtain and cache a ticket for a principal from a keytab
```bash
$> kinit -k -t /var/lib/secret/kafka.key kafka/admin
```
### List credentials contained in a keytab
```bash
$> klist -k -t /var/lib/secret/kafka.key
Keytab name: FILE:/var/lib/secret/kafka.key
KVNO Timestamp Principal
---- ----------------- --------------------------------------------------------
2 05/23/18 08:56:43 zookeeper/zookeeper.kerberos_default@TEST.CONFLUENT.IO
2 05/23/18 08:56:43 zookeeper/zookeeper.kerberos_default@TEST.CONFLUENT.IO
2 05/23/18 08:56:43 kafka/admin@TEST.CONFLUENT.IO
2 05/23/18 08:56:43 kafka/admin@TEST.CONFLUENT.IO
2 05/23/18 08:56:43 kafka/kafka.kerberos_default@TEST.CONFLUENT.IO
2 05/23/18 08:56:43 kafka/kafka.kerberos_default@TEST.CONFLUENT.IO
2 05/23/18 08:56:43 kafka/zookeeper@TEST.CONFLUENT.IO
2 05/23/18 08:56:43 kafka/zookeeper@TEST.CONFLUENT.IO
2 05/23/18 08:56:43 kafka_consumer/consumer@TEST.CONFLUENT.IO
2 05/23/18 08:56:43 kafka_consumer/consumer@TEST.CONFLUENT.IO
2 05/23/18 08:56:43 kafka_producer/producer@TEST.CONFLUENT.IO
2 05/23/18 08:56:43 kafka_producer/producer@TEST.CONFLUENT.IO
```
### Destroy credential cache
```bash
$> kdestroy
```
## Administration commands
### Adding a new principal to the KDC database
```bash
$> kadmin.local -w password -q "add_principal -pw my_password kafka/zookeeper@TEST.CONFLUENT.IO"
WARNING: no policy specified for test@TEST.CONFLUENT.IO; defaulting to no policy
Principal "kafka/zookeeper@TEST.CONFLUENT.IO" created
```
### Adding a new principal to the KDC database with a random key
```bash
$> kadmin.local -w password -q "add_principal -randkey kafka/zookeeper@TEST.CONFLUENT.IO"
WARNING: no policy specified for test@TEST.CONFLUENT.IO; defaulting to no policy
Principal "kafka/zookeeper@TEST.CONFLUENT.IO" created
```
### Exporting principals to a keytab
```bash
$> kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka.key -glob kafka/*"
Entry for principal kafka/admin@TEST.CONFLUENT.IO with kvno 3, encryption type aes256-cts-hmac-sha1-96 added to keytab FILE:/etc/krb5.keytab.
Entry for principal kafka/admin@TEST.CONFLUENT.IO with kvno 3, encryption type aes128-cts-hmac-sha1-96 added to keytab FILE:/etc/krb5.keytab.
Entry for principal kafka/kafka.kerberos_default@TEST.CONFLUENT.IO with kvno 3, encryption type aes256-cts-hmac-sha1-96 added to keytab FILE:/etc/krb5.keytab.
Entry for principal kafka/kafka.kerberos_default@TEST.CONFLUENT.IO with kvno 3, encryption type aes128-cts-hmac-sha1-96 added to keytab FILE:/etc/krb5.keytab.
Entry for principal kafka/zookeeper@TEST.CONFLUENT.IO with kvno 3, encryption type aes256-cts-hmac-sha1-96 added to keytab FILE:/etc/krb5.keytab.
Entry for principal kafka/zookeeper@TEST.CONFLUENT.IO with kvno 3, encryption type aes128-cts-hmac-sha1-96 added to keytab FILE:/etc/krb5.keytab
```
================================================
FILE: README.md
================================================
# Kafka security playbook
This repository contains a set of docker images to demonstrate the security configuration of Kafka and the Confluent Platform. The purpose of this repository is **NOT** to provide production-ready images. It has been designed to be used as an example and to assist people in configuring the security modules of Apache Kafka.
All images have been created from scratch without reusing previously created images, to emphasize code and configuration readability over best practices. For official images, I would recommend relying on the [Docker Images for the Confluent Platform](https://github.com/confluentinc/cp-docker-images)
## Plain authentication (challenge response)
Plain authentication is a simple mechanism based on username/password. It should be used with TLS for encryption to implement secure authentication. This playbook contains a simple configuration where SASL-Plain authentication is used for Kafka.
### Usage
```bash
cd plain
./up
kafka-console-producer --broker-list kafka:9093 --producer.config /etc/kafka/consumer.properties --topic test
kafka-console-consumer --bootstrap-server kafka:9093 --consumer.config /etc/kafka/consumer.properties --topic test --from-beginning
```
### Important configuration files
<details>
<summary><a href="plain/kafka/server.properties">kafka server.properties</a></summary>,
<pre>
sasl.enabled.mechanisms=PLAIN
sasl.mechanism.inter.broker.protocol=PLAIN
allow.everyone.if.no.acl.found=false
super.users=User:kafka
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
</pre>
</details>
<details>
<summary><a href="plain/kafka/consumer.properties">kafka consumer and producer configuration</a></summary>
<pre>
sasl.mechanism=PLAIN
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
username="kafka" \
password="kafka";
</pre>
</details>
<details>
<summary><a href="plain/kafka/kafka.jaas.config">kafka server jaas configuration</a></summary>
<pre>
KafkaServer {
org.apache.kafka.common.security.plain.PlainLoginModule required
username="kafka"
password="kafka"
user_kafka="kafka"
user_producer="producer-secret"
user_consumer="consumer-secret";
};
</pre>
</details>
#### For further information
* [Confluent documentation on SASL Plain](https://docs.confluent.io/current/kafka/authentication_sasl_plain.html)
## Scram authentication (challenge response)
Scram is an authentication mechanism that performs username/password authentication in a secure way. This playbook contains a simple configuration where SASL-Scram authentication is used for Zookeeper and Kafka. In it:
* kafka use a username/password to connect to zookeeper
* consumer and producer must use a username/password to access the cluster
### Usage
```bash
cd scram
# Scripts starting the docker services and generating the kafka user
./up
docker-compose exec kafka kafka-console-producer --broker-list kafka:9093 --producer.config /etc/kafka/consumer.properties --topic test
docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9093 --consumer.config /etc/kafka/consumer.properties --topic test --from-beginning
```
### Important configuration files
<details>
<summary><a href="scram/kafka/server.properties">kafka server.properties</a></summary>
<pre>
sasl.enabled.mechanisms=SCRAM-SHA-256
sasl.mechanism.inter.broker.protocol=SCRAM-SHA-256
security.inter.broker.protocol=SASL_PLAINTEXT
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
</pre>
</details>
<details>
<summary><a href="scram/kafka/consumer.properties">kafka consumer and producer configuration</a></summary>
<pre>
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="kafka" \
password="kafka";
</pre>
</details>
<details>
<summary><a href="scram/kafka/kafka.sasl.jaas.config">kafka server jaas configuration</a></summary>
<pre>
KafkaServer {
org.apache.kafka.common.security.scram.ScramLoginModule required
username="kafka"
password="kafka";
};
</pre>
</details>
#### For further information
* [Confluent documentation on SASL Scram](https://docs.confluent.io/current/kafka/authentication_sasl_scram.html)
* [Zookeeper documentation on SASL Scram](https://cwiki.apache.org/confluence/display/ZOOKEEPER/Client-Server+mutual+authentication)
## TLS with x509 authentication
TLS, previously known as SSL, is a cryptographic protocol providing network encryption via asymmetric certificates and keys.
This playbook contains a basic configuration to enforce TLS between the broker and a client. Be aware that, at the time of writing, ZooKeeper has not released TLS support as an official feature, so only the broker is configured for TLS. In this playbook, TLS is used for encryption, authentication, and authorization. The _up_ script generates the following files before starting the docker-compose services:
1. __certs/ca.key, certs/ca.crt__ - public and private key of the certificate authority
2. __certs/server.keystore.jks__ - keystore containing the signed certificate of the kafka broker
3. __certs/client.keystore.jks__ - keystore containing the signed certificate of a kafka client. It has been granted super user permission
### Usage
```bash
cd tls
# Scripts generating the required certificate and starting docker-compose services
./up
docker-compose exec kafka kafka-console-producer --broker-list kafka.confluent.local:9093 --topic test --producer.config /etc/kafka/consumer.properties
docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka.confluent.local:9093 --topic test --consumer.config /etc/kafka/consumer.properties --from-beginning
#Avro consumer/producer using schema registry
docker-compose exec kafka kafka-avro-console-producer --broker-list kafka.confluent.local:9093 --topic avro_test --property value.schema='{"type":"record","name":"myrecord","fields":[{"name":"f1","type":"string"}]}' --property schema.registry.url=https://schema-registry.confluent.local:8443 --producer.config /etc/kafka/consumer.properties
#example message: {"f1": "value1"}
kafka-avro-console-consumer --topic avro_test --from-beginning --property schema.registry.url=https://schema-registry.confluent.local:8443 --consumer.config /etc/kafka/consumer.properties --bootstrap-server kafka.confluent.local:9093
```
To connect from a producer/consumer running on your local machine:
```bash
docker-compose exec kafka kafka-acls --authorizer-properties zookeeper.connect=zookeeper.confluent.local:2181 --add --allow-principal User:CN=<YOUR LOCAL HOSTNAME>,L=London,O=Confluent,C=UK --operation All --topic '*' --cluster;
```
Set the following JVM parameters:
```
-Djavax.net.ssl.keyStore=<kafka-security-playbook DIR>/tls/certs/local-client.keystore.jks
-Djavax.net.ssl.trustStore=<kafka-security-playbook DIR>/tls/certs/truststore.jks
-Djavax.net.ssl.keyStorePassword=test1234
-Djavax.net.ssl.trustStorePassword=test1234
```
### Important configuration files
<details>
<summary><a href="tls/kafka/server.properties"> kafka server.properties</a></summary>
<pre>
listeners=SSL://kafka.confluent.local:9093
advertised.listeners=SSL://kafka.confluent.local:9093
security.inter.broker.protocol=SSL
ssl.truststore.location=/var/lib/secret/truststore.jks
ssl.truststore.password=test1234
ssl.keystore.location=/var/lib/secret/server.keystore.jks
ssl.keystore.password=test1234
ssl.client.auth=required
# To use TLS based authorization
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
super.users=User:CN=kafka.confluent.local,L=London,O=Confluent,C=UK
</pre>
</details>
<details>
<summary><a href="tls/kafka/consumer.properties">kafka consumer and producer configuration</a></summary>
<pre>
bootstrap.servers=kafka.confluent.local:9093
security.protocol=SSL
ssl.truststore.location=/var/lib/secret/truststore.jks
ssl.truststore.password=test1234
ssl.keystore.location=/var/lib/secret/client.keystore.jks
ssl.keystore.password=test1234
ssl.key.password=test1234
</pre>
</details>
#### For further information
* [kafka documentation on TLS](http://kafka.apache.org/documentation.html#security_ssl)
* [Confluent documentation on TLS authentication](https://docs.confluent.io/current/kafka/authentication_ssl.html)
* [Confluent documentation on TLS key generation](https://docs.confluent.io/current/tutorials/security_tutorial.html#generating-keys-certs)
## Kerberos (GSSAPI) authentication without TLS
This example contains a basic KDC server and configures both zookeeper and kafka with Kerberos authentication and authorization. Credentials are created without passwords; a keytab containing credentials is available in a Docker volume named "secret". The following credentials are automatically created in the KDC database:
1. __kafka/admin__ - to access zookeeper
2. __kafka_producer/producer__ - to access kafka as a producer
3. __kafka_consumer/consumer__ - to access kafka as a consumer
### Usage
```bash
cd kerberos
# Scripts orchestrating the docker-compose services
./up
# Using kinit with a keytab for authentication then invoking kafka interfaces
docker-compose exec kafka bash -c 'kinit -k -t /var/lib/secret/kafka.key kafka_producer/producer && kafka-console-producer --broker-list kafka:9093 --topic test --producer.config /etc/kafka/consumer.properties'
docker-compose exec kafka bash -c 'kinit -k -t /var/lib/secret/kafka.key kafka_consumer/consumer && kafka-console-consumer --bootstrap-server kafka:9093 --topic test --consumer.config /etc/kafka/consumer.properties --from-beginning'
```
### Important configuration files
<details>
<summary><a href="kerberos/zookeeper/zookeeper.properties">zookeeper properties</a></summary>
<pre>
authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider
requireClientAuthScheme=sasl
</pre>
</details>
<details>
<summary><a href="kerberos/zookeeper/zookeeper.sasl.jaas.config">zookeeper server and client jaas configuration</a></summary>
<pre>
Server {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
useTicketCache=false
keyTab="/var/lib/secret/kafka.key"
principal="zookeeper/zookeeper.kerberos_default@TEST.CONFLUENT.IO";
};
</pre>
</details>
<details>
<summary><a href="kerberos/kafka/server.properties">kafka server.properties</a></summary>
<pre>
listeners=SASL_PLAINTEXT://kafka:9093
advertised.listeners=SASL_PLAINTEXT://kafka:9093
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.enabled.mechanisms=GSSAPI
sasl.mechanism.inter.broker.protocol=GSSAPI
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.kerberos.service.name=kafka
allow.everyone.if.no.acl.found=false
super.users=User:admin;User:kafka
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
</pre>
</details>
<details>
<summary><a href="kerberos/kafka/kafka.sasl.jaas.config">kafka server and client jaas configuration</a></summary>
<pre>
/*
* Cluster kerberos services
*/
KafkaServer {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/var/lib/secret/kafka.key"
principal="kafka/kafka.kerberos_default@TEST.CONFLUENT.IO";
};
/*
 * For client and broker identification
*/
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/var/lib/secret/kafka.key"
principal="admin/kafka.kerberos_default@TEST.CONFLUENT.IO";
};
/*
* For Zookeeper authentication
*/
Client {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
useTicketCache=false
keyTab="/var/lib/secret/kafka.key"
principal="kafka/kafka.kerberos_default@TEST.CONFLUENT.IO";
};
</pre>
</details>
<details>
<summary><a href="kerberos/kafka/consumer.properties">kafka consumer and producer configuration</a></summary>
<pre>
bootstrap.servers=kafka:9093
security.protocol=SASL_PLAINTEXT
sasl.kerberos.service.name=kafka
sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
useTicketCache=true
</pre>
</details>
#### For further information
* [Confluent documentation on GSSAPI authentication](https://docs.confluent.io/current/kafka/authentication_sasl_gssapi.html)
* [Confluent documentation on ACL](https://docs.confluent.io/current/kafka/authorization.html)
## Oauth authentication via TLS encryption
Kafka supports SASL authentication via Oauth bearer tokens. A sample playbook for secured oauth token authentication is contained in the oauth subfolder of this repository.
### Usage
Prerequisites: jdk8, maven, docker-compose, openssl.
```bash
cd oauth
./up
```
In this sample playbook both the identity of brokers (`sasl.mechanism.inter.broker.protocol=OAUTHBEARER` within server.properties) and the identity of clients (`sasl.mechanism=OAUTHBEARER` within consumer.properties) are verified by the brokers using oauth bearer tokens.
Within this sample playbook oauth bearer tokens are generated and validated using the `jjwt` library without communication to an authorization server. In real life, this would be different.
The class `OauthBearerLoginCallbackHandler` is used by the clients and by brokers to generate a JWT token using a shared secret. This class is configured within the `client.properties` file:
Note that the client does not need to have a keystore configured, since client authentication is achieved using bearer tokens.
Still, it needs a truststore to store the brokers' certificate authorities.
<details>
<summary><a href="oauth/kafka/client.properties">kafka consumer and producer configuration</a></summary>
<pre>
security.protocol=SASL_SSL
sasl.mechanism=OAUTHBEARER
sasl.login.callback.handler.class=io.confluent.examples.authentication.oauth.OauthBearerLoginCallbackHandler
ssl.truststore.location=/etc/kafka/kafka.client.truststore.jks
ssl.truststore.password=secret
</pre>
</details>
The `OauthBearerLoginCallbackHandler` class is also configured for broker clients within the `server.properties` file (see below). The `server.properties` file must also include a reference to the token validator class (`OauthBearerValidatorCallbackHandler`):
<details>
<summary><a href="oauth/kafka/server.properties">kafka broker configuration</a></summary>
<pre>
listeners=SASL_SSL://kafka.confluent.local:9093
advertised.listeners=SASL_SSL://kafka.confluent.local:9093
security.inter.broker.protocol=SASL_SSL
sasl.mechanism.inter.broker.protocol=OAUTHBEARER
sasl.enabled.mechanisms=OAUTHBEARER
listener.name.sasl_ssl.oauthbearer.sasl.server.callback.handler.class=io.confluent.examples.authentication.oauth.OauthBearerValidatorCallbackHandler
listener.name.sasl_ssl.oauthbearer.sasl.login.callback.handler.class=io.confluent.examples.authentication.oauth.OauthBearerLoginCallbackHandler
ssl.truststore.location=/etc/kafka/kafka.server.truststore.jks
ssl.truststore.password=secret
ssl.keystore.location=/etc/kafka/kafka.server.keystore.jks
ssl.keystore.password=secret
ssl.key.password=secret
</pre>
</details>
Kafka brokers need a keystore to store its private certificate as well as a truststore to verify the identity of other brokers.
### Further information
* [Confluent documentation on Oauth authentication](https://docs.confluent.io/current/kafka/authentication_sasl/authentication_sasl_oauth.html)
* [Blog Post](https://medium.com/@jairsjunior/how-to-setup-oauth2-mechanism-to-a-kafka-broker-e42e72839fe)
## Schema registry basic security
According to documentation the schema registry plugin only supports SSL principals, but there is an undocumented separate authentication possibility via Jetty Authentication.
```bash
cd schema-registry-basic-auth
./up
```
Now you can access the schema registry REST interface on `http://localhost:8089`
Note that in order to test the schema registry properly, you need to either `curl` into it, or use the `kafka-avro-console-producer` and consumer. The latter require special considerations.
First, access via `curl`:
```
curl -X GET http://localhost:8089 -u admin:admin
```
If you want to try out the console producer, you need to exec into the schema-registry docker image and then run the producer:
```
docker-compose exec schema-registry bash
kafka-avro-console-producer --broker-list kafka:9092 --topic avro-test --property \
value.schema='{"type":"record","name":"myrecord","fields":[{"name":"f1","type":"string"}]}' \
--property basic.auth.credentials.source=USER_INFO \
--property schema.registry.basic.auth.user.info=write:write
> {"f1": "value1"}
> {"f1": "value2"}
> ^D
```
Note that the official documentation is wrong on two accounts. First, to define the source, you need to use `basic.auth.credentials.source` without the `schema.registry` in front of it.
Second, user authentication via a property file gets ignored, you need to pass the credentials via `--property`.
## Schema registry semi-open security
This playbook is an example of configuration where Schema Registry is configured for accepting request on `http` and `https`.
Requests on the `http` endpoint are actually identified as the `ANONYMOUS` user. This is possible thanks to the `confluent.schema.registry.anonymous.principal=true` option.
The following ACLs are configured:
- `sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -s '*' -p 'ANONYMOUS' -o 'SUBJECT_READ'`
- `sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -p 'ANONYMOUS' -o 'GLOBAL_SUBJECTS_READ'`
- `sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -p 'ANONYMOUS' -o 'GLOBAL_COMPATIBILITY_READ'`
- `sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -s '*' -p 'C=UK,O=Confluent,L=London,CN=schema-registry' -o '*'`
With this configuration, ` curl -X GET http://localhost:8089/subjects/` is successful, but the `ANONYMOUS` user does not have the privileges to write new schemas.
Only the client with the TLS client certificate `C=UK,O=Confluent,L=London,CN=schema-registry` can write new schemas, this could be for example your CI tool or an admin user.
================================================
FILE: TlsCheatsheet.md
================================================
# TLS Cheat Sheet
## Introduction
This cheat sheet contains common commands regarding TLS certificate generation and TLS troubleshooting. If you are looking for a script to generate a keystore, certificate authority and certificates, I recommend you to check out [confluent kafka-generate-ssl.sh script](https://github.com/confluentinc/confluent-platform-security-tools/blob/master/kafka-generate-ssl.sh)
## Generating self-signed certificates or a new Certificate Authority
```bash
openssl req -new -nodes -x509 -days 3650 -newkey rsa:2048 -keyout server.key -out certs/server.crt -config $CONFIG_PATH
```
## Generating certificate signing request
```bash
openssl req -new -newkey rsa:2048 -keyout server.key -out server.csr -config $CONFIG_PATH -nodes
```
## Displaying content of a signing request
```bash
openssl req -text -in $CERT
```
## Displaying content of a certificate that a server presents
```bash
openssl s_client -showcerts -connect www.example.com:443
```
## Verifying that server certificate was signed by a CA
```bash
curl --cacert /var/lib/secret/ca.pem https://www.example.com:8443
```
## Signing certificate signing request
```bash
openssl x509 -req -days $DURATION -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt -extfile $CONFIG_PATH
```
## Generate a signed certificate with keytool
```bash
# First create the keystore
keytool -keystore keystore.server.jks -alias server -validity $DURATION -genkey -keyalg RSA
# Generate a certificate signing request and export it to a file
keytool -keystore keystore.server.jks -alias server -certreq -file $KEYSTORE_SIGN_REQUEST
# Sign the certificate request with OpenSSL and a CA
openssl x509 -req -CA ca.crt -CAkey ca.key -in $KEYSTORE_SIGN_REQUEST -out $KEYSTORE_SIGNED_CERT -days $VALIDITY_IN_DAYS -CAcreateserial
# Importing the signed certificate to the keystore
keytool -keystore $KEYSTORE_WORKING_DIRECTORY/$KEYSTORE_FILENAME -alias localhost -import -file $KEYSTORE_SIGNED_CERT
```
## Displaying content of a signed certificate
```bash
openssl x509 -text -in $CERT
```
## Importing signed certificate with its private key into a keystore
```bash
# Exporting certificate to PKCS12 format
openssl pkcs12 -export -in server.crt -inkey server.key -chain -CAfile ca.pem -name "kafka.confluent.local" -out server.p12 -password pass:$PASSWORD
# Importing PKCS12 into another keystore (or create it)
keytool -importkeystore -deststorepass $PASSWORD -destkeystore server.keystore.jks -srckeystore server.p12 -deststoretype PKCS12 -srcstoretype PKCS12 -noprompt -srcstorepass $PASSWORD
```
## Import certificate into a truststore
```bash
keytool -keystore truststore.jks -alias $ALIAS -import -file $CRT_FILE -storepass $PASSWORD -noprompt -storetype PKCS12
```
## Example of OpenSSL configuration file to generate a CA
```
[ policy_match ]
countryName = match
stateOrProvinceName = match
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
prompt = no
distinguished_name = dn
default_md = sha256
default_bits = 4096
x509_extensions = v3_ca
[ dn ]
countryName = UK
organizationName = Confluent
localityName = London
commonName = kafka.confluent.local
[ v3_ca ]
subjectKeyIdentifier=hash
basicConstraints = critical,CA:true
authorityKeyIdentifier=keyid:always,issuer:always
keyUsage = critical,keyCertSign,cRLSign
```
## Example of OpenSSL configuration file to generate a server certificate
```
[req]
prompt = no
distinguished_name = dn
default_md = sha256
default_bits = 4096
req_extensions = v3_req
[ dn ]
countryName = UK
organizationName = Confluent
localityName = London
commonName=kafka.confluent.local
[ v3_req ]
subjectKeyIdentifier = hash
basicConstraints = CA:FALSE
nsComment = "OpenSSL Generated Certificate"
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth, clientAuth
subjectAltName = @alt_names
[ alt_names ]
DNS.1=kafka.confluent.local
```
## Example of OpenSSL configuration file to generate a client certificate
```
[req]
prompt = no
distinguished_name = dn
default_md = sha256
default_bits = 4096
req_extensions = v3_req
[ dn ]
countryName = UK
organizationName = Confluent
localityName = London
commonName=kafka.confluent.local
[ v3_req ]
subjectKeyIdentifier = hash
basicConstraints = CA:FALSE
nsComment = "OpenSSL Generated Certificate"
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
```
================================================
FILE: acls/docker-compose.yaml
================================================
---
# Two-broker Confluent Kafka cluster secured with SASL/SCRAM, backed by a
# SASL-enabled ZooKeeper. SCRAM credentials and ACLs are provisioned by ./up.
version: '3'
services:
  zookeeper:
    image: confluentinc/cp-zookeeper:5.4.0
    hostname: zookeeper
    container_name: zookeeper
    environment:
      ZOOKEEPER_SERVER_ID: 1
      ZOOKEEPER_CLIENT_PORT: 2182
      ZOOKEEPER_TICK_TIME: "2000"
      KAFKA_JMX_PORT: 9999
      KAFKA_JMX_HOSTNAME: localhost
      # Server-side SASL login comes from the mounted JAAS file; the auth
      # provider enables SASL authentication of connecting clients (the brokers).
      KAFKA_OPTS: -Djava.security.auth.login.config=/tmp/zookeeper.sasl.jaas.conf
        -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
    ports:
      - 2182:2182
    volumes:
      - "$PWD/zookeeper.sasl.jaas.conf:/tmp/zookeeper.sasl.jaas.conf"
  # Broker 1: INTERNAL listener (kafka-1:19093) for inter-broker and metrics
  # traffic, EXTERNAL listener advertised as localhost:9093 for host clients.
  kafka-1:
    build: kafka/
    hostname: kafka-1
    container_name: kafka-1
    depends_on:
      - zookeeper
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2182
      KAFKA_LISTENERS: INTERNAL://kafka-1:19093, EXTERNAL://0.0.0.0:9093
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:SASL_PLAINTEXT,EXTERNAL:SASL_PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka-1:19093, EXTERNAL://localhost:9093
      KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
      # Confluent metrics reporter authenticates to its own broker with SCRAM.
      KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
      KAFKA_CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: kafka-1:19093
      KAFKA_CONFLUENT_METRICS_REPORTER_SASL_MECHANISM: "SCRAM-SHA-256"
      KAFKA_CONFLUENT_METRICS_REPORTER_SECURITY_PROTOCOL: SASL_PLAINTEXT
      KAFKA_CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_SASL_ENABLED_MECHANISMS: "SCRAM-SHA-256"
      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: "SCRAM-SHA-256"
      # Protect broker-created znodes with ZooKeeper ACLs.
      KAFKA_ZOOKEEPER_SET_ACL: "true"
      # Deny-by-default authorization; only the listed super users bypass ACLs.
      KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer
      KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "false"
      KAFKA_SUPER_USERS: "User:kafka;User:admin"
      KAFKA_JMX_PORT: 9999
      KAFKA_JMX_HOSTNAME: kafka-1
      KAFKA_OPTS: "-Djava.security.auth.login.config=/tmp/kafka.sasl.jaas.conf"
    ports:
      - 9093:9093
  # Broker 2: same layout as broker 1, on ports 19094 (internal) / 9094 (external).
  kafka-2:
    build: kafka/
    hostname: kafka-2
    container_name: kafka-2
    depends_on:
      - zookeeper
    environment:
      KAFKA_BROKER_ID: 2
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2182
      KAFKA_LISTENERS: INTERNAL://kafka-2:19094, EXTERNAL://0.0.0.0:9094
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:SASL_PLAINTEXT,EXTERNAL:SASL_PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka-2:19094, EXTERNAL://localhost:9094
      KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
      KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
      KAFKA_CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: kafka-2:19094
      KAFKA_CONFLUENT_METRICS_REPORTER_SASL_MECHANISM: "SCRAM-SHA-256"
      KAFKA_CONFLUENT_METRICS_REPORTER_SECURITY_PROTOCOL: SASL_PLAINTEXT
      KAFKA_CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_SASL_ENABLED_MECHANISMS: "SCRAM-SHA-256"
      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: "SCRAM-SHA-256"
      KAFKA_ZOOKEEPER_SET_ACL: "true"
      KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer
      KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "false"
      KAFKA_SUPER_USERS: "User:kafka;User:admin"
      KAFKA_JMX_PORT: 9999
      KAFKA_JMX_HOSTNAME: kafka-2
      KAFKA_OPTS: "-Djava.security.auth.login.config=/tmp/kafka.sasl.jaas.conf"
    ports:
      - 9094:9094
================================================
FILE: acls/kafka/Dockerfile
================================================
FROM confluentinc/cp-enterprise-kafka:5.4.0
# MAINTAINER is deprecated since Docker 1.13; LABEL is the supported replacement.
LABEL maintainer="sven@confluent.io"

# Make sure the log directory is world-writable so the authorizer file appender
# (see log4j.properties.template) can write regardless of the runtime user.
RUN echo "===> Creating authorizer logs dir ..." \
    && mkdir -p /var/log/kafka-auth-logs \
    && chmod -R a+w /var/log/kafka-auth-logs

# Ship the authorizer-aware log4j template and the SASL client/server configs.
COPY log4j.properties.template /etc/confluent/docker/log4j.properties.template
COPY *.conf /tmp/
================================================
FILE: acls/kafka/admin.conf
================================================
# Client configuration for the super user "admin" (SASL/SCRAM over plaintext).
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
# JAAS login: SCRAM credentials are created by the ./up provisioning script.
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
  username="admin" \
  password="admin-pass";
================================================
FILE: acls/kafka/consumer.conf
================================================
# Client configuration for the "consumer" user (SASL/SCRAM over plaintext).
# ./up grants this principal --consumer ACLs on all topics and groups.
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
  username="consumer" \
  password="consumer-pass";
================================================
FILE: acls/kafka/kafka.conf
================================================
# Client configuration for the broker super user "kafka" (SASL/SCRAM over plaintext).
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
  username="kafka" \
  password="kafka-pass";
================================================
FILE: acls/kafka/kafka.sasl.jaas.conf
================================================
// JAAS contexts for the broker JVM (loaded via -Djava.security.auth.login.config).
// KafkaServer: credentials the broker's SASL/SCRAM listeners authenticate with.
KafkaServer {
  org.apache.kafka.common.security.scram.ScramLoginModule required
  username="kafka"
  password="kafka-pass";
};
// KafkaClient: credentials for broker-embedded clients (e.g. the metrics reporter).
KafkaClient {
  org.apache.kafka.common.security.scram.ScramLoginModule required
  username="kafka"
  password="kafka-pass";
};
// Client: DIGEST-MD5 login used when the broker connects to ZooKeeper;
// must match a user defined in zookeeper.sasl.jaas.conf.
Client {
  org.apache.zookeeper.server.auth.DigestLoginModule required
  username="admin"
  password="password";
};
================================================
FILE: acls/kafka/kafkacat.conf
================================================
# kafkacat/librdkafka client config authenticating as the super user "kafka".
# Note: librdkafka uses "sasl.mechanisms" (plural), unlike the Java clients.
security.protocol=SASL_PLAINTEXT
sasl.mechanisms=SCRAM-SHA-256
sasl.username=kafka
sasl.password=kafka-pass
================================================
FILE: acls/kafka/log4j.properties.template
================================================
# Log4j template rendered by the Confluent docker entrypoint (Jinja2 syntax).
log4j.rootLogger={{ env["KAFKA_LOG4J_ROOT_LOGLEVEL"] | default('INFO') }}, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
# Daily-rolling file appender for authorizer audit events; the target directory
# is created world-writable in the Dockerfile.
log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.authorizerAppender.File=/var/log/kafka-auth-logs/kafka-authorizer.log
log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
# Keep authorizer events out of the root (stdout) logger; they only go to the file.
log4j.additivity.kafka.authorizer.logger=false
# Default per-logger levels; KAFKA_LOG4J_LOGGERS may override/extend them.
{% set loggers = {
  'kafka': 'INFO',
  'kafka.network.RequestChannel$': 'WARN',
  'kafka.producer.async.DefaultEventHandler': 'DEBUG',
  'kafka.request.logger': 'WARN',
  'kafka.controller': 'TRACE',
  'kafka.log.LogCleaner': 'INFO',
  'state.change.logger': 'TRACE',
  'kafka.authorizer.logger': 'DEBUG, authorizerAppender'
  } -%}
{% if env['KAFKA_LOG4J_LOGGERS'] %}
{% set loggers = parse_log4j_loggers(env['KAFKA_LOG4J_LOGGERS'], loggers) %}
{% endif %}
{% for logger,loglevel in loggers.iteritems() %}
log4j.logger.{{logger}}={{loglevel}}
{% endfor %}
================================================
FILE: acls/kafka/producer.conf
================================================
# Client configuration for the "producer" user (SASL/SCRAM over plaintext).
# ./up grants this principal --producer ACLs on all topics.
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
  username="producer" \
  password="producer-pass";
================================================
FILE: acls/up
================================================
#!/bin/sh
# Bring up the SASL/SCRAM-secured two-broker cluster, create the SCRAM users
# in ZooKeeper and grant producer/consumer ACLs.
docker-compose up -d --build

# Creating the SCRAM credentials (both SHA-256 and SHA-512) for each user.
# "kafka" and "admin" are configured as super users, no need for additional ACL.
docker-compose exec kafka-1 kafka-configs --zookeeper zookeeper:2182 --alter --add-config 'SCRAM-SHA-256=[password=kafka-pass],SCRAM-SHA-512=[password=kafka-pass]' --entity-type users --entity-name kafka
docker-compose exec kafka-1 kafka-configs --zookeeper zookeeper:2182 --alter --add-config 'SCRAM-SHA-256=[password=admin-pass],SCRAM-SHA-512=[password=admin-pass]' --entity-type users --entity-name admin
docker-compose exec kafka-1 kafka-configs --zookeeper zookeeper:2182 --alter --add-config 'SCRAM-SHA-256=[password=producer-pass],SCRAM-SHA-512=[password=producer-pass]' --entity-type users --entity-name producer
docker-compose exec kafka-1 kafka-configs --zookeeper zookeeper:2182 --alter --add-config 'SCRAM-SHA-256=[password=consumer-pass],SCRAM-SHA-512=[password=consumer-pass]' --entity-type users --entity-name consumer

# ACLs. The wildcards are quoted so the host shell cannot glob-expand them
# against files in the current directory before they reach kafka-acls.
docker-compose exec kafka-1 kafka-acls --authorizer-properties zookeeper.connect=zookeeper:2182 --add --allow-principal User:producer --producer --topic='*'
docker-compose exec kafka-1 kafka-acls --authorizer-properties zookeeper.connect=zookeeper:2182 --add --allow-principal User:consumer --consumer --topic='*' --group='*'

# Print ready-to-copy example commands for the user.
echo "Example configuration:"
echo "-> kafka-console-producer --broker-list localhost:9093 --producer.config kafka/producer.conf --topic test"
echo "-> kafka-console-consumer --bootstrap-server localhost:9094 --consumer.config kafka/consumer.conf --topic test --from-beginning"
echo "ZooKeeper shell with authorization from host:"
echo "-> KAFKA_OPTS=\"-Djava.security.auth.login.config=zookeeper.sasl.jaas.conf\" zookeeper-shell localhost:2182"
echo "ZooKeeper shell with authorization within container (KAFKA_OPTS already set):"
echo "-> docker-compose exec kafka-1 zookeeper-shell zookeeper:2182"
echo "Kafkacat with authorization from host:"
echo "-> kafkacat -L -b localhost:9094 -F kafka/kafkacat.conf"
================================================
FILE: acls/zookeeper.sasl.jaas.conf
================================================
// Server: ZooKeeper server-side DIGEST-MD5 login; "user_<name>" entries define
// the accepted users, so this declares user "admin" with password "password".
Server {
  org.apache.zookeeper.server.auth.DigestLoginModule required
  user_admin="password";
};
// Client: credentials used when this config is loaded by a ZooKeeper client
// (e.g. zookeeper-shell run from the host, as shown by ./up).
Client {
  org.apache.zookeeper.server.auth.DigestLoginModule required
  username="admin"
  password="password";
};
================================================
FILE: apache-kafka-with-zk3.5-and-tls/.gitignore
================================================
# Generated artifacts — never commit binaries, keys/stores or rendered configs.
bin/
certs/
certs-old/
tmp-dir
images/
zookeeper.properties
================================================
FILE: apache-kafka-with-zk3.5-and-tls/README.md
================================================
# Apache Kafka 2.4 (trunk) with Zookeeper 3.5.5
This playbook shows the current (as of August 2019) necessary steps to enable a secured TLS connection between an Apache Kafka broker and its corresponding
Apache ZooKeeper counterpart.
As of today, this only covers using ZooKeeper 3.5.5 with the upcoming Apache Kafka 2.4 version. Using it with earlier versions is not properly tested.
## Run the playbook.
To run the playbook you need installed in your machine, docker, docker-compose.
The playbook can be started by running the _$> ./up_ script.
### Configuration on Apache ZooKeeper
Required environment variables:
```bash
SERVER_JVMFLAGS=-Dzookeeper.serverCnxnFactory=org.apache.zookeeper.server.NettyServerCnxnFactory
````
zoo.cfg file:
```bash
secureClientPort=2182
authProvider.1=org.apache.zookeeper.server.auth.X509AuthenticationProvider
ssl.trustStore.location=/var/lib/secret/truststore.jks
ssl.trustStore.password=test1234
ssl.keyStore.location=/var/lib/secret/zookeeper.jks
ssl.keyStore.password=test1234
ssl.clientAuth=true
```
### Configuration for Apache Kafka
Required environment variables:
```bash
KAFKA_OPTS=-Dzookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty -Dzookeeper.client.secure=true -Dzookeeper.ssl.keyStore.location=/var/lib/secret/kafka.jks -Dzookeeper.ssl.keyStore.password=confluent -Dzookeeper.ssl.trustStore.location=/var/lib/secret/truststore.jks -Dzookeeper.ssl.trustStore.password=confluent
```
server.properties file:
```
zookeeper.connect=zookeeper:2182
```
Note that ZooKeeper can expose the plaintext and the secure client port at the same time, but I would certainly not recommend this, as it waters down security.
## Things pending..
* The current zookeeper migration tool works based on JAAS files, there is currently no option to set authentication in a different way. There is an issue open with Apache Kafka (https://issues.apache.org/jira/browse/KAFKA-8843) to fix this, as well as the required overall KIP https://cwiki.apache.org/confluence/display/KAFKA/KIP-515%3A+Enable+ZK+client+to+use+the+new+TLS+supported+authentication, currently under discussion.
* The https://cwiki.apache.org/confluence/display/KAFKA/KIP-515%3A+Enable+ZK+client+to+use+the+new+TLS+supported+authentication covers as well the challenge of configuring zookeeper TLS access, for the brokers, using environment variables. There is a change proposed to make things better.
*NOTE*: This playbook utilises a custom-made Apache Kafka docker image, built from a trunk snapshot on the 22nd of August 2019. Currently Apache Kafka 2.4 is still not released. Changing base images will be easy once an official confluent image is released.
## Reference
* https://cwiki.apache.org/confluence/display/ZOOKEEPER/ZooKeeper+SSL+User+Guide
* https://cwiki.apache.org/confluence/display/KAFKA/KIP-515%3A+Enable+ZK+client+to+use+the+new+TLS+supported+authentication
* https://issues.apache.org/jira/browse/KAFKA-8843
* https://github.com/apache/kafka/commit/d67495d6a7f4c5f7e8736a25d6a11a1c1bef8d87
================================================
FILE: apache-kafka-with-zk3.5-and-tls/docker-compose.yml
================================================
# Apache Kafka (2.4 trunk snapshot) talking TLS to ZooKeeper 3.5 on its secure
# client port. The key/trust stores are generated into ./certs by ./up.
version: '3'
services:
  zookeeper:
    build: zookeeper/
    container_name: zookeeper
    hostname: zookeeper
    restart: on-failure
    environment:
      # The Netty connection factory is required for ZooKeeper server-side TLS.
      - SERVER_JVMFLAGS=-Dzookeeper.serverCnxnFactory=org.apache.zookeeper.server.NettyServerCnxnFactory
    volumes:
      - ./certs/zk-stores:/var/lib/secret
  kafka:
    build: kafka/
    container_name: kafka
    hostname: kafka
    depends_on:
      - zookeeper
    restart: on-failure
    volumes:
      - ./certs/kafka-stores:/var/lib/secret
    environment:
      # Netty client socket + client.secure=true + key/trust stores enable the
      # broker's TLS connection to ZooKeeper's secure port (2182).
      - KAFKA_OPTS=-Dzookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty -Dzookeeper.client.secure=true -Dzookeeper.ssl.keyStore.location=/var/lib/secret/kafka.jks -Dzookeeper.ssl.keyStore.password=confluent -Dzookeeper.ssl.trustStore.location=/var/lib/secret/truststore.jks -Dzookeeper.ssl.trustStore.password=confluent
    ports:
      - 29092:29092
================================================
FILE: apache-kafka-with-zk3.5-and-tls/kafka/Dockerfile
================================================
FROM purbon/kafka
# MAINTAINER is deprecated since Docker 1.13; LABEL is the supported replacement.
LABEL maintainer="pere.urbon@gmail.com"
ENV container docker

# 1. Install openjdk (Java 11 runtime for the broker)
RUN yum install -y java-11-openjdk

# 2. Configure Kafka
COPY server.properties /etc/kafka/server.properties

EXPOSE 9092
CMD kafka-server-start.sh /etc/kafka/server.properties
================================================
FILE: apache-kafka-with-zk3.5-and-tls/kafka/server.properties
================================================
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# see kafka.server.KafkaConfig for additional details and defaults
############################# Server Basics #############################
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0
############################# Socket Server Settings #############################
# The address the socket server listens on. It will get the value returned from
# java.net.InetAddress.getCanonicalHostName() if not configured.
# FORMAT:
# listeners = listener_name://host_name:port
# EXAMPLE:
# listeners = PLAINTEXT://your.host.name:9092
listeners=PLAINTEXT://kafka:9092,EXT_PLAINTEXT://localhost:29092
# Hostname and port the broker will advertise to producers and consumers. If not set,
# it uses the value for "listeners" if configured. Otherwise, it will use the value
# returned from java.net.InetAddress.getCanonicalHostName().
advertised.listeners=PLAINTEXT://kafka:9092,EXT_PLAINTEXT://localhost:29092
# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
listener.security.protocol.map=PLAINTEXT:PLAINTEXT,EXT_PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
#security.inter.broker.protocol=SSL
# The number of threads that the server uses for receiving requests from the network and sending responses to the network
num.network.threads=3
# The number of threads that the server uses for processing requests, which may include disk I/O
num.io.threads=8
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400
# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600
############################# Log Basics #############################
# A comma separated list of directories under which to store log files
log.dirs=/var/lib/kafka
# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=1
# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1
############################# Internal Topic Settings #############################
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1 is recommended for to ensure availability such as 3.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
############################# Log Flush Policy #############################
# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.
# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000
# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000
############################# Log Retention Policy #############################
# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.
# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours=168
# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
#log.retention.bytes=1073741824
# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824
# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=zookeeper:2182
# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=6000
##################### Confluent Metrics Reporter #######################
# Confluent Control Center and Confluent Auto Data Balancer integration
#
# Uncomment the following lines to publish monitoring data for
# Confluent Control Center and Confluent Auto Data Balancer
# If you are using a dedicated metrics cluster, also adjust the settings
# to point to your metrics kafka cluster.
#metric.reporters=io.confluent.metrics.reporter.ConfluentMetricsReporter
#confluent.metrics.reporter.bootstrap.servers=localhost:9092
#
# Uncomment the following line if the metrics cluster has a single broker
#confluent.metrics.reporter.topic.replicas=1
##################### Confluent Proactive Support ######################
# If set to true, and confluent-support-metrics package is installed
# then the feature to collect and report support metrics
# ("Metrics") is enabled. If set to false, the feature is disabled.
#
#confluent.support.metrics.enable=false
# The customer ID under which support metrics will be collected and
# reported.
#
# When the customer ID is set to "anonymous" (the default), then only a
# reduced set of metrics is being collected and reported.
#
# Confluent customers
# -------------------
# If you are a Confluent customer, then you should replace the default
# value with your actual Confluent customer ID. Doing so will ensure
# that additional support metrics will be collected and reported.
#
#confluent.support.customer.id=anonymous
############################# Group Coordinator Settings #############################
# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
# The default value for this is 3 seconds.
# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
group.initial.rebalance.delay.ms=0
# TLS Configuration
#ssl.truststore.location=/var/lib/secret/truststore.jks
#ssl.truststore.password=test1234
#ssl.keystore.location=/var/lib/secret/server.keystore.jks
#ssl.keystore.password=test1234
#ssl.client.auth=required
#authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
#super.users=User:CN=kafka.confluent.local,L=London,O=Confluent,C=UK;User:CN=schema-registry.confluent.local,L=London,O=Confluent,C=UK
================================================
FILE: apache-kafka-with-zk3.5-and-tls/up
================================================
#!/usr/bin/env bash
set -e
# Generate a keystore "$1.jks" holding a key pair whose certificate is signed
# by the local CA (ca-cert/ca-key, created beforehand by gentruststore).
# Globals: VALIDITY (certificate validity in days), PASSWORD (store/key password).
# Exits early if the keystore already exists.
function gencert() {
  # -e is the portable existence test (-a is deprecated); quote to survive
  # names with spaces.
  if [ -e "$1.jks" ]; then
    echo "The keystore $1.jks already exists"
    exit
  fi
  echo "Creating keystore $1.jks with a certificate and a key-pair for CN $1"
  keytool -keystore "$1.jks" -alias "$1" -validity "$VALIDITY" -genkey -storepass "$PASSWORD" -keypass "$PASSWORD" -dname "CN=$1,OU=kafka,O=confluent,L=MS,ST=Berlin,C=DE"
  echo "Creating a Certificate-Signing-Request for the generated certificate"
  keytool -keystore "$1.jks" -alias "$1" -certreq -file cert-file -storepass "$PASSWORD"
  echo "Signing the Certificate-Signing-Request and adding an additional DNS-entry for localhost"
  # The inline SAN extension adds both the host name and localhost so the same
  # certificate works from inside and outside the container network.
  openssl x509 -req -CA ca-cert -CAkey ca-key -in cert-file -out cert-signed -days "$VALIDITY" -CAcreateserial -passin pass:"$PASSWORD" -extensions SAN -extfile <(printf "\n[SAN]\nsubjectAltName=DNS:$1,DNS:localhost")
  echo "Importing the root-certificate for the CA into the keystore $1.jks"
  keytool -keystore "$1.jks" -alias CARoot -import -file ca-cert -storepass "$PASSWORD" -noprompt
  echo "Importing the signed certificate for CN $1 into the keystore $1.jks"
  keytool -keystore "$1.jks" -alias "$1" -import -file cert-signed -storepass "$PASSWORD"
  echo "Removing obsolete files..."
  rm -v cert-file cert-signed
}
# Create the CA (ca-cert/ca-key) if it does not exist yet and import the root
# certificate into truststore.jks. Both steps are idempotent: existing files
# are left untouched.
# Globals: VALIDITY (days), PASSWORD (CA key / truststore password).
function gentruststore() {
  # -e is the portable existence test (-a is deprecated).
  if [ -e ca-cert ]; then
    echo "The root-certificate for the CA already exists..."
  else
    echo "Creating a x509-certificate for the CA..."
    openssl req -new -x509 -subj "/C=DE/ST=Berlin/L=MS/O=confluent/OU=kafka/CN=Root-CA" -keyout ca-key -out ca-cert -days "$VALIDITY" -passout pass:"$PASSWORD"
  fi
  if [ -e truststore.jks ]; then
    echo "The keystore truststore.jks already exists!"
  else
    echo "Importing the root-certificate of the CA into truststore.jks..."
    keytool -keystore truststore.jks -storepass "$PASSWORD" -alias CARoot -import -file ca-cert -noprompt
  fi
}
# Clean up artifacts from previous runs and prepare the output directories.
rm -rf certs
rm -rf tmp-dir
mkdir tmp-dir
mkdir -p certs/kafka-stores
mkdir -p certs/zk-stores
# Settings shared by gentruststore/gencert defined above.
VALIDITY=365
PASSWORD=confluent
# Build the CA and the common truststore inside tmp-dir.
(cd tmp-dir; gentruststore)
# One keystore per component, each with a CA-signed certificate for its hostname.
hosts=( "zookeeper" "client" "kafka")
for host in "${hosts[@]}"
do
(cd tmp-dir; gencert $host )
done
# Distribute the stores into the directories mounted by the containers.
cp tmp-dir/truststore.jks certs/kafka-stores
cp tmp-dir/truststore.jks certs/zk-stores
cp tmp-dir/zookeeper.jks certs/zk-stores
cp tmp-dir/kafka.jks certs/kafka-stores
# Starting docker-compose services
docker-compose up -d --build
echo "Example configuration to access kafka:"
echo "-> docker-compose exec kafka kafka-topics.sh --bootstrap-server kafka:9092 --create --topic foo --partitions 1 --replication-factor 1"
echo "-> docker-compose exec kafka kafka-console-producer.sh --broker-list kafka:9092 --topic foo"
echo "-> docker-compose exec kafka kafka-console-consumer.sh --bootstrap-server kafka:9092 --topic foo --from-beginning"
================================================
FILE: apache-kafka-with-zk3.5-and-tls/zookeeper/Dockerfile
================================================
# ZooKeeper 3.5 image with a TLS-enabled configuration and a helper CLI script.
FROM purbon/zookeeper:3.5.5
# MAINTAINER is deprecated; LABEL is the supported replacement.
LABEL maintainer="pere.urbon@gmail.com"
ENV container docker
# Install a Java runtime for ZooKeeper.
RUN yum install -y java-11-openjdk
# Configure zookeeper with the TLS settings (secureClientPort 2182).
COPY zoo.cfg "${ZK_HOME}/conf/zoo.cfg"
# Add extra utility scripts. PATH entries must be directories, not files:
# add /opt (which holds tlsZkCli.sh), not the script path itself.
ENV PATH="/opt:${PATH}"
COPY tlsZkCli.sh /opt/tlsZkCli.sh
EXPOSE 2182
CMD zkServer.sh start-foreground
================================================
FILE: apache-kafka-with-zk3.5-and-tls/zookeeper/tlsZkCli.sh
================================================
#!/usr/bin/env bash
# Wrapper around zkCli.sh that connects over TLS to ZooKeeper's secure client
# port, using the keystore/truststore mounted under /var/lib/secret.
# Usage: tlsZkCli.sh <host:port>
# NOTE: the original first line was "##!" which is not a valid shebang; fixed.
export CLIENT_JVMFLAGS="-Dzookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty -Dzookeeper.client.secure=true
-Dzookeeper.ssl.keyStore.location=/var/lib/secret/zookeeper.jks
-Dzookeeper.ssl.keyStore.password=confluent
-Dzookeeper.ssl.trustStore.location=/var/lib/secret/truststore.jks
-Dzookeeper.ssl.trustStore.password=confluent"
zkCli.sh -server "$1"
================================================
FILE: apache-kafka-with-zk3.5-and-tls/zookeeper/zoo.cfg
================================================
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/tmp/zookeeper
# the port at which the clients will connect
#clientPort=2181
secureClientPort=2182
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
authProvider.1=org.apache.zookeeper.server.auth.X509AuthenticationProvider
ssl.trustStore.location=/var/lib/secret/truststore.jks
ssl.trustStore.password=confluent
ssl.keyStore.location=/var/lib/secret/zookeeper.jks
ssl.keyStore.password=confluent
# ssl.clientAuth is commented out below only as an example. In a production
# environment client certificate authentication should be enforced by setting
# ssl.clientAuth=need.
#ssl.clientAuth=need
================================================
FILE: auditlog/README.md
================================================
# Kafka Audit Log
This playbook adds an example of using the Confluent audit log trail.
The present example works with SASL/SCRAM but this example can be extended to other authentication methods such as RBAC, other SASL flavours or TLS.
## Playbook.
1.- start all the components running the _./up_ script.
```bash
./up
Creating zookeeper ... done
Creating kafka ... done
Completed updating config for entity: user-principal 'kafka'.
Completed updating config for entity: user-principal 'consumer'.
Completed updating config for entity: user-principal 'producer'.
[2020-05-12 12:20:50,405] WARN The configuration 'sasl.jaas.config' was supplied but isn't a known config. (org.apache.kafka.clients.admin.AdminClientConfig)
Adding ACLs for resource `ResourcePattern(resourceType=TOPIC, name=*, patternType=LITERAL)`:
(principal=User:producer, host=*, operation=DESCRIBE, permissionType=ALLOW)
(principal=User:producer, host=*, operation=WRITE, permissionType=ALLOW)
(principal=User:producer, host=*, operation=CREATE, permissionType=ALLOW)
[2020-05-12 12:20:51,026] WARN The configuration 'sasl.jaas.config' was supplied but isn't a known config. (org.apache.kafka.clients.admin.AdminClientConfig)
Current ACLs for resource `ResourcePattern(resourceType=TOPIC, name=*, patternType=LITERAL)`:
(principal=User:producer, host=*, operation=DESCRIBE, permissionType=ALLOW)
(principal=User:producer, host=*, operation=CREATE, permissionType=ALLOW)
(principal=User:producer, host=*, operation=WRITE, permissionType=ALLOW)
[2020-05-12 12:20:53,986] WARN The configuration 'sasl.jaas.config' was supplied but isn't a known config. (org.apache.kafka.clients.admin.AdminClientConfig)
Adding ACLs for resource `ResourcePattern(resourceType=TOPIC, name=*, patternType=LITERAL)`:
(principal=User:consumer, host=*, operation=DESCRIBE, permissionType=ALLOW)
(principal=User:consumer, host=*, operation=READ, permissionType=ALLOW)
Adding ACLs for resource `ResourcePattern(resourceType=GROUP, name=*, patternType=LITERAL)`:
(principal=User:consumer, host=*, operation=READ, permissionType=ALLOW)
[2020-05-12 12:20:54,538] WARN The configuration 'sasl.jaas.config' was supplied but isn't a known config. (org.apache.kafka.clients.admin.AdminClientConfig)
Current ACLs for resource `ResourcePattern(resourceType=TOPIC, name=*, patternType=LITERAL)`:
(principal=User:producer, host=*, operation=CREATE, permissionType=ALLOW)
(principal=User:producer, host=*, operation=DESCRIBE, permissionType=ALLOW)
(principal=User:consumer, host=*, operation=DESCRIBE, permissionType=ALLOW)
(principal=User:producer, host=*, operation=WRITE, permissionType=ALLOW)
(principal=User:consumer, host=*, operation=READ, permissionType=ALLOW)
Current ACLs for resource `ResourcePattern(resourceType=GROUP, name=*, patternType=LITERAL)`:
(principal=User:consumer, host=*, operation=READ, permissionType=ALLOW)
[2020-05-12 12:20:57,354] WARN The configuration 'sasl.jaas.config' was supplied but isn't a known config. (org.apache.kafka.clients.admin.AdminClientConfig)
Adding ACLs for resource `ResourcePattern(resourceType=TOPIC, name=confluent-audit-log-events, patternType=PREFIXED)`:
(principal=User:confluent-audit, host=*, operation=DESCRIBE, permissionType=ALLOW)
(principal=User:confluent-audit, host=*, operation=WRITE, permissionType=ALLOW)
(principal=User:confluent-audit, host=*, operation=CREATE, permissionType=ALLOW)
[2020-05-12 12:20:57,928] WARN The configuration 'sasl.jaas.config' was supplied but isn't a known config. (org.apache.kafka.clients.admin.AdminClientConfig)
Current ACLs for resource `ResourcePattern(resourceType=TOPIC, name=confluent-audit-log-events, patternType=PREFIXED)`:
(principal=User:confluent-audit, host=*, operation=WRITE, permissionType=ALLOW)
(principal=User:confluent-audit, host=*, operation=DESCRIBE, permissionType=ALLOW)
(principal=User:confluent-audit, host=*, operation=CREATE, permissionType=ALLOW)
Example configuration:
-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9092 --producer.config /etc/kafka/producer-user.properties --topic test
-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9092 --consumer.config /etc/kafka/consumer-user.properties --topic test --from-beginning
```
2.- Explore the currently created topics.
```
./scripts/describe-topics.sh
[2020-05-12 12:21:55,868] WARN The configuration 'sasl.jaas.config' was supplied but isn't a known config. (org.apache.kafka.clients.admin.AdminClientConfig)
Topic: _confluent-license PartitionCount: 1 ReplicationFactor: 1 Configs: min.insync.replicas=1,cleanup.policy=compact
Topic: _confluent-license Partition: 0 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: __confluent.support.metrics PartitionCount: 1 ReplicationFactor: 1 Configs: retention.ms=31536000000
Topic: __confluent.support.metrics Partition: 0 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events PartitionCount: 12 ReplicationFactor: 1 Configs: retention.ms=7776000000,message.timestamp.type=CreateTime,retention.bytes=-1,segment.ms=14400000
Topic: confluent-audit-log-events Partition: 0 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 1 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 2 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 3 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 4 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 5 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 6 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 7 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 8 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 9 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 10 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 11 Leader: 1 Replicas: 1 Isr: 1 Offline:
```
3.- Explore the audit log topics
```
./scripts/explore-audit-topic.sh
```
The topic is empty at the beginning.
Keep this terminal open; it will start showing the generated events as we issue them.
4.- Create some topics and acls.
```
./scripts/create-topics.sh
Create topic foo with User:kafka
NOTE: this topic creation will be ignored because uses a user inside the ignore list.
Created topic foo.
Create topic bar with User:producer
NOTE: This action will be noted in the audit log.
Created topic bar.
Adding ACLs for resource `ResourcePattern(resourceType=TOPIC, name=bar, patternType=LITERAL)`:
(principal=User:producer, host=*, operation=ALTER_CONFIGS, permissionType=ALLOW)
Current ACLs for resource `ResourcePattern(resourceType=TOPIC, name=bar, patternType=LITERAL)`:
(principal=User:producer, host=*, operation=ALTER_CONFIGS, permissionType=ALLOW)
Adding ACLs for resource `ResourcePattern(resourceType=TOPIC, name=bar, patternType=LITERAL)`:
(principal=User:producer, host=*, operation=DELETE, permissionType=ALLOW)
Current ACLs for resource `ResourcePattern(resourceType=TOPIC, name=bar, patternType=LITERAL)`:
(principal=User:producer, host=*, operation=ALTER_CONFIGS, permissionType=ALLOW)
(principal=User:producer, host=*, operation=DELETE, permissionType=ALLOW)
Change of a configuration
NOTE: This action will be noted in the audit log.
Completed updating config for topic bar.
```
Now the audit log topic should reflect the information about the generated actions.
```
./scripts/explore-audit-topic.sh
{"data":{"serviceName":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA","methodName":"kafka.CreateTopics","resourceName":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA/topic=bar","authenticationInfo":{"principal":"User:producer"},"authorizationInfo":{"granted":true,"operation":"Create","resourceType":"Topic","resourceName":"bar","patternType":"LITERAL","aclAuthorization":{"permissionType":"ALLOW","host":"*"}},"request":{"correlation_id":"4","client_id":"adminclient-1"},"requestMetadata":{"client_address":"/172.27.0.3"}},"id":"b17cc9c7-96f0-413a-b94c-124e21834a55","source":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA","specversion":"0.3","type":"io.confluent.kafka.server/authorization","time":"2020-05-12T12:24:37.838Z","datacontenttype":"application/json","subject":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA/topic=bar","confluentRouting":{"route":"confluent-audit-log-events"}}
{"data":{"serviceName":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA","methodName":"kafka.IncrementalAlterConfigs","resourceName":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA/topic=bar","authenticationInfo":{"principal":"User:producer"},"authorizationInfo":{"granted":true,"operation":"AlterConfigs","resourceType":"Topic","resourceName":"bar","patternType":"LITERAL","aclAuthorization":{"permissionType":"ALLOW","host":"*"}},"request":{"correlation_id":"4","client_id":"adminclient-1"},"requestMetadata":{"client_address":"/172.27.0.3"}},"id":"93e94659-8b45-4f44-b691-f284192ebe42","source":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA","specversion":"0.3","type":"io.confluent.kafka.server/authorization","time":"2020-05-12T12:24:47.700Z","datacontenttype":"application/json","subject":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA/topic=bar","confluentRouting":{"route":"confluent-audit-log-events"}}
```
5.- Write some messages
```
./scripts/write-msg.sh bar
Write messages to topic bar
```
More messages coming into the audit log.
```
{"data":{"serviceName":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA","methodName":"kafka.Produce","resourceName":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA/topic=bar","authenticationInfo":{"principal":"User:producer"},"authorizationInfo":{"granted":true,"operation":"Write","resourceType":"Topic","resourceName":"bar","patternType":"LITERAL","aclAuthorization":{"permissionType":"ALLOW","host":"*"}},"request":{"correlation_id":"6","client_id":"rdkafka"},"requestMetadata":{"client_address":"/172.27.0.4"}},"id":"7789d492-df1c-404a-b494-3dc44fb01b24","source":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA","specversion":"0.3","type":"io.confluent.kafka.server/authorization","time":"2020-05-12T12:26:49.353Z","datacontenttype":"application/json","subject":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA/topic=bar","confluentRouting":{"route":"confluent-audit-log-events"}}
```
6.- Delete of messages
```
./scripts/delete-records.sh
Executing records delete operation
Records delete operation completed:
partition: bar-0 low_watermark: 3
```
new messages in the audit trail.
```
{"data":{"serviceName":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA","methodName":"kafka.DeleteRecords","resourceName":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA/topic=bar","authenticationInfo":{"principal":"User:producer"},"authorizationInfo":{"granted":true,"operation":"Delete","resourceType":"Topic","resourceName":"bar","patternType":"LITERAL","aclAuthorization":{"permissionType":"ALLOW","host":"*"}},"request":{"correlation_id":"4","client_id":"adminclient-1"},"requestMetadata":{"client_address":"/172.27.0.3"}},"id":"f6664ede-fbd4-4425-873a-d31df5eb0b7f","source":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA","specversion":"0.3","type":"io.confluent.kafka.server/authorization","time":"2020-05-12T12:27:34.425Z","datacontenttype":"application/json","subject":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA/topic=bar","confluentRouting":{"route":"confluent-audit-log-events"}}
```
## More information
This is only a summary and playbook of this functionality; more information can be found in the reference documentation.
1.- https://docs.confluent.io/current/security/audit-logs.html
================================================
FILE: auditlog/config/delete-records.json
================================================
{
"partitions": [
{
"topic": "bar",
"partition": 0,
"offset": 3
}
],
"version": 1
}
================================================
FILE: auditlog/data/my_msgs.txt
================================================
This is a message
This is another message
Abracadabra
================================================
FILE: auditlog/docker-compose.yml
================================================
version: "2"
services:
zookeeper:
image: confluentinc/cp-zookeeper:5.5.0
hostname: zookeeper
container_name: zookeeper
volumes:
- ./zookeeper:/etc/kafka
ports:
- 2181:2181
environment:
ZOOKEEPER_CLIENT_PORT: 2181
KAFKA_OPTS: -Djava.security.auth.login.config=/etc/kafka/zookeeper.sasl.jaas.config
-Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
kafka:
image: confluentinc/cp-server:5.5.0
container_name: kafka
hostname: kafka
depends_on:
- zookeeper
volumes:
- ./kafka:/etc/kafka
- ./config:/tmp/config
ports:
- 9092:9092
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
KAFKA_ADVERTISED_LISTENERS: "SASL_PLAINTEXT://kafka:9092"
KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS: "1"
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "1"
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: "1"
KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: "1"
KAFKA_SASL_ENABLED_MECHANISMS: "SCRAM-SHA-256"
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: "SCRAM-SHA-256"
KAFKA_SECURITY_INTER_BROKER_PROTOCOL: "SASL_PLAINTEXT"
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "false"
KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/kafka.sasl.jaas.config"
KAFKA_SUPER_USERS: "User:kafka"
KAFKA_ZOOKEEPER_SET_ACL: "true"
KAFKA_AUTHORIZER_CLASS_NAME: io.confluent.kafka.security.authorizer.ConfluentServerAuthorizer
KAFKA_CONFLUENT_SECURITY_EVENT_ROUTER_CONFIG: "{\"routes\":{\"crn:///kafka=*/group=*\":{\"consume\":{\"allowed\":\"confluent-audit-log-events\",\"denied\":\"confluent-audit-log-events\"}},\"crn:///kafka=*/topic=*\":{\"produce\":{\"allowed\":\"confluent-audit-log-events\",\"denied\":\"confluent-audit-log-events\"},\"consume\":{\"allowed\":\"confluent-audit-log-events\",\"denied\":\"confluent-audit-log-events\"}}},\"destinations\":{\"topics\":{\"confluent-audit-log-events\":{\"retention_ms\":7776000000}}},\"default_topics\":{\"allowed\":\"confluent-audit-log-events\",\"denied\":\"confluent-audit-log-events\"},\"excluded_principals\":[\"User:kafka\",\"User:ANONYMOUS\"]}"
================================================
FILE: auditlog/example-config.json
================================================
{
"routes": {
"crn:///kafka=*/group=*": {
"consume": {
"allowed": "confluent-audit-log-events",
"denied": "confluent-audit-log-events"
}
},
"crn:///kafka=*/topic=*": {
"produce": {
"allowed": "confluent-audit-log-events",
"denied": "confluent-audit-log-events"
},
"consume": {
"allowed": "confluent-audit-log-events",
"denied": "confluent-audit-log-events"
}
}
},
"destinations": {
"topics": {
"confluent-audit-log-events": {
"retention_ms": 7776000000
}
}
},
"default_topics": {
"allowed": "confluent-audit-log-events",
"denied": "confluent-audit-log-events"
},
"excluded_principals": ["User:kafka", "User:ANONYMOUS"]
}
================================================
FILE: auditlog/kafka/consumer-user.properties
================================================
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="consumer" \
password="consumerpass";
================================================
FILE: auditlog/kafka/kafka-user.properties
================================================
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="kafka" \
password="kafka";
================================================
FILE: auditlog/kafka/kafka.properties
================================================
broker.id=1
advertised.listeners=SASL_PLAINTEXT://kafka:9092
offsets.topic.replication.factor=1
allow.everyone.if.no.acl.found=false
zookeeper.connect=zookeeper:2181
security.inter.broker.protocol=SASL_PLAINTEXT
authorizer.class.name=io.confluent.kafka.security.authorizer.ConfluentServerAuthorizer
log.dirs=/var/lib/kafka/data
confluent.security.event.router.config={"routes":{"crn:///kafka=*/group=*":{"consume":{"allowed":"confluent-audit-log-events","denied":"confluent-audit-log-events"}},"crn:///kafka=*/topic=*":{"produce":{"allowed":"confluent-audit-log-events","denied":"confluent-audit-log-events"},"consume":{"allowed":"confluent-audit-log-events","denied":"confluent-audit-log-events"}}},"destinations":{"topics":{"confluent-audit-log-events":{"retention_ms":7776000000}}},"default_topics":{"allowed":"confluent-audit-log-events","denied":"confluent-audit-log-events"},"excluded_principals":["User:kafka","User:ANONYMOUS"]}
listeners=SASL_PLAINTEXT://0.0.0.0:9092
zookeeper.set.acl=true
super.users=User:kafka
offsets.topic.num.partitions=1
sasl.enabled.mechanisms=SCRAM-SHA-256
transaction.state.log.replication.factor=1
sasl.mechanism.inter.broker.protocol=SCRAM-SHA-256
confluent.license.topic.replication.factor=1
================================================
FILE: auditlog/kafka/kafka.sasl.jaas.config
================================================
KafkaServer {
org.apache.kafka.common.security.scram.ScramLoginModule required
username="kafka"
password="kafka";
};
Client {
org.apache.zookeeper.server.auth.DigestLoginModule required
username="admin"
password="password";
};
================================================
FILE: auditlog/kafka/log4j.properties
================================================
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.logger.kafka.authorizer.logger=WARN
log4j.logger.kafka.log.LogCleaner=INFO
log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG
log4j.logger.kafka.controller=TRACE
log4j.logger.kafka.network.RequestChannel$=WARN
log4j.logger.kafka.request.logger=WARN
log4j.logger.state.change.logger=TRACE
log4j.logger.kafka=INFO
================================================
FILE: auditlog/kafka/producer-user.properties
================================================
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="producer" \
password="producerpass";
================================================
FILE: auditlog/kafka/tools-log4j.properties
================================================
log4j.rootLogger=WARN, stderr
log4j.appender.stderr=org.apache.log4j.ConsoleAppender
log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.stderr.Target=System.err
================================================
FILE: auditlog/scripts/create-topics.sh
================================================
#!/usr/bin/env bash
# Demonstrates which admin actions land in the audit log: actions by
# User:kafka are excluded (it is listed in excluded_principals in the router
# config), while actions by User:producer are recorded.
echo "Create topic foo with User:kafka"
echo "NOTE: this topic creation will be ignored because uses a user inside the ignore list."
echo
docker exec kafka kafka-topics --bootstrap-server kafka:9092 \
--command-config /etc/kafka/kafka-user.properties \
--create --topic foo --replication-factor 1 --partitions 1
sleep 1
echo "Create topic bar with User:producer"
echo "NOTE: This action will be noted in the audit log."
echo
docker exec kafka kafka-topics --bootstrap-server kafka:9092 \
--command-config /etc/kafka/producer-user.properties \
--create --topic bar --replication-factor 1 --partitions 1
# Grant User:producer the extra ACLs (AlterConfigs, Delete) needed on topic
# bar for the config change below and for delete-records.sh.
docker exec kafka kafka-acls --bootstrap-server kafka:9092 \
--command-config /etc/kafka/kafka-user.properties \
--add --allow-principal User:producer --operation AlterConfigs \
--topic "bar"
docker exec kafka kafka-acls --bootstrap-server kafka:9092 \
--command-config /etc/kafka/kafka-user.properties \
--add --allow-principal User:producer --operation Delete \
--topic "bar"
sleep 1
echo "Change of a configuration"
echo "NOTE: This action will be noted in the audit log."
echo
docker exec kafka kafka-configs --bootstrap-server kafka:9092 \
--topic bar --add-config retention.ms=2592000001 \
--alter --command-config /etc/kafka/producer-user.properties
================================================
FILE: auditlog/scripts/delete-records.sh
================================================
#!/usr/bin/env bash
# Delete records up to the offsets listed in delete-records.json (topic bar,
# partition 0, offset 3), authenticating as User:producer — which needs the
# Delete ACL granted by create-topics.sh.
docker exec kafka kafka-delete-records --bootstrap-server kafka:9092 \
--command-config /etc/kafka/producer-user.properties \
--offset-json-file /tmp/config/delete-records.json
================================================
FILE: auditlog/scripts/describe-topics.sh
================================================
#!/usr/bin/env bash
# Describe all topics as the super user (User:kafka), including internal ones
# and the confluent-audit-log-events topic.
docker exec kafka kafka-topics --bootstrap-server kafka:9092 --command-config /etc/kafka/kafka-user.properties --describe
================================================
FILE: auditlog/scripts/explore-audit-topic.sh
================================================
#!/usr/bin/env bash
# Follow the audit-log topic from the beginning, authenticated as the
# super user so the read is itself excluded from the audit trail.
readonly TOPIC="confluent-audit-log-events"
docker-compose exec kafka kafka-console-consumer \
  --bootstrap-server kafka:9092 \
  --consumer.config /etc/kafka/kafka-user.properties \
  --topic "$TOPIC" \
  --from-beginning
================================================
FILE: auditlog/scripts/write-msg.sh
================================================
#!/usr/bin/env bash
# Produce the messages in data/my_msgs.txt to the given topic with kafkacat,
# authenticating over SASL/SCRAM as User:producer.
# Usage: write-msg.sh <topic>
# Do not clobber the shell's special PWD variable; use a dedicated one.
BASE_DIR="$(pwd)"
topic="$1"
network="auditlog_default"
USERNAME=producer
PASSWORD=producerpass
echo "Write messages to topic $topic"
docker run --network "$network" \
  --volume "$BASE_DIR/data/my_msgs.txt":/data/my_msgs.txt \
  confluentinc/cp-kafkacat \
  kafkacat -b kafka:9092 \
  -t "$topic" \
  -X security.protocol=SASL_PLAINTEXT -X sasl.mechanisms=SCRAM-SHA-256 -X "sasl.username=$USERNAME" -X "sasl.password=$PASSWORD" \
  -P -l /data/my_msgs.txt
================================================
FILE: auditlog/up
================================================
#!/usr/bin/env bash
# Start the stack, create SCRAM credentials, and install the ACLs used by the
# audit-log playbook.
docker-compose up -d
# Create SCRAM-SHA-256/512 credentials by writing them directly to ZooKeeper.
# NOTE(review): the first command runs in the kafka container while the next
# two run in the zookeeper container; all write to zookeeper:2181 so the
# effect looks equivalent — confirm this is intentional.
docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=kafka],SCRAM-SHA-512=[password=kafka]' --entity-type users --entity-name kafka
docker-compose exec zookeeper kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=consumerpass],SCRAM-SHA-512=[password=consumerpass]' --entity-type users --entity-name consumer
docker-compose exec zookeeper kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=producerpass],SCRAM-SHA-512=[password=producerpass]' --entity-type users --entity-name producer
# ACLs: producer may create/describe/write topics, consumer may read topics
# and groups, confluent-audit may produce to the audit-log topics (prefixed).
docker-compose exec kafka kafka-acls --bootstrap-server kafka:9092 --command-config /etc/kafka/kafka-user.properties --add --allow-principal User:producer --producer --topic=*
docker-compose exec kafka kafka-acls --bootstrap-server kafka:9092 --command-config /etc/kafka/kafka-user.properties --add --allow-principal User:consumer --consumer --topic=* --group=*
docker-compose exec kafka kafka-acls --bootstrap-server kafka:9092 --command-config /etc/kafka/kafka-user.properties --add --allow-principal User:confluent-audit --producer --topic confluent-audit-log-events --resource-pattern-type prefixed
echo "Example configuration:"
echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9092 --producer.config /etc/kafka/producer-user.properties --topic test"
echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9092 --consumer.config /etc/kafka/consumer-user.properties --topic test --from-beginning"
================================================
FILE: auditlog/zookeeper/log4j.properties
================================================
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
================================================
FILE: auditlog/zookeeper/tools-log4j.properties
================================================
log4j.rootLogger=WARN, stderr
log4j.appender.stderr=org.apache.log4j.ConsoleAppender
log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.stderr.Target=System.err
================================================
FILE: auditlog/zookeeper/zookeeper.properties
================================================
dataDir=/var/lib/zookeeper/data
dataLogDir=/var/lib/zookeeper/log
clientPort=2181
================================================
FILE: auditlog/zookeeper/zookeeper.sasl.jaas.config
================================================
Server {
org.apache.zookeeper.server.auth.DigestLoginModule required
user_admin="password";
};
Client {
org.apache.zookeeper.server.auth.DigestLoginModule required
username="admin"
password="password";
};
================================================
FILE: ca-builder-scripts/.gitignore
================================================
tmp-certs/
stores
legacy/
## remove from git the generated CA files
ca/
================================================
FILE: ca-builder-scripts/README.md
================================================
# Building a CA with OpenSSL
This is a collection of scripts useful to generate a local CA setup. While the PKI could be set up in different ways, for this example
we generate:
* A root CA identity, for example for your mother company.
* An intermediate CA identity, for example the one generated for your department or smaller company.
* And then the client certificates.
All the scripts are wrapping openssl to generate the required structures.
## Commands
A collection of scripts are provided to generate any of the required steps to build this CA.
*IMPORTANT:* These scripts set a default password for the CA certs; the password is __confluent__. You should change it.
### Building the root CA
To construct the root CA setup, you can run the script:
```bash
./utils/build-ca.sh
```
this script will generate the default directory structure for the CA, including the root certificate for your authority. After the execution you should see a directory structure like this:
```bash
➜ ca-builder-scripts git:(ca-builder-scripts) ✗ ls -la ca
total 48
drwxr-xr-x 13 pere staff 416 3 May 16:33 .
drwxr-xr-x 16 pere staff 512 3 May 16:32 ..
drwxr-xr-x 3 pere staff 96 3 May 16:33 certs
drwxr-xr-x 2 pere staff 64 3 May 16:32 crl
-rw-r--r-- 1 pere staff 97 3 May 16:33 index.txt
-rw-r--r-- 1 pere staff 21 3 May 16:33 index.txt.attr
drwxr-xr-x 3 pere staff 96 3 May 16:33 newcerts
-rw-r--r-- 1 pere staff 4117 3 May 16:32 openssl.cnf
drwx------ 3 pere staff 96 3 May 16:32 private
-rw-r--r-- 1 pere staff 5 3 May 16:33 serial
```
*NOTE*: This script sets a default password for the root certificate, change it if you require to have another one.
### Building the intermediate CA.
Once the main CA structure is created, you need to create the intermediate CA, for this you can use this script:
```bash
./utils/build-intermediate-ca.sh
```
Once the script is run, you should see a directory structure like this:
```bash
➜ ca-builder-scripts git:(ca-builder-scripts) ✗ ls -la ca/intermediate
total 80
drwxr-xr-x 16 pere staff 512 3 May 17:22 .
drwxr-xr-x 13 pere staff 416 3 May 16:33 ..
drwxr-xr-x 5 pere staff 160 3 May 16:34 certs
drwxr-xr-x 3 pere staff 96 3 May 17:21 crl
-rw-r--r-- 1 pere staff 5 3 May 17:22 crlnumber
drwxr-xr-x 4 pere staff 128 3 May 16:34 csr
-rw-r--r-- 1 pere staff 109 3 May 17:20 index.txt
-rw-r--r-- 1 pere staff 21 3 May 17:20 index.txt.attr
drwxr-xr-x 3 pere staff 96 3 May 16:34 newcerts
-rw-r--r-- 1 pere staff 4328 3 May 16:33 openssl.cnf
drwx------ 4 pere staff 128 3 May 16:33 private
-rw-r--r-- 1 pere staff 5 3 May 16:34 serial
```
*NOTE*: This script sets a default password for the certificate, change it if you require to have another one.
### Generating an end user certificate
Once the full CA is set up, the next step is to generate end user certificates. To do this you can use a command that looks like:
```bash
./create-pair-certs.sh kafka.confluent.local machine0.example.com server_cert
```
where the first parameter is the certificate name, the second is an additional DNS alt name (the machine the certificate is deployed to), and the third is the x509 extension being used (it defaults to server_cert). For this CA we support server_cert and usr_cert. See the [configs/](configs/) directory for details of the configuration.
### revoke certs
A common process in any CA is to revoke certificates; with these scripts you can do it like this:
```bash
./revoke-cert.sh kafka.confluent.local
```
this command will revoke the certificate with the name _kafka.confluent.local_.
Once this command is run, you should see an update in the intermediate CA text db like this:
```bash
➜ ca-builder-scripts git:(ca-builder-scripts) ✗ cat ca/intermediate/index.txt
R 200512143408Z 190503152037Z 1000 unknown /C=DE/ST=Berlin/L=Berlin/O=Confluent Ltd/CN=kafka.confluent.local
```
this means this cert is revoked, so no longer valid
## create certificate revocation lists
To revoke a cert is nice, but you need to announce this to the world, for this you need to create a list of revoked certificates. This you can do using this script:
```bash
./create-crl.sh
```
Once this is run, there will be a new file being created under
```bash
➜ ca-builder-scripts git:(ca-builder-scripts) ✗ ls ca/intermediate/crl
intermediate.crl.pem
```
that will contain the list of revoked certs, this can be used then as part of your distribution points list, to inform clients of the CA which identities are being revoked.
## Common errors
> error 20 at 0 depth lookup:unable to get local issuer certificate
could not find the original file; the path to the CA certificates is wrong.
> TXT_DB error number 2 failed to update database
Because you have generated your own self signed certificate with the same CN (Common Name) information that the CA certificate that you’ve generated before.
Enter another Common Name.
================================================
FILE: ca-builder-scripts/build-a-batch-of-certs.sh
================================================
#!/usr/bin/env bash
## Batch-create certificates: reads a CSV file ("cert-name,machine" per line)
## passed as $1 and prints (dry run) the create-cert command for each entry.
## Uncomment the create-cert.sh line to actually generate the certificates.
input=$1
# Split each line on commas directly with read -r: unlike the previous
# fields=($(echo $line | tr "," "\n")) this is immune to globbing and
# whitespace mangling of the unquoted expansion.
while IFS=',' read -r cert_name machine _
do
#./support-scripts/create-cert.sh "$cert_name" "$machine"
echo "./support-scripts/create-cert.sh $cert_name $machine"
done < "$input"
================================================
FILE: ca-builder-scripts/build-a-batch-of-stores.sh
================================================
#!/usr/bin/env bash
## Build JKS/PKCS12 keystores and truststores for each entry of a CSV config
## file. Usage: ./build-a-batch-of-stores.sh <config-file> [password]
## Each config line is: cert-name,store-name
DEFAULT_PASSWORD=${2:-confluent}
# Default CA_ROOT_DIR to the current directory when it is unset.
if [ -z "${CA_ROOT_DIR+x}" ];
then
CA_ROOT_DIR='.'
fi
CA_CERT="$CA_ROOT_DIR/ca/certs/ca.cert.pem"
CA_KEY="$CA_ROOT_DIR/ca/private/ca.key.pem"
INT_CA_CERT="$CA_ROOT_DIR/ca/intermediate/certs/intermediate.cert.pem"
INT_CA_KEY="$CA_ROOT_DIR/ca/intermediate/private/intermediate.key.pem"
# build_store <cert_name> <store_type>:
#   bundles the named cert+key into a PKCS12 file, imports it into
#   stores/<store_type>.keystore, and imports both the root and the
#   intermediate CA certificates into stores/<store_type>.truststore.
function build_store {
cert_name=$1
store_type=$2
CERT_PATH="$CA_ROOT_DIR/ca/intermediate/certs/$cert_name.cert.pem"
KEY_PATH="$CA_ROOT_DIR/ca/intermediate/private/$cert_name.key.pem"
openssl pkcs12 -export -in $CERT_PATH -inkey $KEY_PATH -passin pass:$DEFAULT_PASSWORD -passout pass:$DEFAULT_PASSWORD -name $cert_name -out tmp-certs/$cert_name.p12
sleep 1
## build keystore and truststores
keytool -noprompt -importkeystore -deststorepass $DEFAULT_PASSWORD -destkeystore stores/$store_type.keystore -srckeystore tmp-certs/$cert_name.p12 -srcstorepass $DEFAULT_PASSWORD -storepass $DEFAULT_PASSWORD -keypass $DEFAULT_PASSWORD -srcstoretype PKCS12 -deststoretype pkcs12
openssl pkcs12 -export -in $CA_CERT -inkey $CA_KEY -passin pass:$DEFAULT_PASSWORD -passout pass:$DEFAULT_PASSWORD -name 'ca' -out tmp-certs/ca.p12
sleep 1
openssl pkcs12 -export -in $INT_CA_CERT -inkey $INT_CA_KEY -passin pass:$DEFAULT_PASSWORD -passout pass:$DEFAULT_PASSWORD -name 'intermediate-ca' -out tmp-certs/inter-ca.p12
sleep 1
keytool -noprompt -importkeystore -deststorepass $DEFAULT_PASSWORD -destkeystore stores/$store_type.truststore -srckeystore tmp-certs/ca.p12 -srcstorepass $DEFAULT_PASSWORD -srcstoretype PKCS12 -storepass $DEFAULT_PASSWORD -keypass $DEFAULT_PASSWORD -deststoretype pkcs12
sleep 1
keytool -noprompt -importkeystore -deststorepass $DEFAULT_PASSWORD -destkeystore stores/$store_type.truststore -srckeystore tmp-certs/inter-ca.p12 -srcstorepass $DEFAULT_PASSWORD -srcstoretype PKCS12 -storepass $DEFAULT_PASSWORD -keypass $DEFAULT_PASSWORD -deststoretype pkcs12
}
## building stores for the brokers
mkdir -p stores tmp-certs
CONFIG_FILE=$1
while read data; do
fields=($(echo $data | tr "," "\n"))
echo "Building a store for ${fields[0]} and ${fields[1]}"
build_store "${fields[0]}" "${fields[1]}"
done <$CONFIG_FILE
# Clean up the temporary PKCS12 files. This previously removed "temp-certs",
# a typo that silently left the tmp-certs working directory behind.
rm -rf tmp-certs
================================================
FILE: ca-builder-scripts/configs/batch-of-certs.txt
================================================
consumer,machine0.example.com
producer,machine1.example.com
kafka,machine2.example.com
zookeeper,machine3.example.com
================================================
FILE: ca-builder-scripts/configs/batch-of-stores.txt
================================================
consumer,machine0.example.com
producer,machine1.example.com
kafka,machine2.example.com
zookeeper,machine3.example.com
================================================
FILE: ca-builder-scripts/configs/ca-config-vars
================================================
DE
Berlin
Berlin
Confluent Germany
================================================
FILE: ca-builder-scripts/configs/ca.config
================================================
[ ca ]
default_ca = CA_default
[ CA_default ]
# Directory and file locations.
dir = .
certs = $dir/certs
crl_dir = $dir/crl
new_certs_dir = $dir/newcerts
database = $dir/index.txt
serial = $dir/serial
RANDFILE = $dir/private/.rand
# The root key and root certificate.
private_key = $dir/private/ca.key.pem
certificate = $dir/certs/ca.cert.pem
# For certificate revocation lists.
crlnumber = $dir/crlnumber
crl = $dir/crl/ca.crl.pem
crl_extensions = crl_ext
default_crl_days = 30
# SHA-1 is deprecated, so use SHA-2 instead.
default_md = sha256
name_opt = ca_default
cert_opt = ca_default
default_days = 375
preserve = no
policy = policy_strict
[ policy_strict ]
# The root CA should only sign intermediate certificates that match.
# See the POLICY FORMAT section of `man ca`.
countryName = match
stateOrProvinceName = match
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ policy_loose ]
# Allow the intermediate CA to sign a more diverse range of certificates.
# See the POLICY FORMAT section of the `ca` man page.
countryName = optional
stateOrProvinceName = optional
localityName = optional
organizationName = optional
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
# Options for the `req` tool (`man req`).
default_bits = 2048
distinguished_name = req_distinguished_name
string_mask = utf8only
# SHA-1 is deprecated, so use SHA-2 instead.
default_md = sha256
# Extension to add when the -x509 option is used.
x509_extensions = v3_ca
[ req_distinguished_name ]
# See <https://en.wikipedia.org/wiki/Certificate_signing_request>.
countryName = Country Name (2 letter code)
stateOrProvinceName = State or Province Name
localityName = Locality Name
0.organizationName = Organization Name
organizationalUnitName = Organizational Unit Name
commonName = Common Name
emailAddress = Email Address
# Optionally, specify some defaults.
countryName_default = DE
stateOrProvinceName_default = Berlin
localityName_default = Berlin
0.organizationName_default = Confluent Ltd
#organizationalUnitName_default =
#emailAddress_default =
[ v3_ca ]
# Extensions for a typical CA (`man x509v3_config`).
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer
basicConstraints = critical, CA:true
keyUsage = critical, digitalSignature, cRLSign, keyCertSign
[ v3_intermediate_ca ]
# Extensions for a typical intermediate CA (`man x509v3_config`).
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer
basicConstraints = critical, CA:true, pathlen:0
keyUsage = critical, digitalSignature, cRLSign, keyCertSign
[ usr_cert ]
# Extensions for client certificates (`man x509v3_config`).
basicConstraints = CA:FALSE
nsCertType = client, email
nsComment = "OpenSSL Generated Client Certificate"
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth, emailProtection
[ server_cert ]
# Extensions for server certificates (`man x509v3_config`).
basicConstraints = CA:FALSE
nsCertType = server
nsComment = "OpenSSL Generated Server Certificate"
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer:always
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
[ crl_ext ]
# Extension for CRLs (`man x509v3_config`).
authorityKeyIdentifier=keyid:always
[ ocsp ]
# Extension for OCSP signing certificates (`man ocsp`).
basicConstraints = CA:FALSE
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
keyUsage = critical, digitalSignature
extendedKeyUsage = critical, OCSPSigning
================================================
FILE: ca-builder-scripts/configs/intermediate-ca.config
================================================
[defaults]
crl_url = http://httpd:80/crls.pem # CRL distribution point
[ ca ]
# `man ca`
default_ca = CA_default
[ CA_default ]
# Directory and file locations.
dir = intermediate/
certs = $dir/certs
crl_dir = $dir/crl
new_certs_dir = $dir/newcerts
database = $dir/index.txt
serial = $dir/serial
RANDFILE = $dir/private/.rand
# The root key and root certificate.
private_key = $dir/private/intermediate.key.pem
certificate = $dir/certs/intermediate.cert.pem
# For certificate revocation lists.
crlnumber = $dir/crlnumber
crl = $dir/crl/intermediate.crl.pem
crl_extensions = crl_ext
default_crl_days = 30
# SHA-1 is deprecated, so use SHA-2 instead.
default_md = sha256
name_opt = ca_default
cert_opt = ca_default
default_days = 375
preserve = no
policy = policy_loose
[ policy_strict ]
# The root CA should only sign intermediate certificates that match.
# See the POLICY FORMAT section of `man ca`.
countryName = match
stateOrProvinceName = match
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ policy_loose ]
# Allow the intermediate CA to sign a more diverse range of certificates.
# See the POLICY FORMAT section of the `ca` man page.
countryName = optional
stateOrProvinceName = optional
localityName = optional
organizationName = optional
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
# Options for the `req` tool (`man req`).
default_bits = 2048
distinguished_name = req_distinguished_name
string_mask = utf8only
# SHA-1 is deprecated, so use SHA-2 instead.
default_md = sha256
# Extension to add when the -x509 option is used.
x509_extensions = v3_ca
[ req_distinguished_name ]
# See <https://en.wikipedia.org/wiki/Certificate_signing_request>.
countryName = Country Name (2 letter code)
stateOrProvinceName = State or Province Name
localityName = Locality Name
0.organizationName = Organization Name
organizationalUnitName = Organizational Unit Name
commonName = Common Name
emailAddress = Email Address
# Optionally, specify some defaults.
countryName_default = DE
stateOrProvinceName_default = Berlin
localityName_default = Berlin
0.organizationName_default = Confluent Ltd
organizationalUnitName_default =
emailAddress_default =
[ v3_ca ]
# Extensions for a typical CA (`man x509v3_config`).
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer
basicConstraints = critical, CA:true
keyUsage = critical, digitalSignature, cRLSign, keyCertSign
[ v3_intermediate_ca ]
# Extensions for a typical intermediate CA (`man x509v3_config`).
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer
basicConstraints = critical, CA:true, pathlen:0
keyUsage = critical, digitalSignature, cRLSign, keyCertSign
[ usr_cert ]
# Extensions for client certificates (`man x509v3_config`).
basicConstraints = CA:FALSE
nsCertType = client, email
nsComment = "OpenSSL Generated Client Certificate"
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth, emailProtection
[ server_cert ]
# Extensions for server certificates (`man x509v3_config`).
basicConstraints = CA:FALSE
nsCertType = client, server
nsComment = "OpenSSL Generated Server Certificate"
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer:always
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth, serverAuth
crlDistributionPoints = URI:http://httpd:80/crls.pem
subjectAltName = @alt_names
[ crl_ext ]
# Extension for CRLs (`man x509v3_config`).
authorityKeyIdentifier=keyid:always
[ ocsp ]
# Extension for OCSP signing certificates (`man ocsp`).
basicConstraints = CA:FALSE
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
keyUsage = critical, digitalSignature
extendedKeyUsage = critical, OCSPSigning
================================================
FILE: ca-builder-scripts/create-crl.sh
================================================
#!/usr/bin/env bash
## Generate the certificate revocation list of the intermediate CA.
## Usage: ./create-crl.sh [password]
DEFAULT_PASSWORD=${1:-confluent}
# Fall back to the current directory when CA_ROOT_DIR is unset
# (the "-" expansion keeps an explicitly set, even empty, value).
CA_ROOT_DIR=${CA_ROOT_DIR-.}
source $CA_ROOT_DIR/utils/functions.sh
(cd $CA_ROOT_DIR/ca; create_certificate_revokation_list )
================================================
FILE: ca-builder-scripts/create-pair-certs.sh
================================================
#!/usr/bin/env bash
## Create a private key + signed certificate pair for a host.
## Usage: ./create-pair-certs.sh <hostname> [machine-alt-name] [extension] [password]
##   extension: usr_cert for client auth, server_cert (default) for servers
#HOSTNAME="www.example.com"
#EXTENSION="server_cert" #usr_cert for client auth, server_cert for for backend
#HOSTNAME="my.kafka.consumer"
#EXTENSION="usr_cert"
set -e
HOSTNAME=$1
MACHINE=${2:-""}
EXTENSION=${3:-server_cert}
DEFAULT_PASSWORD=${4:-confluent}
# "pair" (was "part"): the script produces a key and a certificate.
echo "Building a pair of certificates for $HOSTNAME using $EXTENSION"
if [ -z "${CA_ROOT_DIR+x}" ];
then
CA_ROOT_DIR='.'
fi
INTERMEDIATE_CA_DIR=$CA_ROOT_DIR/ca/intermediate
CERT_FILE="$INTERMEDIATE_CA_DIR/certs/$HOSTNAME.cert.pem"
# Refuse to overwrite an existing certificate.
if test -f "$CERT_FILE"; then
RED='\033[0;31m'
NC='\033[0m' # No Color
# trailing \n so the error does not run into the next shell prompt
printf "${RED}Cert $CERT_FILE exist! exiting...${NC}\n"
exit 1
fi
source $CA_ROOT_DIR/utils/functions.sh
# Re-copy the pristine openssl config so [ alt_names ] entries appended by
# previous runs are discarded before we append the ones for this host.
(cd $CA_ROOT_DIR; refresh_openssl_file "$CA_ROOT_DIR" "$INTERMEDIATE_CA_DIR" )
(cd $CA_ROOT_DIR/ca; generate_final_certificate "$MACHINE" )
================================================
FILE: ca-builder-scripts/del-cert.sh
================================================
#!/usr/bin/env bash
## Delete the key, certificate and CSR files generated for a given name.
## Usage: ./del-cert.sh <cert-name>
NAME=$1
# Fall back to the current directory when CA_ROOT_DIR is unset
# (the "-" expansion keeps an explicitly set, even empty, value).
CA_ROOT_DIR=${CA_ROOT_DIR-.}
echo "Deleting CERT $NAME"
INTERMEDIATE_DIR="$CA_ROOT_DIR/ca/intermediate"
rm "$INTERMEDIATE_DIR/private/$NAME.key.pem"
rm "$INTERMEDIATE_DIR/certs/$NAME.cert.pem"
rm "$INTERMEDIATE_DIR/csr/$NAME.csr.pem"
================================================
FILE: ca-builder-scripts/revoke-cert.sh
================================================
#!/usr/bin/env bash
## Revoke a previously issued certificate through the intermediate CA.
## Usage: ./revoke-cert.sh <cert-name> [password]
CERT=$1
DEFAULT_PASSWORD=${2:-confluent}
# Fall back to the current directory when CA_ROOT_DIR is unset
# (the "-" expansion keeps an explicitly set, even empty, value).
CA_ROOT_DIR=${CA_ROOT_DIR-.}
source $CA_ROOT_DIR/utils/functions.sh
(cd $CA_ROOT_DIR/ca; revoke_cert $CERT )
================================================
FILE: ca-builder-scripts/setup-ca-with-intermediate-ca.sh
================================================
#!/usr/bin/env bash
##
# This script builds a Certificate Authority of the form:
#    Root CA -> intermediate CA
#
# In the CA_ROOT_DIR, this script will create the necessary directory structures
# and generate the certificates, all signed using the value provided as an
# argument to this script, or confluent by default.
##
DEFAULT_PASSWORD=${1:-confluent}
export CA_ROOT_DIR=$(pwd)
echo -e "Building the CA root setup\n"
./utils/build-ca.sh "$DEFAULT_PASSWORD"
# (message previously read "intemedite")
echo -e "Building the intermediate CA root setup:\n"
./utils/build-intermediate-ca.sh "$DEFAULT_PASSWORD"
================================================
FILE: ca-builder-scripts/support-scripts/build-ca.sh
================================================
#!/usr/bin/expect
# Drive setup-ca-with-intermediate-ca.sh non-interactively: answers the
# openssl DN prompts for both the root CA and the intermediate CA with the
# values stored in configs/ca-config-vars.

# Read a whole file and return its content as one string.
proc slurp {file} {
set fh [open $file r]
set ret [read $fh]
close $fh
return $ret
}
set timeout 20
# DN defaults, one per line: country, state, locality, organization.
set configslurp [slurp configs/ca-config-vars]
set lines [split $configslurp \n]
set COUNTRY_NAME [lrange $lines 0 0]
set STATE [lrange $lines 1 1]
set LOCALITY [lrange $lines 2 2]
set ORGANIZATION [lrange $lines 3 3]
eval spawn ./setup-ca-with-intermediate-ca.sh
## Generating the data for the CA setup.
expect "Country Name (2 letter code)"
send "$COUNTRY_NAME\r";
expect "State or Province Name"
send "$STATE\r";
expect "Locality Name"
send "$LOCALITY\r";
expect "Organization Name"
send "$ORGANIZATION\r";
expect "Organizational Unit Name"
send "\r";
expect "Common Name"
send "CA\r";
expect "Email Address"
send "\r";
## Generating the data for the Intermediate setup.
expect "Country Name (2 letter code)"
send "$COUNTRY_NAME\r";
expect "State or Province Name"
send "$STATE\r";
expect "Locality Name"
send "$LOCALITY\r";
expect "Organization Name"
send "$ORGANIZATION\r";
expect "Organizational Unit Name"
send "\r";
expect "Common Name"
send "Intermediate-CA\r";
expect "Email Address"
send "\r";
# Sign the certificate and commit
expect "Sign the certificate?"
send "y\r";
expect "1 out of 1 certificate requests certified, commit?"
send "y\r";
# Hand control back to the user for any remaining interaction.
interact
================================================
FILE: ca-builder-scripts/support-scripts/create-cert.sh
================================================
#!/usr/bin/expect -f
# Drive create-pair-certs.sh non-interactively: answers the openssl DN
# prompts with the values stored in configs/ca-config-vars.
# Usage: ./support-scripts/create-cert.sh <cert-name> [machine...]
# (the previously unused proc "create_certs {cert_name,machine}" was removed:
# its Tcl argument list declared a single parameter literally named
# "cert_name,machine" and the proc was never called)

# Read a whole file and return its content as one string.
proc slurp {file} {
set fh [open $file r]
set ret [read $fh]
close $fh
return $ret
}
set timeout 20
# DN defaults, one per line: country, state, locality, organization.
set configslurp [slurp configs/ca-config-vars]
set lines [split $configslurp \n]
set COUNTRY_NAME [lrange $lines 0 0]
set STATE [lrange $lines 1 1]
set LOCALITY [lrange $lines 2 2]
set ORGANIZATION [lrange $lines 3 3]
set cert_name [lindex $argv 0]
set machine [lrange $argv 1 end]
spawn ./create-pair-certs.sh $cert_name $machine
## Generating the data for the CA setup.
expect "Country Name (2 letter code)"
send "$COUNTRY_NAME\r";
expect "State or Province Name"
send "$STATE\r";
expect "Locality Name"
send "$LOCALITY\r";
expect "Organization Name"
send "$ORGANIZATION\r";
expect "Organizational Unit Name"
send "\r";
expect "Common Name"
send "$cert_name\r";
expect "Email Address"
send "\r";
# Sign the certificate and commit
expect "Sign the certificate?"
send "y\r";
expect "1 out of 1 certificate requests certified, commit"
send "y\r";
interact
================================================
FILE: ca-builder-scripts/utils/build-ca.sh
================================================
#!/usr/bin/env bash
## Build the root CA directory structure and generate the root key + cert.
## Usage: ./utils/build-ca.sh [password]
DEFAULT_PASSWORD=${1:-confluent}
if [ -z "${CA_ROOT_DIR+x}" ];
then
CA_ROOT_DIR='.'
fi
source $CA_ROOT_DIR/utils/functions.sh
# -p: do not fail when ca/ already exists, matching build-intermediate-ca.sh
# and making the script safe to re-run.
mkdir -p $CA_ROOT_DIR/ca;
setup_ca_dir_structure "$CA_ROOT_DIR/ca"
cp $CA_ROOT_DIR/configs/ca.config $CA_ROOT_DIR/ca/openssl.cnf
(cd $CA_ROOT_DIR/ca; generate_ca_keys_and_certs )
## Verify the CA certificate
openssl x509 -noout -text -in $CA_ROOT_DIR/ca/certs/ca.cert.pem
================================================
FILE: ca-builder-scripts/utils/build-intermediate-ca.sh
================================================
#!/usr/bin/env bash
## Build the intermediate CA under ca/intermediate and sign it with the root CA.
## Usage: ./utils/build-intermediate-ca.sh [password]
DEFAULT_PASSWORD=${1:-confluent}
if [ -z "${CA_ROOT_DIR+x}" ];
then
CA_ROOT_DIR='.'
fi
# (variable previously misspelled ITERMEDIATE_CA_DIR; paths are now quoted so
# a CA_ROOT_DIR containing spaces does not break the script)
INTERMEDIATE_CA_DIR="$CA_ROOT_DIR/ca/intermediate"
source "$CA_ROOT_DIR/utils/functions.sh"
mkdir -p "$INTERMEDIATE_CA_DIR"
setup_intermediate_ca_dir_structure "$INTERMEDIATE_CA_DIR"
cp "$CA_ROOT_DIR/configs/intermediate-ca.config" "$INTERMEDIATE_CA_DIR/openssl.cnf"
# Generate the intermediate key/CSR, then sign, verify and chain it from the
# root CA directory.
(cd "$INTERMEDIATE_CA_DIR"; generate_intermediate_keys_and_certs)
(cd "$CA_ROOT_DIR/ca"; sign_intermediate_cert_authority; verify_generate_intermediate_ca)
(cd "$CA_ROOT_DIR/ca"; create_ca_chain)
================================================
FILE: ca-builder-scripts/utils/functions.sh
================================================
#!/usr/bin/env bash
# Generate the root CA private key (AES-256 encrypted) and a self-signed root
# certificate valid for 7300 days. Must be run from inside the ca/ directory;
# reads $DEFAULT_PASSWORD from the calling script's environment.
generate_ca_keys_and_certs () {
openssl genrsa -aes256 -passout pass:$DEFAULT_PASSWORD -out private/ca.key.pem 4096
# the key must only be readable by its owner
chmod 400 private/ca.key.pem
# self-sign using the v3_ca extension section of openssl.cnf
openssl req -config openssl.cnf \
-key private/ca.key.pem \
-new -x509 -days 7300 -sha256 -extensions v3_ca \
-passin pass:$DEFAULT_PASSWORD -passout pass:$DEFAULT_PASSWORD \
-out certs/ca.cert.pem
chmod 444 certs/ca.cert.pem
}
# Create the standard OpenSSL CA layout under the directory given as $1:
# certs/, crl/, newcerts/, an owner-only private/ dir, an empty index.txt
# database and a serial counter starting at 1000.
# ($1 is quoted throughout so paths containing spaces work.)
setup_ca_dir_structure() {
mkdir -p "$1/certs" "$1/crl" "$1/newcerts" "$1/private"
chmod 700 "$1/private"
touch "$1/index.txt"
echo 1000 > "$1/serial"
}
# Create the intermediate CA layout under $1: the standard CA layout plus a
# csr/ directory and a crlnumber counter used when issuing CRLs.
# ($1 is quoted so paths containing spaces work.)
setup_intermediate_ca_dir_structure() {
setup_ca_dir_structure "$1"
mkdir -p "$1/csr"
echo 1000 > "$1/crlnumber"
}
# Generate the intermediate CA private key (AES-256 encrypted) and a CSR for
# it. Must be run from inside the intermediate CA directory; reads
# $DEFAULT_PASSWORD from the calling script's environment.
generate_intermediate_keys_and_certs () {
openssl genrsa -aes256 -passout pass:$DEFAULT_PASSWORD -out private/intermediate.key.pem 4096
chmod 400 private/intermediate.key.pem
# the CSR is signed later by the root CA (see sign_intermediate_cert_authority)
openssl req -config openssl.cnf -new -sha256 \
-passin pass:$DEFAULT_PASSWORD -passout pass:$DEFAULT_PASSWORD \
-key private/intermediate.key.pem \
-out csr/intermediate.csr.pem
}
# Sign the intermediate CA CSR with the root CA key (3650-day validity,
# v3_intermediate_ca extensions). Must be run from inside the root ca/
# directory; reads $DEFAULT_PASSWORD from the calling script's environment.
sign_intermediate_cert_authority () {
# signature
openssl ca -config openssl.cnf -extensions v3_intermediate_ca \
-days 3650 -notext -md sha256 \
-in intermediate/csr/intermediate.csr.pem \
-passin pass:$DEFAULT_PASSWORD \
-out intermediate/certs/intermediate.cert.pem
chmod 444 intermediate/certs/intermediate.cert.pem
}
# Print the intermediate certificate and check that it chains to the root CA.
# Must be run from inside the root ca/ directory.
verify_generate_intermediate_ca () {
# verification
openssl x509 -noout -text -in intermediate/certs/intermediate.cert.pem
openssl verify -CAfile certs/ca.cert.pem intermediate/certs/intermediate.cert.pem
}
# Concatenate the intermediate + root certificates into the chain file used
# to verify end-entity certificates (see generate_final_certificate).
create_ca_chain () {
# create the CA chain
cat intermediate/certs/intermediate.cert.pem certs/ca.cert.pem > intermediate/certs/ca-chain.cert.pem
chmod 444 intermediate/certs/ca-chain.cert.pem
}
# Reset the intermediate openssl.cnf from the pristine template in configs/,
# discarding the [ alt_names ] entries appended by previous certificate runs.
#   $1 - CA root directory, $2 - intermediate CA directory
# (arguments are quoted so paths containing spaces work)
refresh_openssl_file() {
ca_root_dir="$1"
intermediate_dir="$2"
cp "$ca_root_dir/configs/intermediate-ca.config" "$intermediate_dir/openssl.cnf"
}
# Create a private key, CSR and signed certificate for $HOSTNAME.
# Relies on shell variables set by the caller (create-pair-certs.sh):
#   HOSTNAME         - certificate/file base name
#   EXTENSION        - x509 extension section to apply (usr_cert / server_cert)
#   DEFAULT_PASSWORD - passphrase protecting the keys
# $1 is an additional DNS alt name added to the certificate.
generate_final_certificate () {
alt_name=$1
# (removed: a leftover `echo "$DEFAULT_PASSWORD"` that printed the password
# to stdout on every run)
# create a private key
openssl genrsa -aes256 -passout pass:$DEFAULT_PASSWORD -out intermediate/private/$HOSTNAME.key.pem 2048
chmod 400 intermediate/private/$HOSTNAME.key.pem
# append the SAN entries consumed via "subjectAltName = @alt_names"
echo -e "" >> intermediate/openssl.cnf
echo -e "[ alt_names ]" >> intermediate/openssl.cnf
echo -e "DNS.1=localhost" >> intermediate/openssl.cnf
echo -e "DNS.2=$alt_name" >> intermediate/openssl.cnf
# create a csr -- use $DEFAULT_PASSWORD for -passin: it was hard-coded to
# "confluent", which broke any run with a non-default password
openssl req -config intermediate/openssl.cnf \
-passin pass:$DEFAULT_PASSWORD -passout pass:$DEFAULT_PASSWORD \
-key intermediate/private/$HOSTNAME.key.pem \
-new -sha256 -out intermediate/csr/$HOSTNAME.csr.pem
# create the cert
openssl ca -config intermediate/openssl.cnf -extensions $EXTENSION -days 375 -notext -md sha256 \
-in intermediate/csr/$HOSTNAME.csr.pem \
-passin pass:$DEFAULT_PASSWORD \
-out intermediate/certs/$HOSTNAME.cert.pem
chmod 444 intermediate/certs/$HOSTNAME.cert.pem
# verify the cert
openssl x509 -noout -text -in intermediate/certs/$HOSTNAME.cert.pem
# verify the chain trust
openssl verify -CAfile intermediate/certs/ca-chain.cert.pem intermediate/certs/$HOSTNAME.cert.pem
}
# Generate the intermediate CA's CRL into intermediate/crl/.
# Must be run from inside the root ca/ directory; reads $DEFAULT_PASSWORD.
# (The function name keeps the historical "revokation" spelling because
# callers in the other scripts depend on it.)
create_certificate_revokation_list () {
openssl ca -config intermediate/openssl.cnf -gencrl \
-passin pass:$DEFAULT_PASSWORD \
-out intermediate/crl/intermediate.crl.pem
}
# Mark the certificate named $1 as revoked in the intermediate CA database.
# Run create_certificate_revokation_list afterwards to publish a fresh CRL.
revoke_cert () {
openssl ca -config intermediate/openssl.cnf -passin pass:$DEFAULT_PASSWORD -revoke "intermediate/certs/$1.cert.pem"
}
================================================
FILE: delegation_tokens/.gitignore
================================================
certs/
================================================
FILE: delegation_tokens/ca.cnf
================================================
[ policy_match ]
countryName = match
stateOrProvinceName = match
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
prompt = no
distinguished_name = dn
default_md = sha256
default_bits = 4096
x509_extensions = v3_ca
[ dn ]
countryName = UK
organizationName = Confluent
localityName = London
commonName = kafka.confluent.local
[ v3_ca ]
subjectKeyIdentifier=hash
basicConstraints = critical,CA:true
authorityKeyIdentifier=keyid:always,issuer:always
keyUsage = critical,keyCertSign,cRLSign
================================================
FILE: delegation_tokens/client.cnf
================================================
[req]
prompt = no
distinguished_name = dn
default_md = sha256
default_bits = 4096
req_extensions = v3_req
[ dn ]
countryName = UK
organizationName = Confluent
localityName = London
commonName=kafka.confluent.local
[ v3_ca ]
subjectKeyIdentifier=hash
basicConstraints = critical,CA:true
authorityKeyIdentifier=keyid:always,issuer:always
keyUsage = critical,keyCertSign,cRLSign
[ v3_req ]
subjectKeyIdentifier = hash
basicConstraints = CA:FALSE
nsComment = "OpenSSL Generated Certificate"
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
subjectAltName = @alt_names
[ alt_names ]
DNS.1=kafka.confluent.local
================================================
FILE: delegation_tokens/docker-compose.yml
================================================
version: '3'
services:
zookeeper:
build: zookeeper/
container_name: zookeeper
hostname: zookeeper
domainname: confluent.local
restart: on-failure
volumes:
- ./certs/:/var/lib/secret
environment:
- KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf
networks:
default:
aliases:
- zookeeper.confluent.local
kafka:
build: kafka/
container_name: kafka
hostname: kafka
domainname: confluent.local
depends_on:
- zookeeper
restart: on-failure
volumes:
- ./certs/:/var/lib/secret
environment:
- KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf
networks:
default:
aliases:
- kafka.confluent.local
ports:
- "9093:9093"
volumes:
secret: {}
networks:
default:
================================================
FILE: delegation_tokens/kafka/Dockerfile
================================================
FROM centos
# MAINTAINER is deprecated; LABEL is the supported replacement.
LABEL maintainer="d.gasparina@gmail.com"
ENV container docker
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install zookeeper and kafka
RUN yum install -y java-1.8.0-openjdk
RUN yum install -y confluent-platform-2.12
# 3. Configure Kafka
COPY server.properties /etc/kafka/server.properties
COPY kafka_server_jaas.conf /etc/kafka/kafka_server_jaas.conf
COPY consumer.properties /etc/kafka/consumer.properties
COPY create_client_properties.sh /etc/kafka/create_client_properties.sh
EXPOSE 9093
CMD kafka-server-start /etc/kafka/server.properties
================================================
FILE: delegation_tokens/kafka/confluent.repo
================================================
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.4/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.4
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
================================================
FILE: delegation_tokens/kafka/consumer.properties
================================================
sasl.mechanism=SCRAM-SHA-256
# Configure SASL_SSL if SSL encryption is enabled, otherwise configure SASL_PLAINTEXT
security.protocol=SASL_SSL
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="kafka" \
password="kafka";
ssl.truststore.location=/var/lib/secret/truststore.jks
ssl.truststore.password=test1234
ssl.keystore.location=/var/lib/secret/client.keystore.jks
ssl.keystore.password=test1234
================================================
FILE: delegation_tokens/kafka/create_client_properties.sh
================================================
#!/bin/bash
# Create a Kafka delegation token and write a ready-to-use client
# configuration (SCRAM authentication via the token) to
# /tmp/delegation_token_client.properties.
set -e
set -u
# Request a new delegation token from the broker; the last output line
# contains the token id and the HMAC as its first two space-separated fields.
RESPONSE=$(kafka-delegation-tokens \
--bootstrap-server kafka.confluent.local:9093 \
--create \
--command-config /etc/kafka/consumer.properties \
--max-life-time-period -1 | tail -1)
TOKENID=$(echo $RESPONSE | cut -d " " -f1)
HMAC=$(echo $RESPONSE | cut -d " " -f2)
echo "Received token id: $TOKENID"
echo "Received message authentication code: $HMAC"
# Emit the client properties file. The single-quoted string is closed and
# reopened around $TOKENID and $HMAC so only those two values are
# interpolated by the shell.
echo 'sasl.mechanism=SCRAM-SHA-256
# Configure SASL_SSL if SSL encryption is enabled, otherwise configure SASL_PLAINTEXT
security.protocol=SASL_SSL
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="'$TOKENID'" \
password="'$HMAC'" \
tokenauth="true";
ssl.truststore.location=/var/lib/secret/truststore.jks
ssl.truststore.password=test1234
ssl.keystore.location=/var/lib/secret/client.keystore.jks
ssl.keystore.password=test1234' > /tmp/delegation_token_client.properties
================================================
FILE: delegation_tokens/kafka/kafka_server_jaas.conf
================================================
// Broker-side JAAS configuration for SASL/SCRAM.
// Username and password are used by the broker to initiate connections to other brokers
// admin is another user allowed to connect to the broker.
// NOTE(review): user_<name>="<password>" entries are a PlainLoginModule
// convention; ScramLoginModule reads credentials from ZooKeeper (registered by
// the ./up script), so user_admin may be ignored here -- confirm it is needed.
KafkaServer {
org.apache.kafka.common.security.scram.ScramLoginModule required
username="kafka"
password="kafka"
user_admin="admin";
};
// The client section is used by kafka to connect to zookeeper.
// This must match the zookeeper jaas configuration.
Client {
org.apache.zookeeper.server.auth.DigestLoginModule required
username="kafka"
password="kafka";
};
================================================
FILE: delegation_tokens/kafka/server.properties
================================================
############################# Server Basics #############################
broker.id=0
# Single SASL_SSL listener; clients authenticate with SCRAM or delegation tokens.
listeners=SASL_SSL://kafka.confluent.local:9093
advertised.listeners=SASL_SSL://kafka.confluent.local:9093
log.dirs=/var/lib/kafka
# Replication factor 1 / min ISR 1 are only suitable for this single-broker demo.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
zookeeper.connect=zookeeper.confluent.local:2181
# TLS Configuration
security.inter.broker.protocol=SASL_SSL
ssl.truststore.location=/var/lib/secret/truststore.jks
ssl.truststore.password=test1234
ssl.keystore.location=/var/lib/secret/server.keystore.jks
ssl.keystore.password=test1234
# Require clients to present a certificate signed by the demo CA (mutual TLS).
ssl.client.auth=required
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
# Secret used to sign/verify delegation tokens -- "foo" is a demo-only value;
# use a strong secret (shared by all brokers) in any real deployment.
delegation.token.master.key=foo
sasl.enabled.mechanisms=SCRAM-SHA-256
sasl.mechanism.inter.broker.protocol=SCRAM-SHA-256
# The broker's own principal bypasses ACL checks.
super.users=User:kafka
================================================
FILE: delegation_tokens/server.cnf
================================================
# OpenSSL request configuration for the Kafka broker certificate
# (used by ./up with -config for the CSR and -extensions v3_req when signing).
[req]
prompt = no
distinguished_name = dn
default_md = sha256
default_bits = 4096
req_extensions = v3_req
[ dn ]
# NOTE(review): "UK" is not the ISO 3166 country code for the United Kingdom
# ("GB" is) -- confirm whether any consuming tooling cares.
countryName = UK
organizationName = Confluent
localityName = London
commonName=kafka.confluent.local
# CA extensions; not referenced by ./up, which signs with -extensions v3_req.
[ v3_ca ]
subjectKeyIdentifier=hash
basicConstraints = critical,CA:true
authorityKeyIdentifier=keyid:always,issuer:always
keyUsage = critical,keyCertSign,cRLSign
# Extensions applied to the issued server certificate.
[ v3_req ]
subjectKeyIdentifier = hash
basicConstraints = CA:FALSE
nsComment = "OpenSSL Generated Certificate"
keyUsage = critical, digitalSignature, keyEncipherment
# Allows the certificate to be used for both server and client authentication.
extendedKeyUsage = serverAuth, clientAuth
subjectAltName = @alt_names
[ alt_names ]
DNS.1=kafka.confluent.local
================================================
FILE: delegation_tokens/up
================================================
#!/bin/sh
# Bootstrap the delegation-token demo: build a demo CA and TLS material,
# start the stack, register SCRAM credentials and ACLs, then create a
# delegation token inside the broker container.
set -e

# Creating TLS CA, Certificates and keystore / truststore
rm -rf certs
mkdir -p certs

# Generate CA certificates
openssl req -new -nodes -x509 -days 3650 -newkey rsa:2048 -keyout certs/ca.key -out certs/ca.crt -config ca.cnf
cat certs/ca.crt certs/ca.key > certs/ca.pem

# Generate kafka server certificates
openssl req -new -newkey rsa:2048 -keyout certs/server.key -out certs/server.csr -config server.cnf -nodes
openssl x509 -req -days 3650 -in certs/server.csr -CA certs/ca.crt -CAkey certs/ca.key -CAcreateserial -out certs/server.crt -extfile server.cnf -extensions v3_req
openssl pkcs12 -export -in certs/server.crt -inkey certs/server.key -chain -CAfile certs/ca.pem -name "kafka.confluent.local" -out certs/server.p12 -password pass:test1234

# Generate client certificates
openssl req -new -newkey rsa:2048 -keyout certs/client.key -out certs/client.csr -config client.cnf -nodes
openssl x509 -req -days 3650 -in certs/client.csr -CA certs/ca.crt -CAkey certs/ca.key -CAcreateserial -out certs/client.crt -extfile client.cnf -extensions v3_req
openssl pkcs12 -export -in certs/client.crt -inkey certs/client.key -chain -CAfile certs/ca.pem -name "kafka.confluent.local" -out certs/client.p12 -password pass:test1234

# Import server certificate to keystore and CA to truststore
keytool -importkeystore -deststorepass test1234 -destkeystore certs/server.keystore.jks \
-srckeystore certs/server.p12 \
-deststoretype PKCS12 \
-srcstoretype PKCS12 \
-noprompt \
-srcstorepass test1234
keytool -importkeystore -deststorepass test1234 -destkeystore certs/client.keystore.jks \
-srckeystore certs/client.p12 \
-deststoretype PKCS12 \
-srcstoretype PKCS12 \
-noprompt \
-srcstorepass test1234
keytool -keystore certs/truststore.jks -alias CARoot -import -file certs/ca.crt -storepass test1234 -noprompt -storetype PKCS12

# Starting docker-compose services
docker-compose up -d --build

# Register SCRAM credentials for the "kafka" user directly in ZooKeeper.
docker-compose exec kafka kafka-configs \
--zookeeper zookeeper.confluent.local:2181 \
--alter --add-config 'SCRAM-SHA-256=[password=kafka],SCRAM-SHA-512=[password=kafka]' \
--entity-type users --entity-name kafka

# Grant the "kafka" principal full access to the "test" topic.
docker-compose exec kafka kafka-acls \
--authorizer kafka.security.auth.SimpleAclAuthorizer \
--authorizer-properties zookeeper.connect=zookeeper.confluent.local:2181 \
--add --allow-principal User:kafka --operation All --topic test

# Create a delegation token and a matching client properties file inside the
# broker container. Use docker-compose exec (like the commands above) so the
# correct container is targeted regardless of its generated name; plain
# `docker exec kafka ...` only works if the container is literally named "kafka".
docker-compose exec kafka /etc/kafka/create_client_properties.sh

echo "Example configuration to access kafka:"
echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka.confluent.local:9093 --topic test --producer.config /tmp/delegation_token_client.properties"
echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka.confluent.local:9093 --topic test --consumer.config /tmp/delegation_token_client.properties --from-beginning"
================================================
FILE: delegation_tokens/zookeeper/Dockerfile
================================================
FROM centos
# MAINTAINER is deprecated; the label form is the documented replacement.
LABEL maintainer="d.gasparina@gmail.com"
ENV container docker

# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all

# 2. Install zookeeper and kafka (single RUN keeps the image to one install layer)
RUN yum install -y java-1.8.0-openjdk confluent-platform-2.12

# 3. Configure zookeeper
COPY zookeeper.properties /etc/kafka/zookeeper.properties
# NOTE(review): the JAAS file is copied here but not referenced by CMD;
# presumably KAFKA_OPTS (-Djava.security.auth.login.config=...) is set via
# docker-compose -- confirm against the compose file.
COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf

EXPOSE 2181
CMD zookeeper-server-start /etc/kafka/zookeeper.properties
================================================
FILE: delegation_tokens/zookeeper/confluent.repo
================================================
# Yum repository definitions for Confluent Platform 5.4 packages,
# installed into /etc/yum.repos.d/ by the Dockerfile.
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.4/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.4
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
================================================
FILE: delegation_tokens/zookeeper/zookeeper.properties
================================================
# ZooKeeper server configuration for the delegation-token demo.
dataDir=/var/lib/zookeeper
clientPort=2181
# 0 disables the per-host connection limit.
maxClientCnxns=0
# Enable SASL authentication and require it for every client; the accepted
# credentials come from the Server section of zookeeper.sasl.jaas.config.
authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider
requireClientAuthScheme=sasl
================================================
FILE: delegation_tokens/zookeeper/zookeeper.sasl.jaas.config
================================================
// ZooKeeper server JAAS configuration. With DigestLoginModule each
// user_<name>="<password>" entry declares an accepted login, so the single
// permitted client is user "kafka" with password "kafka" -- this must match
// the Client section of the broker's kafka_server_jaas.conf.
Server {
org.apache.zookeeper.server.auth.DigestLoginModule required
user_kafka="kafka";
};
================================================
FILE: kafka-connect-mtls/.gitignore
================================================
# Throw-away client key material generated by check-ssl-client-auth.sh.
connect/secrets/client-*.pem
connect/secrets/client.p12
================================================
FILE: kafka-connect-mtls/README.md
================================================
# Kafka Connect REST api ssl client auth
One of the most common questions regarding security on the Kafka Connect REST API is how to prevent unwanted access.
This playbook shows one of the methods currently possible (as of November 2019), using TLS mutual authentication (mTLS).
## Requirements
To be able to execute this playbook you require:
* Docker (19.03 or later)
* Docker compose (1.24.1 or later)
* curl
## Bootstrap the playbook
The playbook bootstrap can be done by executing the ```./up``` script.
### Prepared TLS certificates and keystores
A set of prepared TLS certificates and keystores are available within the _connect/secrets_ directory.
Most relevant ones are:
* _certificate.p12_: TLS certificate used to verify the failure case of mTLS (this is a self-signed certificate)
* _rest-client.p12_: TLS certificate used to verify successful authentication using mTLS (this cert is signed by the same CA as the server identity)
* _server.keystore_ and _server.truststore_: keystores prepared for the Kafka Connect REST server identity.
All these certs have been created with the ca-builder-scripts.
## Verify the connectivity
To verify the connectivity there is a prepared script ```check-ssl-client-auth.sh```.
This script uses curl to verify both a successful and a failed authentication using mTLS.
================================================
FILE: kafka-connect-mtls/check-ssl-client-auth.sh
================================================
#!/usr/bin/env bash
# Verify mTLS on the Kafka Connect REST API: a certificate signed by the
# cluster CA must be accepted, while one from an unrelated CA must be rejected.

# Positive check: reuse the key/cert pair issued by the ca-builder-scripts CA.
# $1 is the certificate name (e.g. "connect").
verify_ok_ssl_client_auth () {
cp -f "../ca-builder-scripts/ca/intermediate/private/$1.key.pem" "connect/secrets/$1.key.pem"
cp -f ../ca-builder-scripts/ca/intermediate/certs/ca-chain.cert.pem connect/secrets/ca-chain.cert.pem
cp -f "../ca-builder-scripts/ca/intermediate/certs/$1.cert.pem" "connect/secrets/$1.cert.pem"
# ":confluent" after the cert path is the passphrase for the private key.
curl --key "connect/secrets/$1.key.pem" --cacert connect/secrets/ca-chain.cert.pem --cert "connect/secrets/$1.cert.pem:confluent" https://localhost:18083
}

# Negative check: build a throw-away CA and client certificate; the server
# does not trust this CA, so the TLS handshake should fail.
verify_ko_ssl_client_auth() {
# -p keeps this idempotent if a previous run aborted before cleanup.
mkdir -p connect/certs
openssl req -new -nodes -x509 -days 3650 -newkey rsa:2048 -keyout connect/certs/ca.key -out connect/certs/ca.crt -config connect/config/ca.cnf
cat connect/certs/ca.crt connect/certs/ca.key > connect/certs/ca.pem
openssl req -new -newkey rsa:2048 -keyout connect/certs/client.key -out connect/certs/client.csr -config connect/config/client.cnf -nodes
openssl x509 -req -days 3650 -in connect/certs/client.csr -CA connect/certs/ca.crt -CAkey connect/certs/ca.key -CAcreateserial -out connect/certs/client.crt -extfile connect/config/client.cnf -extensions v3_req
openssl pkcs12 -export -in connect/certs/client.crt -inkey connect/certs/client.key -chain -CAfile connect/certs/ca.pem -name "connect" -out connect/certs/client.p12 -password pass:confluent
cp connect/certs/client.p12 connect/secrets/client.p12
rm -rf connect/certs
# Split the PKCS#12 bundle into the separate PEM pieces curl expects.
openssl pkcs12 -in connect/secrets/client.p12 -out connect/secrets/client-ca.pem -cacerts -nokeys -passin pass:confluent -passout pass:confluent
openssl pkcs12 -in connect/secrets/client.p12 -out connect/secrets/client-client.pem -clcerts -nokeys -passin pass:confluent -passout pass:confluent
openssl pkcs12 -in connect/secrets/client.p12 -out connect/secrets/client-key.pem -nocerts -passin pass:confluent -passout pass:confluent
curl --insecure --key connect/secrets/client-key.pem --cacert connect/secrets/client-ca.pem --cert connect/secrets/client-client.pem:confluent https://localhost:18083
}

echo "Check SSL client auth with an unknown certificate"
verify_ko_ssl_client_auth
echo ""
echo ""
echo "Check SSL client auth with a valid client"
verify_ok_ssl_client_auth "connect"
================================================
FILE: kafka-connect-mtls/connect/config/ca.cnf
================================================
# OpenSSL configuration for the throw-away CA built by the negative check in
# check-ssl-client-auth.sh (a CA the Connect server deliberately does not trust).
[ policy_match ]
countryName = match
stateOrProvinceName = match
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
prompt = no
distinguished_name = dn
default_md = sha256
default_bits = 4096
x509_extensions = v3_ca
[ dn ]
countryName = DE
organizationName = Confluent
localityName = Berlin
commonName = connect.confluent.local
# Extensions for the self-signed CA certificate.
[ v3_ca ]
subjectKeyIdentifier=hash
basicConstraints = critical,CA:true
authorityKeyIdentifier=keyid:always,issuer:always
keyUsage = critical,keyCertSign,cRLSign
================================================
FILE: kafka-connect-mtls/connect/config/client.cnf
================================================
[req]
prompt = no
distinguished_name = dn
default_md = sha256
default_bits = 4096
req_extensions = v3_req
[ dn ]
countryName = UK
organizationName = Confluent
localityName = London
commonName=connect.client
[ v3_ca ]
subjectKeyIdentifier=hash
basicConstraints = critical,CA:true
authorityKeyIdentifier=keyid:always,issuer:always
keyUsage = critical,keyCertSign,cRLSign
[ v3_req ]
subjectKeyIdentifier = hash
basicConstraints = CA:FALSE
nsComment = "OpenSSL Generated Certificate"
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
subjectAltName = @alt_names
[ alt_names ]
DNS.1=localhost
================================================
FILE: kafka-connect-mtls/connect/secrets/ca-chain.cert.pem
================================================
-----BEGIN CERTIFICATE-----
MIIF4TCCA8mgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwgYExCzAJBgNVBAYTAkRF
MQ8wDQYDVQQIDAZCZXJsaW4xDzANBgNVBAcMBkJlcmxpbjEWMBQGA1UECgwNQ29u
Zmx1ZW50IEx0ZDELMAkGA1UECwwCUFMxCzAJBgNVBAMMAmNhMR4wHAYJKoZIhvcN
AQkBFg9jYUBjb25mbHVlbnQuaW8wHhcNMTkxMTEyMTExMTMxWhcNMjkxMTA5MTEx
MTMxWjB9MQswCQYDVQQGEwJERTEPMA0GA1UECAwGQmVybGluMRYwFAYDVQQKDA1D
b25mbHVlbnQgTHRkMQswCQYDVQQLDAJQUzEYMBYGA1UEAwwPSW50ZXJtZWRpYXRl
LUNBMR4wHAYJKoZIhvcNAQkBFg9jYUBjb25mbHVlbnQuaW8wggIiMA0GCSqGSIb3
DQEBAQUAA4ICDwAwggIKAoICAQDMTnQGumpx9Byuq+z1APGFFp3lYDIY8rbScRkk
dXQ1tukad9aOtQtGWfShOeQ7wndKvdYzcPN+AnGoszjd/gVeRDiXo0rEMGYgyglW
vC1C88LWy/Wg26mgGTkMpeUfgN25lpfOUlzh0bVtPfcw154DXd5HZiHOiJ6CXytx
bf+8M99SkQ0+X6MH2EBXMbOJBGGzZycMON7ONhdGhBrmClMHPMomMnprdL/W2TKa
6SGXpxh3lLGYREqBSUlP6Owt0SMf/V5enbNrtllzmliBZbiUraGLohKDz8c8rcCX
zz3zoBGkEGAJEFbXqoMa7OcVb2InWJTkfN2E4Z2akU6+9u6pvQ+q2e3KxHR42ure
P4ilI6wzJ43UaPyFPzFpRd16akvIEDkdACU1FmlajW69Haye7Ey1HoI2tiJHDPAN
JJbWUl59FS2DeFt7KvlBHdy81Z3D/G9QjSkOGCItabQLF8Jum2d5qMdekb3X8fTi
2SPeDcgPyRat+6gKqsINzM6ChnXA0CFMbSGN6XpxARcxd95HR9zIStGdVKcPtnaN
Q8o3/ehTNC8DVuT2LLlSwdAAElgZ/EHtfW7rrOBDgIMV8946m5r2tmf/K/A1vEUU
4+QD66IX4nleK+OGuUexF1xv2UTzGhmk/VJsvx3AjPQgQLGqT2aRm6I6NEeWK/CY
jB597QIDAQABo2YwZDAdBgNVHQ4EFgQUJscsGzI7qLGpXsGdjRsvE6NXG0EwHwYD
VR0jBBgwFoAU1K1JidhNRLmWIeRKobCfr8QhHQUwEgYDVR0TAQH/BAgwBgEB/wIB
ADAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBAMCpI/ojEZc28fYF
VbM9XYfPY6M8WRU39SSPZO5dknVmP16r6TInD2ZECfW/5XkqXPtvj3wcvXJ4/tLN
iS0DIzXbUlsO9sedDSCcAq251c2MIfROKj3E93X66PAVSn1SWVRgqlyD62G3Nv2E
Z+V6vnE6oEXdrIucM4dUdvoz9Qnr0pZCchMYyFVdydAfvHu7wJ8PqVGgNCSgStuI
aQQUiKLFSnWOr+EJ8rZ8eGpgCWbeQ34ww8SlyjEPbkXAblsv0rH/R17GITIkFa69
SHjOChvpOWcvZ0SLwisDShVBlRcO2ypwfo5avQCYBmj27D2U7htUmn+QwhntOSAB
sAGUGeeoBgCKI7uNBuP3lXeaSXNWqsNXm7wSqsNJAPGVQzgA7kM1yjzdawCuAEzR
JdFp0Y5juUaZ1FL6xnpvzSpBYBBs2o9CwtuBkBAnuG88AUwC7JR4URb0qb7/DbS0
efbFPvUDohMAfYgUPXu1FemIuRicN99JoQLArEutKteGw4tX6W5oGckb6iBeQMe/
C+Aw+RLEOyVwbfeF6lVfn4iZfn8A0SCqYRD+vStgKrb7LxPPl8/vTpgoiDH3qulD
mV1vqeT3ESFvEnEXVd/QozDTn/AZsna3C2cp902GPEH3vV+h3ECRDZoaxuSdSzlg
0d/kGEPjjbgBDD5IJ6mNgsiIBfiX
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIF6jCCA9KgAwIBAgIJAIopqcNHix7tMA0GCSqGSIb3DQEBCwUAMIGBMQswCQYD
VQQGEwJERTEPMA0GA1UECAwGQmVybGluMQ8wDQYDVQQHDAZCZXJsaW4xFjAUBgNV
BAoMDUNvbmZsdWVudCBMdGQxCzAJBgNVBAsMAlBTMQswCQYDVQQDDAJjYTEeMBwG
CSqGSIb3DQEJARYPY2FAY29uZmx1ZW50LmlvMB4XDTE5MTExMjExMTA1OVoXDTM5
MTEwNzExMTA1OVowgYExCzAJBgNVBAYTAkRFMQ8wDQYDVQQIDAZCZXJsaW4xDzAN
BgNVBAcMBkJlcmxpbjEWMBQGA1UECgwNQ29uZmx1ZW50IEx0ZDELMAkGA1UECwwC
UFMxCzAJBgNVBAMMAmNhMR4wHAYJKoZIhvcNAQkBFg9jYUBjb25mbHVlbnQuaW8w
ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDOZyDRn9Zmv8WqhR6a8Sr5
GXa3QO4yNB5F9PSAe4xXPOkDj14Om0cS/oXYfy/uJyVb3zxPMcF4Mz/WSQTATh2Y
IunkYWqsrDZbgMG2ERdmSKvALKTd8mVFpCSzrhAvv8lZcRX0/jegjyMo2fFjRHfb
dtwE341Ywgvf5gm0HH6UzbPIY9Xd/PmHJ1kRb+wJnRxs7T00l0pXZMYafs9tbJPv
8tfrYjIM5PLxLAtlMZ1yLR1Ay8QMm5q4ZtTOQjlGOovWpPHHEhM3RealpYk1dC84
ngDpTI5zpf/Q0o4cn8aOACiy7fmm1dNMBR0Yu1JSLINxU+P0g3fNZu1PQjF13crm
hGkcNAm6k0AMYwspNLthsPdOTgHfnv6/L/VkqGIBYU+gd0G2ZjbgpEHesQM91A42
jBneY1uFhcTjmAmx+MV63BWutFrkl1ErG4raYNIdUtGlsGhJ71AmOtGj3A8YyQAm
LuzjZTQmkyP0saDa0wpsI7+lSZ5K5cJSQBMUf0bOQPspMmHfsO7H//RbmxdCiIHx
g0fCfKX3oyungProDglptDYaNwE8oF0B1vl28cwlOJm7N9DVUsDc1bZwnfdquYD7
q3rBkjZHyqmnsSVAOHvkToaHkkS7x+pFz/mPj/2Yo6h4Ujgrk4GO+vqQszk2Rp4Y
58WbQ4Nq1qyrVIF0FgYjFwIDAQABo2MwYTAdBgNVHQ4EFgQU1K1JidhNRLmWIeRK
obCfr8QhHQUwHwYDVR0jBBgwFoAU1K1JidhNRLmWIeRKobCfr8QhHQUwDwYDVR0T
AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBABmT
rHBFhz55quEG+/h3CiMKXpdld3cf27zgO2liEw/F3Xu04QaSpwsVZEOhuWjmzUA/
lSfrbWeV/jNQz0Gs/rDdMUEDbNN+vsC4+u4xbAe9OYLsRJ5v1/PB7P/JxfhnRQLr
3vQSUm7oEkhVxmTO0mFxpbrkcglAgA0siSWI2Kj0qTNeSxOW9PKOQuPqH9bTHRBt
KoiR5CAealT7EoWUhZaNgLLOJ0fsIlPbXfAeat2BVRZAYQFQOJIOouZES49Yc+qF
r6T7AVysk7pRilQWVovBOqSKzHTOKBKTJvbO23lYAOREc+UM694dPh480i6TcGEI
4iYyC2/GCK4aUCCE0WTTqSErL0fNmjVz68lpuBjUS06+dGy2p6bW8fso6ttqAry+
jbEq6Ir+P9vZbvHOPc3JyaBKGyTRwA4jcrXQ5KD5LJwBd+9/h3elciNsJtd8mHdE
2T3IqqsXdqN7gyaqwNQsk59iSkCr3cJGqvpsdVPXseadv9rcV5hxAjvWa4mLRiTL
Cxhs4CuaXz9kyss9oM1kWD0Dz3sIGwxtOcSREIqc8vclztjnpPMdly2lsK16hyfs
q2P7yFurz6ruIxLR/f8SEalQ/Eqn8kOgwwHNuhaMHsaeS3o3Guwr+KHRm0jGNO/N
ZSm2YS9n3ghEYhUqfzuJCWuaeFELeOBb5wUx01Va
-----END CERTIFICATE-----
================================================
FILE: kafka-connect-mtls/connect/secrets/connect.cert.pem
================================================
-----BEGIN CERTIFICATE-----
MIIGIDCCBAigAwIBAgICEAEwDQYJKoZIhvcNAQELBQAwfTELMAkGA1UEBhMCREUx
DzANBgNVBAgMBkJlcmxpbjEWMBQGA1UECgwNQ29uZmx1ZW50IEx0ZDELMAkGA1UE
CwwCUFMxGDAWBgNVBAMMD0ludGVybWVkaWF0ZS1DQTEeMBwGCSqGSIb3DQEJARYP
Y2FAY29uZmx1ZW50LmlvMB4XDTE5MTExMzExMjMxOVoXDTIwMTEyMjExMjMxOVow
gYwxCzAJBgNVBAYTAkRFMQ8wDQYDVQQIDAZCZXJsaW4xDzANBgNVBAcMBkJlcmxp
bjEWMBQGA1UECgwNQ29uZmx1ZW50IEx0ZDELMAkGA1UECwwCUFMxFjAUBgNVBAMM
DWthZmthLWNvbm5lY3QxHjAcBgkqhkiG9w0BCQEWD2NhQGNvbmZsdWVudC5pbzCC
ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMjwmO0YAZV/k5DgfC48DZsp
djQixKEi6TLAdt2CovemFGe38DhltaIs+BiGl9ZpTC1WTbNnkwqFm3N+7+cg4I3d
v4nYQw4RN68os9vQ+Jp/jIdVSXDP7n17pw8YOhrhQx5XMnudtghCHtp+rwG8a5KF
IKe9zpWeu2mXlF1LxrWybbaBzJ8E7u4Gsr+suhED5dH9ckFCnLk0/9NMym7XSMnK
/158JKhmElCanZmLLGwq+38ko6C/BgPbdaRwlKG+tHWY9Iqrt+tRHgvXclutMHci
ZgiAApS4pqOey5MWisb8yZs5SP14x8wAzyygLeNGr7+CcP+Ubcn71FvwhSodQ7EC
AwEAAaOCAZgwggGUMAkGA1UdEwQCMAAwEQYJYIZIAYb4QgEBBAQDAgbAMDMGCWCG
SAGG+EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRlZCBTZXJ2ZXIgQ2VydGlmaWNhdGUw
HQYDVR0OBBYEFAYy4GQt1eh6QaxmF4OyFBsK8xGWMIGvBgNVHSMEgacwgaSAFCbH
LBsyO6ixqV7BnY0bLxOjVxtBoYGHpIGEMIGBMQswCQYDVQQGEwJERTEPMA0GA1UE
CAwGQmVybGluMQ8wDQYDVQQHDAZCZXJsaW4xFjAUBgNVBAoMDUNvbmZsdWVudCBM
dGQxCzAJBgNVBAsMAlBTMQswCQYDVQQDDAJjYTEeMBwGCSqGSIb3DQEJARYPY2FA
Y29uZmx1ZW50LmlvggIQADAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYB
BQUHAwIGCCsGAQUFBwMBMCkGA1UdHwQiMCAwHqAcoBqGGGh0dHA6Ly9odHRwZDo4
MC9jcmxzLnBlbTAUBgNVHREEDTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQAD
ggIBAIpE7+Cu5jwv1u+TpQb2qxmR5kt6W90GySiSW0Ky+NQUL9WpbTYuGE6avpO/
83hIf620JGTwgnwVULBaC9i8+wux7stM2vPLOaC54eDYtKgY91wWnmt061UZsie6
FgqpNnev2U8WukxdFWdjvNQ3sWnz/aeTiaeQYmuZjSjm/sgwh92pSGqpYnuGdyc+
e/dXxkV+lRHbMdRMoQi2ONdNQ+UZ1tJVBLjpRRaa4mq6/uemYCZ8T/BLs6Tuob2C
NmC/x6LAntbMUaHD8PGWNlqoX//XGKwZlUvn9h+eVHgjBBbdl+69IJG6UXx7T+sV
i3dY1RhQewfOAAdh6nFHL3tHfpkH+RnsEswq5DJXQtwQXGwtUv6fu9Aq/J+dGlua
YpXDwdTZYCh6i2H8WfbPFAAgZfJEupNuC5EJh/BMoYEcCkLABs7ZHgQs5xVzHL/i
li51D8gu1IqcH3+JQfQ74knDp5tWxvhQe+4m5rwct1D8IvTEOeqclvbPI4tzHmNF
rNbk7lSCicq9axGuyrUB/Y8zDnCNThhnxjQdHm1rhJYi1IXV4BvwSBpzLocjVpRa
T8C+fzAtk/VejGBf4zHjnu8xy7AO5hrF+ho7lAhIWZ7bVe7sZCe9D/+xgkxLP1uA
KKhCtMklziYuNoRMpZ+be9e2OHu7p05Yj4AjhHTyMvizQLm2
-----END CERTIFICATE-----
================================================
FILE: kafka-connect-mtls/connect/secrets/connect.key.pem
================================================
-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: AES-256-CBC,03578ECB28A28BA14408AF4EB2F82DCB
hQkZlH8VnKBOMDtLMQT85fvYzoBYCEyjif/1/6QPid1asfxp7LGAiIEVM/sSecRz
axGneC1B9dp1gEY09XISa2ChZwGY+vUfmpOdpyi+aeSLUeF05jsPS16fbvDfo8t4
JnMk8XsvXs1SmXBtMvcFDnqVxWYp/kBNk2CrM24/aRgN14pykSEu0Uy5NhUGbQ/D
vdhFq1BdWOapQQN0Oi7O5vtttOapiLKPPKaewGGV1LhyG2rJEoxj5JzrpIj2N6mB
yjzGaHkNdvFi2BoHb/UIAaKSX5Kn+B1MW4m7eJ3YuogvBnlmr6pZaa9xxYOI+fV8
eM1JG0U1P477dqLJLRCRTtYZACmXbRy5/WV1TewKjw/ij7QHQ68ISQa2748X1gL6
09jL5Grux/gJSDuJrhuSMyPwYSJNx5585/HLLQseKOFdvqFbHAjfd2/ZptaBpxp4
jbkylbVxvroPZQjRhj1p0v3CkUCWYUg3CSkzNLR5Y21AvqH5ZHCbjerfZvrPp3Wc
nHShzc18wUruHmT8dwdDSb7s5OJRFkEDLNFsCsijtl47yDwaQoazeJ8UXkQ/q+FR
iIfctz0JZrWXbH89nr3i3cjwGOxQmPuMiCypYov0YezuWwyiqQMM2r62nyFTCUQQ
ooqh3OV02suBNn2GXnrXwzdCoCgcL61a2l4+rHu8rsKHKX3VzEk+SP/WsgO65KAg
jCSRV3zVTWTRbXmvFM+tv6ARDKgvhJUYAC4zuP5ZQJHsoLXhsJ/nHjlUVFVub9aT
+BieN64UHih7lfKzK3OZJAuu5hSMY8vA8JuAkBoNNKB+CEwQnakhEQB6u87s9xgO
GBu6med8u0isWI9uSwZ0u2/MaELRmcx6MvjdOFh8TIWU1vdtcf8F8avyP9wxGBMq
PeFxYJ+qCx8tRUHgO6QmE/cZKmQr//mtZgOMjVwVnysmQLh8Shn1WW2FHhqWauAo
FC2PJPw4aribkG8/O/mVx0P5/bcgHL8N7S1DWLUFsMzMYJJ35CNOG94cOFSAWFRr
2mJLRzJFwxuh1S9S/SwqMhdz96I4OsmKAVp6iKDVusR6qBoAAjkbLCAJqF883FHo
Urfgr5lLx/9wG6E+zGgQOC1bKsFTlSEVQ7vVLizG2etDytOvGM70Gz5ecyDY1zFl
1D9hGLhoJtFWE8U4CLPI6pQrKXRftnUV2RbvmsgRBuHBm/HeBLbNBsuDNK3WCzvI
YRzBpyXOblcENvj258yVtfqjRAR2b5hWeRjdyCZjxNq2S7f1Qow0nhPxe2Fq3JLu
nGstpUt1gwKNstoMEVwYI8TzFP4kRzx5H3w2EgjzxWoybJXqANW3XHySBMcim6NR
QMnn30bcjMI8vIe1AaL+AKskNBf4aVj+4IzvC6L+1yrzI5l4KfWbcJJk+q/rTXdQ
mwy4DW5LfenlZoh8zQIGdHKAbdrFwI0gk0pX3Bjy69+1QAy1gNPqe5L9IUMmbsZE
hueSRSsPgI7PDT2hv8XeoWuy+Un6/l4E34F2WvtR802kaYwgeRZIcJrFV8+yALvt
awVcFBkjmWFRjGLFG7/f29+n998g31FqynKU9NmPL49aB8UfQBrtLY07f6snYPA2
-----END RSA PRIVATE KEY-----
================================================
FILE: kafka-connect-mtls/docker-compose.yml
================================================
---
# Demo stack for securing the Kafka Connect REST API with mutual TLS:
# single-node ZooKeeper/Kafka, a Schema Registry, and a Connect worker whose
# HTTPS REST listener requires a trusted client certificate.
version: '3'
services:
zookeeper:
image: confluentinc/cp-zookeeper:5.3.1
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
kafka:
image: confluentinc/cp-enterprise-kafka:5.3.1
depends_on:
- zookeeper
ports:
- 9092:9092
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
# Two plaintext listeners: one for in-network clients (kafka:29092),
# one advertised to the host (localhost:9092).
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092
KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
# Replication factor 1 is only suitable for this single-broker demo.
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 100
CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: kafka:29092
CONFLUENT_METRICS_REPORTER_ZOOKEEPER_CONNECT: zookeeper:2181
CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
CONFLUENT_METRICS_ENABLE: 'true'
CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'
schema-registry:
image: confluentinc/cp-schema-registry:5.3.1
depends_on:
- zookeeper
- kafka
ports:
- 8081:8081
environment:
SCHEMA_REGISTRY_HOST_NAME: schema-registry
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper:2181
kafka-connect-cp:
image: confluentinc/cp-kafka-connect:5.3.1
container_name: kafka-connect
depends_on:
- zookeeper
- kafka
- schema-registry
ports:
- 18083:18083
# Keystore/truststore prepared for the REST server identity (see README).
volumes:
- ./connect/secrets:/etc/kafka-connect/secrets
environment:
CONNECT_BOOTSTRAP_SERVERS: "kafka:29092"
CONNECT_REST_PORT: 18083
CONNECT_GROUP_ID: kafka-connect-cp
CONNECT_CONFIG_STORAGE_TOPIC: docker-kafka-connect-cp-configs
CONNECT_OFFSET_STORAGE_TOPIC: docker-kafka-connect-cp-offsets
CONNECT_STATUS_STORAGE_TOPIC: docker-kafka-connect-cp-status
CONNECT_KEY_CONVERTER: io.confluent.connect.avro.AvroConverter
CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081'
CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081'
CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
CONNECT_REST_ADVERTISED_HOST_NAME: "kafka-connect-cp"
CONNECT_LOG4J_ROOT_LOGLEVEL: "INFO"
CONNECT_LOG4J_LOGGERS: "org.apache.kafka.connect.runtime.rest=WARN,org.reflections=ERROR"
CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: "1"
CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: "1"
CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: "1"
CONNECT_PLUGIN_PATH: '/usr/share/java'
# Serve the REST API over HTTPS on 18083 and require a client certificate
# signed by a CA in server.truststore (this is the mTLS being demonstrated).
CONNECT_LISTENERS: 'https://0.0.0.0:18083'
CONNECT_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: "HTTPS"
CONNECT_LISTENERS_HTTPS_SSL_TRUSTSTORE_LOCATION: /etc/kafka-connect/secrets/server.truststore
CONNECT_LISTENERS_HTTPS_SSL_TRUSTSTORE_PASSWORD: confluent
CONNECT_LISTENERS_HTTPS_SSL_KEYSTORE_LOCATION: /etc/kafka-connect/secrets/server.keystore
CONNECT_LISTENERS_HTTPS_SSL_KEYSTORE_PASSWORD: confluent
CONNECT_LISTENERS_HTTPS_SSL_CLIENT_AUTH: required
================================================
FILE: kafka-connect-mtls/up
================================================
#!/usr/bin/env bash
# Bring up the mTLS demo stack. Abort if docker-compose fails so the hint
# below is not printed misleadingly after a failed startup.
set -e
docker-compose up -d
echo "to verify the connection use the check-ssl-client-auth.sh script"
================================================
FILE: kerberos/README.md
================================================
# Kerberos configuration demo
This demo sets up a basic Kafka cluster secured with Kerberos authentication, and sets up some basic ACLs to demonstrate authorisation.
The documentation below introduces the relevant components you need to understand to set up Kerberos in a Linux / JVM environment.
## Kerberos authentication process
Before configuring Kafka for Kerberos authentication, it is useful to understand the basics of Kerberos; the authentication process and some key terms.
_A note on what Kerberos is and is not: Kerberos is a *network authentication protocol* which allows a client application to connect to a network service in a way that allows the components to mutually verify each other's identities._
_It is put to good use in and integrated with network directory services, notably Windows Active Directory._
_Here, Kerberos identities are bound to network accounts and access privileges and, in the case of Windows, the SSPI API supports single sign-on and privilege impersonation natively in the OS._
_This is enabled by Kerberos, but Kerberos itself is not bound to such accounts and does not provide any such capability._
With that, let's work through the process for a client application making a connection into Kafka.
Kerberos involves three parties:
- a Kerberos Client, in this case our client application.
- a Kerberized Service, in this case Kafka.
- the Kerberos **Key Distribution Center (KDC)**
An important point to understand in this process is that the Client and Service each shares their own cryptographic key with the KDC.
By using this key to encrypt/decrypt tokens passed over the network, two network systems can verify each other's identities.
The Client and Service trust that they have only shared their secret with the KDC and so any correctly signed token must have originated from the KDC.
This is crucial.
During the Kerberos process the Client requests a token from the KDC _signed with the Service's key_ and presents this when making a connection.
The Service can then trust that the Client has valid credentials with the KDC and can be authenticated.
Other information is shared during the process to enable integrity checking and protection against various spoofing attacks.
For example, each signed token is:
* timestamped to bound the window for which it is valid
* linked to a network IP so that it is valid only from a single host
The first stage is that the Client application must authenticate itself with the KDC by proving that the Client knows the private credentials relating to the Client's Kerberos **Principal**.
The Principal is a unique identity in the form {primary}/{instance}@{REALM} (more on these later).
The KDC authenticates the client using their shared cryptographic key and results in the client receiving a **Ticket Granting Ticket (TGT)**.
This is a cryptographic token that the Client may now use to prove that it has recently authenticated with the KDC.
The TGT is timestamped and includes an expiry time, typically a day.
The TGT is cached by the client to avoid having to re-authenticate unnecessarily.
Next, the Client wants to authenticate itself to the Kerberized Service.
For this to happen, the client must get a cryptographic token encrypted with the Kerberized Service's key - this token is a **Service Ticket** and is requested by the client from the KDC using the TGT and the requested service's principal name.
Including the TGT in this request is sufficient to prove that the client has already authenticated with the KDC allowing the service ticket to be returned.
Here is an important point to note - how does the client know the service principal name?
Very simply, it builds the principal with:
- {primary} = a client-side configured name for the service
- {instance} = the network address used to connect to the service
- {REALM} = the realm of the client and KDC.
In our example, our Client attempts to connect to the `kafka` Service on the host `kafka.kerberos-demo.local` in the realm `TEST.CONFLUENT.IO`.
Therefore, the service must be configured with a Service Principal Name of `kafka/kafka.kerberos-demo.local@TEST.CONFLUENT.IO`.
Now the Client can connect directly to the Kerberized Service, and include the Service Ticket.
As the Service ticket is signed with the Service principal's key, the Service can decrypt the token to authenticate the request.
Based on the above, each connection in the cluster must be established with the following in place:
* On the Kerberos Client:
* A client principal and key to authenticate with the KDC, `{client name}@REALM`
* This is the *User Principal Name*.
* a configured name for the service to connect to, `{service name}`
* the network address for the service, `{network address}`.
* On the server:
* a principal name & key in the form `{service name}/{network address}@REALM`.
* This is the *Service Principal Name*.
As can be seen, the service principal must be constructed correctly to work.
However, the `{client name}` format is not mandated in the same way and is not bound to a network address.
Often the client name is a simple alphanumeric username, let's say 'john'.
However, you may sometimes see a client principal such as 'john/admin'.
In this form, 'admin' is called an _instance_ of the 'john' principal and can be used by 'john' to run services on the system with different credentials and privileges from the main account.
From the Kerberos perspective, the two principals are completely separate, but it can nonetheless be convenient to use this naming convention.
# Technical Components
## KDC
The KDC could be provided by MIT Kerberos, Windows Active Directory, Redhat Identity Manager and many others.
In this demo we use MIT kerberos.
## Kerberos libraries and tools
All the hosts must include Kerberos libraries and a shared configuration (krb5.conf) in order to use and trust the same KDC.
`kinit` is used to authenticate to the Kerberos server as principal, or if none is given, a system generated default (typically your login name at the default realm), and acquire a ticket granting ticket that can later be used to obtain tickets for other services.
`klist` reads and displays the current tickets in the credential cache (also known as the ticket file).
`kvno` acquires a service ticket for the specified Kerberos principals and prints out the key version numbers of each.
`kadmin` is an admin utility for working with the Kerberos database.
A common task when configuring for Kerberos is to build *keytab* files (short for Key Table).
Keytabs are files containing one or more Kerberos principal/credential pairs.
By having these in a file, services can automatically authenticate with the KDC without prompting the user and it is common to build and distribute keytabs as part of a deployment.
However, _as these files contain secret credentials, it is important to take care to protect against loss of these files_.
See [kerberos cheatsheet](../KerberosCheatsheet.md) for examples of using the Kerberos toolset.
## Simple Authentication and Security Layer (SASL)
SASL is a framework for authentication in network communications which in principle decouples authentication concerns from the application protocol.
Kafka and Zookeeper can use SASL as the authentication layer in communications (Mutual TLS being the notable alternative).
When SASL has been enabled, you must further specify a SASL *mechanism* to use - the process and protocol to use when authenticating a connection.
Applications must build support for each SASL mechanism - Kafka supports SCRAM(-SHA-256 | -SHA-512), PLAIN, OAUTHBEARER and GSSAPI.
*GSSAPI is the SASL mechanism which implements Kerberos*.
## Java Authentication and Authorization Services (JAAS)
JAAS is Java's integrated, pluggable security service, and Kafka uses the JAAS APIs to implement SASL authentication.
SASL authentication is configured using JAAS.
Kerberos is configured using the JAAS *LoginModule* `com.sun.security.auth.module.Krb5LoginModule`.
JAAS may be configured in a couple of places:
* By default it uses a .jaas file, a reference to which is passed in the `-Djava.security.auth.login.config=<file path>` JVM flag.
Each jaas file includes multiple named stanzas, representing different login contexts.
* An application can override this configuration and configure JAAS from application config.
Kafka configurations expose this option using properties `sasl.jaas.config`, which can variously be prefixed.
The value is the inline configuration for a single login context and, in Kafka, takes precedence over entries in a .jaas file.
https://docs.oracle.com/javase/8/docs/jre/api/security/jaas/spec/com/sun/security/auth/module/Krb5LoginModule.html
A Kerberos enabled Client or Service can be initiated in two ways:
1. Use `kinit` to cache a TGT locally, and then launch the process with this shared cache.
2. Configure a keytab to be used directly.
Configuration of the former is straight-forward as follows:
```
SomeLoginContext {
com.sun.security.auth.module.Krb5LoginModule required
useTicketCache = true;
};
```
The `useTicketCache = true` setting specifies that the TGT cache should be used.
By comparison, the latter approach has `useTicketCache = false` (the default) and then continues to specify details for using a keytab file:
```
SomeLoginContext {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/var/lib/secret/kafka1.key"
principal="kafka/kafka1.kerberos-demo.local@TEST.CONFLUENT.IO";
};
```
The login context, as identified with `SomeLoginContext` above, can be used by a Client, a Service or both.
For Kafka, the names are defined in the application code as we will describe later.
# Kerberizing Kafka
To fully understand the steps required to Kerberize Kafka, we should understand each Client → Service connection which we wish to configure.
Each of these connections has a prototypical set of configurations required on the Client side and on the Service side.
The following are values you must decide upon at the cluster level:
* `{kafka-kerberos-service-name}` - name for the Kerberized Kafka service.
Typically `kafka` or `cp-kafka`.
* `{zookeeper-kerberos-service-name}` - name for the Kerberized Zookeeper service.
By default this is `zookeeper`.
* `{security-protocol}` - either `SASL_PLAINTEXT` or `SASL_SSL` when used in conjunction with TLS.
## Service Configurations
In a single node Broker/Zookeeper environment there are just two Kerberized services running.
We will configure these first and then the clients.
### Kafka Service
* Broker JAAS:
* Login Context: `KafkaServer`
* Use *keytab* method.
* Ensure that the principal is a correctly formed service principal for each node: `{kafka-kerberos-service-name}/{FQDN}@{realm}`.
* Example: [kafka/kafka.sasl.jaas.config](kafka/kafka.sasl.jaas.config)
* Broker Server Properties:
* `sasl.enabled.mechanisms=GSSAPI` (more SASL mechanisms may be specified in a comma-separated list)
* `sasl.kerberos.service.name={kafka-kerberos-service-name}`
* `{listener_name}.{sasl_mechanism}.sasl.jaas.config` - jaas configuration on a per-listener basis.
* Example: [kafka/server.properties](kafka/server.properties)
### Zookeeper Service
* Zookeeper JAAS:
* Client API - Kerberize access to ZooKeeper data.
* Login Context: `Server`.
* Use *keytab* method.
* Ensure that the principal is a correctly formed service principal for each node: `{zookeeper-kerberos-service-name}/{FQDN}@{realm}`.
* Example: [zookeeper/zookeeper.sasl.jaas.config](zookeeper/zookeeper.sasl.jaas.config)
* Zookeeper Properties:
* authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider
* requireClientAuthScheme=sasl
* Example: [zookeeper/zookeeper.properties](zookeeper/zookeeper.properties)
## Client Configurations
### Kafka Broker → Zookeeper Service
Brokers connect to Zookeeper for cluster operations.
* Broker JAAS:
* Login Context: `Client`
* Use *keytab* method.
* *Ensure that the same principal is configured for use on each broker.*
* Example: [kafka/kafka.sasl.jaas.config](kafka/kafka.sasl.jaas.config).
* Broker JVM flags:
* `-Dzookeeper.sasl.client.username={zookeeper-kerberos-service-name}` (OPTIONAL)
### Client → Kafka Service
Clients connecting in to Kafka may be any of:
* A Kafka producer
* A Kafka consumer
* A Kafka Admin client
Note that many applications are a combination of many of these - notably Streams applications and Kafka Connect.
* Client JAAS:
* Login Context: `KafkaClient`
* Can use *kinit* or *keytab* method.
* Example: [client/client.sasl.jaas.config](client/client.sasl.jaas.config)
* Client Properties:
* `sasl.kerberos.service.name={kafka-kerberos-service-name}`
* `security.protocol={security-protocol}`
* `sasl.jaas.config` - jaas override.
* Examples: [client/producer.properties](client/producer.properties), [client/consumer.properties](client/consumer.properties), [client/command.properties](client/command.properties)
### Client → Zookeeper (Optional)
Historically, clients needed to connect directly to ZooKeeper for service discovery and admin operations.
However, the new Kafka Admin API allows all this functionality via Client → Kafka Broker connection, so this direct connection should not be required.
* JAAS:
* LoginContext: `Client`
* Can use *kinit* or *keytab* method.
* JVM flags:
* `-Dzookeeper.sasl.client.username={zookeeper-kerberos-service-name}` (OPTIONAL)
### Confluent Metrics Reporter → Kafka Service (Optional)
The Confluent metrics reporter runs as a plugin within the Kafka broker, but from a Kerberos point of view is configured as a network client.
The configuration, including inline Jaas, is specified within the broker properties using a keytab:
* `confluent.metrics.reporter.sasl.mechanism=GSSAPI`
* `confluent.metrics.reporter.security.protocol={security-protocol}`
* `confluent.metrics.reporter.sasl.kerberos.service.name={kafka-kerberos-service-name}`
* `confluent.metrics.reporter.sasl.jaas.config={inline jaas configuration}`
* Example: [kafka/server.properties](kafka/server.properties)
# Authentication is not enough!
The steps above are sufficient to support Kerberos authenticated connections within the cluster.
This does not make your cluster secure though!
The demo also applies a minimal level of authorisation to prevent unauthenticated network access to the brokers and Zookeeper.
The following should be reviewed in the broker server properties:
* `allow.everyone.if.no.acl.found=false` - default to no access.
* `authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer` - enable the default authoriser for Kafka.
* `zookeeper.set.acl=true` - when storing ACL data in Zookeeper, apply Zookeeper access controls so that only the Broker → Zookeeper client principal can read and modify the lists.
* Example: [kafka/server.properties](kafka/server.properties)
# Putting it into action
In this demo we configure:
* A simple KDC to generate principals and keytabs.
* A single node Zookeeper with a Kerberized data access API.
* A single node Kafka broker with a Kerberized listener.
* Set up ACLs allowing `kafka-console-producer` and `kafka-console-consumer` usage.
_A basic knowledge of Docker is useful to follow the code, though only basic Docker techniques are used to keep the code readable._
_Each node is built using a `Dockerfile` into which configuration values are hard-coded, and the services are brought up using `docker-compose`._
_Kerberos keytabs and the krb5.conf file are shared amongst all nodes on the cluster using a shared Docker volume._
The demo is run using the [up](up) script, which orchestrates the following process:
* Builds and starts the KDC.
All nodes are joined to the KDC's realm by sharing `krb5.conf` amongst all nodes.
* Generates Kerberos principals and keytabs, sharing these on the shared Docker volume.
* Builds and starts Zookeeper, Kafka broker and Client.
* Uses the `admin` super user to configure ACLs for the `producer` and `consumer` users.
* Prints example usage to connect into Kafka with a Kerberos principal.
This is actually executed via the `client` node.
# Next up
* Extending Kerberos configuration to a full cluster (coming soon)
* Hardening access controls
# References
* https://www.youtube.com/watch?v=KD2Q-2ToloE Video overview of Kerberos authentication process.
================================================
FILE: kerberos/client/Dockerfile
================================================
# Kafka client container with Kerberos tooling (kinit/klist) and pre-baked
# producer/consumer/admin client configuration files.
FROM centos:centos7
MAINTAINER d.gasparina@gmail.com
ENV container docker
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install confluent kafka tools:
RUN yum install -y java-11-openjdk
RUN yum install -y confluent-kafka-2.12
# 3. Install Kerberos libraries
RUN yum install -y krb5-workstation krb5-libs
# 4. Copy in required settings for client access to Kafka
COPY consumer.properties /etc/kafka/consumer.properties
COPY producer.properties /etc/kafka/producer.properties
COPY command.properties /etc/kafka/command.properties
COPY client.sasl.jaas.config /etc/kafka/client_jaas.conf
# Fix: point KAFKA_OPTS at the JAAS file actually copied above
# (client_jaas.conf); the previous value referenced
# /etc/kafka/kafka_server_jaas.conf, which does not exist in this image.
ENV KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/client_jaas.conf
# Keep the container alive so demo commands can be exec'd into it.
CMD sleep infinity
================================================
FILE: kerberos/client/client.sasl.jaas.config
================================================
/*
* Credentials to use when connecting to ZooKeeper directly.
*
* Whenever possible you should use the Kafka AdminClient API instead of ZooKeeper.
*/
Client {
com.sun.security.auth.module.Krb5LoginModule required
useTicketCache=true;
};
/*
* Credentials to connect to Kafka.
*/
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useTicketCache=true;
};
================================================
FILE: kerberos/client/command.properties
================================================
bootstrap.servers=kafka:9093
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
serviceName=kafka \
useTicketCache=true;
================================================
FILE: kerberos/client/confluent.repo
================================================
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.4/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.4
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
================================================
FILE: kerberos/client/consumer.properties
================================================
bootstrap.servers=kafka:9093
security.protocol=SASL_PLAINTEXT
sasl.kerberos.service.name=kafka
sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
useTicketCache=true;
================================================
FILE: kerberos/client/producer.properties
================================================
bootstrap.servers=kafka:9093
security.protocol=SASL_PLAINTEXT
sasl.kerberos.service.name=kafka
sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
useTicketCache=true;
================================================
FILE: kerberos/docker-compose.yml
================================================
version: '3.5'
services:
kdc:
hostname: kdc.kerberos-demo.local
#domainname: kerberos_default
build: kdc/
container_name: kdc
volumes:
- secret:/var/lib/secret
- ./kdc/krb5.conf:/etc/kdc/krb5.conf
zookeeper:
build: zookeeper/
container_name: zookeeper
hostname: zookeeper.kerberos-demo.local
#domainname: kerberos_default
depends_on:
- kdc
# Required to wait for the keytab to get generated
restart: on-failure
volumes:
- secret:/var/lib/secret
- ./kdc/krb5.conf:/etc/krb5.conf
kafka:
build: kafka/
container_name: kafka
hostname: kafka.kerberos-demo.local
#domainname: kerberos_default
depends_on:
- zookeeper
- kdc
# Required to wait for the keytab to get generated
restart: on-failure
volumes:
- secret:/var/lib/secret
- ./kdc/krb5.conf:/etc/krb5.conf
client:
build: client/
container_name: client
hostname: client.kerberos-demo.local
#domainname: kerberos_default
depends_on:
- kafka
- kdc
# Required to wait for the keytab to get generated
volumes:
- secret:/var/lib/secret
- ./kdc/krb5.conf:/etc/krb5.conf
volumes:
secret: {}
networks:
default:
name: kerberos-demo.local
================================================
FILE: kerberos/kafka/Dockerfile
================================================
# Kerberized Kafka broker image for the single-node demo (CentOS 8 base).
FROM centos:centos8
MAINTAINER d.gasparina@gmail.com
ENV container docker
# 0. Fixing Mirror list for Centos (CentOS 8 mirrors moved to vault.centos.org)
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install zookeeper and kafka
RUN yum install -y java-11-openjdk
RUN yum install -y confluent-kafka-2.12
RUN yum install -y confluent-control-center
# 3. Configure Kafka for Kerberos
RUN yum install -y krb5-workstation krb5-libs
COPY server.properties /etc/kafka/server.properties
COPY kafka.sasl.jaas.config /etc/kafka/kafka_server_jaas.conf
# SASL_PLAINTEXT listener port (matches listeners= in server.properties)
EXPOSE 9093
# Point the JVM at the broker JAAS config; -Dzookeeper.sasl.client.username
# selects the Zookeeper service principal name (zkservice) the broker
# authenticates against. Keytabs referenced by the JAAS config are expected
# on the shared volume mounted at /var/lib/secret at runtime.
ENV KAFKA_OPTS="-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf -Dzookeeper.sasl.client.username=zkservice"
CMD kafka-server-start /etc/kafka/server.properties
================================================
FILE: kerberos/kafka/confluent.repo
================================================
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.4/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.4
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
================================================
FILE: kerberos/kafka/kafka.sasl.jaas.config
================================================
/*
* The service principal
*/
KafkaServer {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/var/lib/secret/kafka.key"
principal="kafka/kafka.kerberos-demo.local@TEST.CONFLUENT.IO";
};
/*
* Zookeeper client principal
*/
Client {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
useTicketCache=false
keyTab="/var/lib/secret/zookeeper-client.key"
principal="zkclient@TEST.CONFLUENT.IO";
};
================================================
FILE: kerberos/kafka/server.properties
================================================
# Basic broker and listener configuration
broker.id=0
listeners=SASL_PLAINTEXT://kafka.kerberos-demo.local:9093
zookeeper.connect=zookeeper.kerberos-demo.local:2181
log.dirs=/var/lib/kafka
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
num.partitions=12
# Kerberos / GSSAPI Authentication mechanism
sasl.enabled.mechanisms=GSSAPI
sasl.kerberos.service.name=kafka
# Configure replication to require Kerberos:
sasl.mechanism.inter.broker.protocol=GSSAPI
security.inter.broker.protocol=SASL_PLAINTEXT
# Authorization config:
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
zookeeper.set.acl=true
allow.everyone.if.no.acl.found=false
super.users=User:admin;User:kafka
# Demonstrate setting up the Confluent Metrics Reporter with required *client* credentials
metric.reporters=io.confluent.metrics.reporter.ConfluentMetricsReporter
confluent.metrics.reporter.bootstrap.servers=kafka:9093
confluent.metrics.reporter.sasl.mechanism=GSSAPI
confluent.metrics.reporter.security.protocol=SASL_PLAINTEXT
confluent.metrics.reporter.sasl.kerberos.service.name=kafka
confluent.metrics.reporter.sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
useKeyTab=true \
storeKey=true \
keyTab="/var/lib/secret/kafka-admin.key" \
principal="admin/for-kafka@TEST.CONFLUENT.IO";
confluent.metrics.reporter.topic.replicas=1
confluent.support.metrics.enable=false
confluent.support.customer.id=anonymous
================================================
FILE: kerberos/kdc/Dockerfile
================================================
# Single-node Kerberos KDC (Key Distribution Center) for the demo realm.
FROM centos:centos8
MAINTAINER d.gasparina@gmail.com
ENV container docker
# 0. Fixing Mirror list for Centos (CentOS 8 mirrors moved to vault.centos.org)
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Installing Kerberos server, admin and client
RUN yum install -y krb5-server krb5-libs
RUN yum install -y krb5-workstation krb5-libs
# 2. Configuring Kerberos and KDC
COPY krb5.conf /etc/krb5.conf
RUN mkdir /var/log/kerberos
RUN mkdir /etc/kdc
RUN mkdir -p /var/kerberos/krb5kdc/
# docker-compose also bind-mounts ./kdc/krb5.conf at /etc/kdc/krb5.conf;
# the symlink keeps both locations consistent inside the image.
RUN ln -s /etc/krb5.conf /etc/kdc/krb5.conf
# Standard Kerberos KDC port
EXPOSE 88
# Create the KDC database for the TEST.CONFLUENT.IO realm; -s writes a stash
# file so the daemon can start without prompting for the master password.
RUN kdb5_util -P confluent -r TEST.CONFLUENT.IO create -s
# Run the KDC in the foreground (-n: do not detach) so Docker keeps the container alive.
CMD /usr/sbin/krb5kdc -n
================================================
FILE: kerberos/kdc/krb5.conf
================================================
[libdefaults]
default_realm = TEST.CONFLUENT.IO
forwardable = true
rdns = false
dns_lookup_kdc = no
dns_lookup_realm = no
[realms]
TEST.CONFLUENT.IO = {
kdc = kdc
admin_server = kadmin
}
[domain_realm]
.test.confluent.io = TEST.CONFLUENT.IO
test.confluent.io = TEST.CONFLUENT.IO
kerberos-demo.local = TEST.CONFLUENT.IO
.kerberos-demo.local = TEST.CONFLUENT.IO
[logging]
kdc = FILE:/var/log/kerberos/krb5kdc.log
admin_server = FILE:/var/log/kerberos/kadmin.log
default = FILE:/var/log/kerberos/krb5lib.log
================================================
FILE: kerberos/up
================================================
#!/bin/sh
set -e
# Orchestrates the Kerberos demo: build images, start the KDC, create the
# principals and keytabs, then bring up Zookeeper/Kafka/client and set ACLs.
#
# Starting kerberos,
# Avoiding starting up all services at the beginning to generate the keytab first
docker-compose build
docker-compose up -d kdc
### Create the required identities:
# Kafka service principal:
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka/kafka.kerberos-demo.local@TEST.CONFLUENT.IO" > /dev/null
# Zookeeper service principal:
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey zkservice/zookeeper.kerberos-demo.local@TEST.CONFLUENT.IO" > /dev/null
# Create a principal with which to connect to Zookeeper from brokers - NB use the same credential on all brokers!
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey zkclient@TEST.CONFLUENT.IO" > /dev/null
# Create client principals to connect in to the cluster:
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka_producer@TEST.CONFLUENT.IO" > /dev/null
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka_producer/instance_demo@TEST.CONFLUENT.IO" > /dev/null
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka_consumer@TEST.CONFLUENT.IO" > /dev/null
# Create an admin principal for the cluster, which we'll use to setup ACLs.
# Look after this - it's also declared a super user in broker config.
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey admin/for-kafka@TEST.CONFLUENT.IO" > /dev/null
# Remove stale keytabs from a previous run before re-exporting.
# Fix: redirect stdout to /dev/null BEFORE duplicating stderr onto it;
# the previous order (`2>&1 > /dev/null`) left stderr on the terminal.
docker exec -ti kdc rm -f /var/lib/secret/kafka.key > /dev/null 2>&1
docker exec -ti kdc rm -f /var/lib/secret/zookeeper.key > /dev/null 2>&1
docker exec -ti kdc rm -f /var/lib/secret/zookeeper-client.key > /dev/null 2>&1
docker exec -ti kdc rm -f /var/lib/secret/kafka-client.key > /dev/null 2>&1
docker exec -ti kdc rm -f /var/lib/secret/kafka-admin.key > /dev/null 2>&1
# Export keytabs onto the shared volume (-norandkey keeps existing keys valid).
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka.key -norandkey kafka/kafka.kerberos-demo.local@TEST.CONFLUENT.IO " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/zookeeper.key -norandkey zkservice/zookeeper.kerberos-demo.local@TEST.CONFLUENT.IO " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/zookeeper-client.key -norandkey zkclient@TEST.CONFLUENT.IO " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka-client.key -norandkey kafka_producer@TEST.CONFLUENT.IO " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka-client.key -norandkey kafka_producer/instance_demo@TEST.CONFLUENT.IO " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka-client.key -norandkey kafka_consumer@TEST.CONFLUENT.IO " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka-admin.key -norandkey admin/for-kafka@TEST.CONFLUENT.IO " > /dev/null
# Starting zookeeper and kafka now that the keytab has been created with the required credentials and services
docker-compose up -d
# Adding ACLs for consumer and producer user:
docker exec client bash -c "kinit -k -t /var/lib/secret/kafka-admin.key admin/for-kafka && kafka-acls --bootstrap-server kafka:9093 --command-config /etc/kafka/command.properties --add --allow-principal User:kafka_producer --producer --topic=*"
docker exec client bash -c "kinit -k -t /var/lib/secret/kafka-admin.key admin/for-kafka && kafka-acls --bootstrap-server kafka:9093 --command-config /etc/kafka/command.properties --add --allow-principal User:kafka_consumer --consumer --topic=* --group=*"
# Output example usage:
echo "Example configuration to access kafka:"
echo "-> docker-compose exec client bash -c 'kinit -k -t /var/lib/secret/kafka-client.key kafka_producer && kafka-console-producer --broker-list kafka:9093 --topic test --producer.config /etc/kafka/producer.properties'"
echo "-> docker-compose exec client bash -c 'kinit -k -t /var/lib/secret/kafka-client.key kafka_consumer && kafka-console-consumer --bootstrap-server kafka:9093 --topic test --consumer.config /etc/kafka/consumer.properties --from-beginning'"
================================================
FILE: kerberos/zookeeper/Dockerfile
================================================
# Kerberized Zookeeper image for the single-node demo (CentOS 8 base).
FROM centos:centos8
MAINTAINER d.gasparina@gmail.com
ENV container docker
# 0. Fixing Mirror list for Centos (CentOS 8 mirrors moved to vault.centos.org)
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install zookeeper and kafka
RUN yum install -y java-11-openjdk
RUN yum install -y confluent-kafka-2.12
# 3. Configure zookeeper for Kerberos
RUN yum install -y krb5-workstation krb5-libs
COPY zookeeper.properties /etc/kafka/zookeeper.properties
COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf
# Zookeeper client port
EXPOSE 2181
# Point the JVM at the JAAS config holding the zkservice service principal;
# the keytab it references is mounted at /var/lib/secret at runtime.
ENV KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf
CMD zookeeper-server-start /etc/kafka/zookeeper.properties
================================================
FILE: kerberos/zookeeper/confluent.repo
================================================
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.4/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.4
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
================================================
FILE: kerberos/zookeeper/zookeeper.properties
================================================
dataDir=/var/lib/zookeeper
clientPort=2181
maxClientCnxns=0
authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider
zookeeper.allowSaslFailedClients=false
requireClientAuthScheme=sasl
================================================
FILE: kerberos/zookeeper/zookeeper.sasl.jaas.config
================================================
Server {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
keyTab="/var/lib/secret/zookeeper.key"
storeKey=true
useTicketCache=false
principal="zkservice/zookeeper.kerberos-demo.local@TEST.CONFLUENT.IO";
};
Client {
com.sun.security.auth.module.Krb5LoginModule required
useTicketCache=true;
};
================================================
FILE: kerberos-multi-node/README.md
================================================
# Kerberos multi-node deployment example
This example shows how-to deploy multiple kafka nodes in an example kerberos enabled environment.
The only thing that's different from your normal environment is that this example uses a different principal for each zookeeper client.
The Jira issue https://issues.apache.org/jira/browse/KAFKA-7710 contains more information.
TLDR; we have to set two configs in the zookeeper.properties to make this work
```
kerberos.removeHostFromPrincipal = true
kerberos.removeRealmFromPrincipal = false
```
The first setting removes the hostname from the principal name, so that anyone authenticated with a principal matching 'kafka/*@REALM' is allowed by the ZK ACLs.
================================================
FILE: kerberos-multi-node/docker-compose.yml
================================================
version: '3.8'
services:
kdc:
hostname: kdc
domainname: kerberos-multi-node_default
build: kdc/
container_name: kdc
volumes:
- secret:/var/lib/secret
zookeeper:
build: zookeeper/
container_name: zookeeper
hostname: zookeeper
domainname: kerberos-multi-node_default
depends_on:
- kdc
# Required to wait for the keytab to get generated
restart: on-failure
environment:
- KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf -Dsun.security.krb5.debug=true -Djava.security.krb5.conf=/tmp/krb5.conf
- KRB5_CONFIG=/tmp/krb5.conf
#- KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf
volumes:
- secret:/var/lib/secret
- ./kdc/krb5.conf:/tmp/krb5.conf
kafka:
build: kafka/
container_name: kafka
hostname: kafka
domainname: kerberos-multi-node_default
environment:
# - KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf -Dsun.security.krb5.debug=true -Dzookeeper.sasl.client.username=zkservice
- KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf -Dzookeeper.sasl.client.username=zkservice -Djava.security.krb5.conf=/tmp/krb5.conf
- KRB5_CONFIG=/tmp/krb5.conf
depends_on:
- zookeeper
- kdc
# Required to wait for the keytab to get generated
restart: on-failure
volumes:
- secret:/var/lib/secret
- ./kdc/krb5.conf:/tmp/krb5.conf
kafka1:
build: kafka1/
container_name: kafka1
hostname: kafka1
domainname: kerberos-multi-node_default
environment:
# - KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf -Dsun.security.krb5.debug=true -Dzookeeper.sasl.client.username=zkservice
- KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf -Dzookeeper.sasl.client.username=zkservice -Djava.security.krb5.conf=/tmp/krb5.conf
- KRB5_CONFIG=/tmp/krb5.conf
depends_on:
- zookeeper
- kdc
# Required to wait for the keytab to get generated
restart: on-failure
volumes:
- secret:/var/lib/secret
- ./kdc/krb5.conf:/tmp/krb5.conf
volumes:
secret: {}
================================================
FILE: kerberos-multi-node/down
================================================
#!/bin/bash
# Stop (and optionally destroy) the containers defined by the local
# docker-compose.yml.
#
# Usage: down [-h] [-d]
#   -d  also remove the stopped containers (they are rebuilt next time)

destroy_containers=no

# Refuse to run outside a compose project directory.
if [ ! -f "${PWD}/docker-compose.yml" ]; then
    echo "No docker-compose found. Exiting."
    exit 2
fi

print_usage()
{
    echo "Usage: $0 [-h] [-d]"
    echo "-d destroy images. They will be rebuilt next time"
    exit 2
}

# Minimal argument handling: only the first argument is inspected.
case "${1}" in
    -h)
        print_usage
        ;;
    -d)
        echo "Stopping and destroying containers"
        destroy_containers=yes
        ;;
esac

# Stop the services; bail out for manual cleanup on failure.
docker-compose stop
if [[ $? != 0 ]]; then
    echo "Stopping the docker-compose failed. Exiting for manual cleanup"
    echo "I suggest 'docker-compose ps'"
    exit 2
fi

if [[ "${destroy_containers}" == "yes" ]]; then
    docker-compose rm --force
fi
================================================
FILE: kerberos-multi-node/kafka/Dockerfile
================================================
# Kerberized Kafka broker image for the multi-node demo (CentOS 8 base).
FROM centos:centos8
MAINTAINER d.gasparina@gmail.com
ENV container docker
# 0. Fixing Mirror list for Centos (CentOS 8 mirrors moved to vault.centos.org)
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/6.0/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install zookeeper and kafka
RUN yum install -y java-11-openjdk
RUN yum install -y confluent-platform-2.12
RUN yum install -y confluent-control-center
# 3. Configure Kafka for Kerberos
RUN yum install -y krb5-workstation krb5-libs
COPY server.properties /etc/kafka/server.properties
COPY kafka.sasl.jaas.config /etc/kafka/kafka_server_jaas.conf
COPY consumer.properties /etc/kafka/consumer.properties
# SASL_PLAINTEXT listener port (matches listeners= in server.properties)
EXPOSE 9093
# Note: KAFKA_OPTS (JAAS config path, zookeeper.sasl.client.username, krb5.conf)
# is supplied through the docker-compose environment for this service.
CMD kafka-server-start /etc/kafka/server.properties
================================================
FILE: kerberos-multi-node/kafka/confluent.repo
================================================
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/6.0/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/6.0/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/6.0
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/6.0/archive.key
enabled=1
================================================
FILE: kerberos-multi-node/kafka/consumer.properties
================================================
bootstrap.servers=kafka:9093
security.protocol=SASL_PLAINTEXT
sasl.kerberos.service.name=kafka
sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
useTicketCache=true;
================================================
FILE: kerberos-multi-node/kafka/kafka.sasl.jaas.config
================================================
KafkaServer {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/var/lib/secret/kafka.key"
principal="kafka/kafka.kerberos-multi-node_default@TEST.CONFLUENT.IO";
};
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/var/lib/secret/kafka.key"
principal="admin@TEST.CONFLUENT.IO";
};
Client {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
useTicketCache=false
keyTab="/var/lib/secret/kafka.key"
principal="kafka@TEST.CONFLUENT.IO";
};
================================================
FILE: kerberos-multi-node/kafka/server.properties
================================================
# Broker 0 configuration for the kerberos-multi-node playbook.
broker.id=0
listeners=SASL_PLAINTEXT://kafka:9093
advertised.listeners=SASL_PLAINTEXT://kafka:9093
security.inter.broker.protocol=SASL_PLAINTEXT
log.dirs=/var/lib/kafka
# Demo-friendly replication settings for internal topics.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
zookeeper.connect=zookeeper.kerberos-multi-node_default:2181
# Apply ZooKeeper ACLs to znodes created by the broker.
zookeeper.set.acl=true
# Kerberos / GSSAPI Authentication mechanism
# (duplicate security.inter.broker.protocol entry removed; it is already
# set once above)
sasl.enabled.mechanisms=GSSAPI
sasl.mechanism.inter.broker.protocol=GSSAPI
sasl.kerberos.service.name=kafka
# Deny access unless an ACL explicitly allows it; admin and kafka are
# super users and bypass the ACL checks.
allow.everyone.if.no.acl.found=false
super.users=User:admin;User:kafka
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
# metric reporter configuration with Kerberos
metric.reporters=io.confluent.metrics.reporter.ConfluentMetricsReporter
confluent.metrics.reporter.bootstrap.servers=kafka:9093
confluent.metrics.reporter.sasl.mechanism=GSSAPI
confluent.metrics.reporter.security.protocol=SASL_PLAINTEXT
confluent.metrics.reporter.sasl.kerberos.service.name=kafka
confluent.metrics.reporter.sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
useKeyTab=true \
storeKey=true \
keyTab="/var/lib/secret/kafka.key" \
principal="kafka@TEST.CONFLUENT.IO";
confluent.metrics.reporter.topic.replicas=1
================================================
FILE: kerberos-multi-node/kafka1/Dockerfile
================================================
FROM centos:centos8
# MAINTAINER is deprecated since Docker 1.13; LABEL is the supported form.
LABEL maintainer="d.gasparina@gmail.com"
ENV container docker
# 0. Fix mirror list for CentOS 8 (EOL): point yum at vault.centos.org
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Add the Confluent repository and its signing key
RUN rpm --import https://packages.confluent.io/rpm/6.0/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install Java, Confluent Platform and Control Center in a single layer
RUN yum install -y java-11-openjdk confluent-platform-2.12 confluent-control-center
# 3. Configure Kafka for Kerberos (client tooling + broker/JAAS configuration)
RUN yum install -y krb5-workstation krb5-libs
COPY server.properties /etc/kafka/server.properties
COPY kafka.sasl.jaas.config /etc/kafka/kafka_server_jaas.conf
COPY consumer.properties /etc/kafka/consumer.properties
EXPOSE 9093
CMD kafka-server-start /etc/kafka/server.properties
================================================
FILE: kerberos-multi-node/kafka1/confluent.repo
================================================
# Yum repository definitions for Confluent Platform 6.0.
# GPG verification is enabled for both repositories using Confluent's
# published archive key.
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/6.0/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/6.0/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/6.0
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/6.0/archive.key
enabled=1
================================================
FILE: kerberos-multi-node/kafka1/consumer.properties
================================================
# Client configuration: Kerberos (GSSAPI) over SASL_PLAINTEXT.
bootstrap.servers=kafka:9093
security.protocol=SASL_PLAINTEXT
sasl.kerberos.service.name=kafka
# Authenticate from the ticket cache populated by a prior `kinit`
# rather than from a keytab.
sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
useTicketCache=true;
================================================
FILE: kerberos-multi-node/kafka1/kafka.sasl.jaas.config
================================================
// JAAS login configuration for broker "kafka1" (Kerberos / GSSAPI).
// All contexts authenticate from the shared keytab /var/lib/secret/kafka.key.
// Context used by the broker to accept SASL connections; principal is
// kafka/<broker-host> in realm TEST.CONFLUENT.IO.
KafkaServer {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/var/lib/secret/kafka.key"
principal="kafka/kafka1.kerberos-multi-node_default@TEST.CONFLUENT.IO";
};
// Context presumably used by client tooling run from this container,
// authenticating as the admin user — confirm against the playbook's docs.
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/var/lib/secret/kafka.key"
principal="admin@TEST.CONFLUENT.IO";
};
// Context used for the broker's own ZooKeeper connection; keytab-only
// (useTicketCache=false), so no kinit is required for this login.
Client {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
useTicketCache=false
keyTab="/var/lib/secret/kafka.key"
principal="kafka@TEST.CONFLUENT.IO";
};
================================================
FILE: kerberos-multi-node/kafka1/server.properties
================================================
# Broker 1 configuration for the kerberos-multi-node playbook.
broker.id=1
listeners=SASL_PLAINTEXT://kafka1:9093
advertised.listeners=SASL_PLAINTEXT://kafka1:9093
security.inter.broker.protocol=SASL_PLAINTEXT
log.dirs=/var/lib/kafka
# Demo-friendly replication settings for internal topics.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
zookeeper.connect=zookeeper.kerberos-multi-node_default:2181
# Apply ZooKeeper ACLs to znodes created by the broker.
zookeeper.set.acl=true
# Kerberos / GSSAPI Authentication mechanism
# (duplicate security.inter.broker.protocol entry removed; it is already
# set once above)
sasl.enabled.mechanisms=GSSAPI
sasl.mechanism.inter.broker.protocol=GSSAPI
sasl.kerberos.service.name=kafka
# Deny access unless an ACL explicitly allows it; admin and kafka are
# super users and bypass the ACL checks.
allow.everyone.if.no.acl.found=false
super.users=User:admin;User:kafka
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
# metric reporter configuration with Kerberos
metric.reporters=io.confluent.metrics.reporter.ConfluentMetricsReporter
confluent.metrics.reporter.bootstrap.servers=kafka:9093
confluent.metrics.reporter.sasl.mechanism=GSSAPI
confluent.metrics.reporter.security.protocol=SASL_PLAINTEXT
confluent.metrics.reporter.sasl.kerberos.service.name=kafka
confluent.metrics.reporter.sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
useKeyTab=true \
storeKey=true \
keyTab="/var/lib/secret/kafka.key" \
principal="kafka@TEST.CONFLUENT.IO";
confluent.metrics.reporter.topic.replicas=1
================================================
FILE: kerberos-multi-node/kdc/Dockerfile
================================================
FROM centos:centos8
# MAINTAINER is deprecated since Docker 1.13; LABEL is the supported form.
LABEL maintainer="d.gasparina@gmail.com"
ENV container docker
# 0. Fix mirror list for CentOS 8 (EOL): point yum at vault.centos.org
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Install Kerberos server, admin and client tooling in a single layer
RUN yum install -y krb5-server krb5-workstation krb5-libs
# 2. Configure Kerberos and the KDC
COPY krb5.conf /etc/krb5.conf
RUN mkdir -p /var/log/kerberos /etc/kdc /var/kerberos/krb5kdc
RUN ln -s /etc/krb5.conf /etc/kdc/krb5.conf
EXPOSE 88
# Create the TEST.CONFLUENT.IO realm database at build time
# (-s also writes a stash file so the KDC can start unattended)
RUN kdb5_util -P confluent -r TEST.CONFLUENT.IO create -s
CMD /usr/sbin/krb5kdc -n
================================================
FILE: kerberos-multi-node/kdc/krb5.conf
================================================
# Kerberos client/KDC configuration for the TEST.CONFLUENT.IO demo realm.
[libdefaults]
default_realm = TEST.CONFLUENT.IO
ticket_lifetime = 24h
renew_lifetime = 7d
forwardable = true
# rdns/dns lookups disabled: hostnames are resolved by docker-compose,
# not DNS, so reverse lookups would produce wrong principal names.
rdns = false
dns_lookup_kdc = no
dns_lookup_realm = no
[realms]
TEST.CONFLUENT.IO = {
kdc = kdc
admin_server = kadmin
}
[domain_realm]
.test.confluent.io = TEST.CONFLUENT.IO
test.confluent.io = TEST.CONFLUENT.IO
kerberos_default = TEST.CONFLUENT.IO
.kerberos_default = TEST.CONFLUENT.IO
[logging]
kdc = FILE:/var/log/kerberos/krb5kdc.log
admin_server = FILE:/var/log/kerberos/kadmin.log
default = FILE:/var/log/kerberos/krb5lib.log
================================================
FILE: kerberos-multi-node/up
================================================
#!/bin/sh
# Bootstrap the kerberos-multi-node playbook.
# The KDC is started on its own first so that the keytab can be generated
# before zookeeper and the brokers come up.
docker-compose build
docker-compose up -d kdc
# Create the client credentials
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka_producer@TEST.CONFLUENT.IO" > /dev/null
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka_consumer@TEST.CONFLUENT.IO" > /dev/null
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka@TEST.CONFLUENT.IO" > /dev/null
# Create the server principals
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey zkservice/zookeeper.kerberos-multi-node_default@TEST.CONFLUENT.IO" > /dev/null
# principal for broker 0
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka/kafka.kerberos-multi-node_default@TEST.CONFLUENT.IO" > /dev/null
# principal for broker 1
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka/kafka1.kerberos-multi-node_default@TEST.CONFLUENT.IO" > /dev/null
# Export all keys into a single shared keytab.
# Redirection order fixed: "> /dev/null 2>&1" silences both streams,
# whereas the previous "2>&1 > /dev/null" still printed stderr.
docker exec -ti kdc rm -f /var/lib/secret/kafka.key > /dev/null 2>&1
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka.key -glob zkservice/*" > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka.key -glob kafka*" > /dev/null
# Start zookeeper and kafka now that the keytab has been created
docker-compose up -d
# Add ACLs for the consumer and producer users.
# '*' is single-quoted so the in-container shell cannot glob-expand it
# against files in its working directory.
docker-compose exec kafka bash -c "kinit -k -t /var/lib/secret/kafka.key kafka && kafka-acls --authorizer-properties zookeeper.connect=zookeeper:2181 --add --allow-principal User:kafka_producer --producer --topic '*'"
docker-compose exec kafka bash -c "kinit -k -t /var/lib/secret/kafka.key kafka && kafka-acls --authorizer-properties zookeeper.connect=zookeeper:2181 --add --allow-principal User:kafka_consumer --consumer --topic '*' --group '*'"
echo "Example configuration to access kafka:"
echo "-> docker-compose exec kafka bash -c 'kinit -k -t /var/lib/secret/kafka.key kafka_producer && kafka-console-producer --broker-list kafka:9093 --topic test --producer.config /etc/kafka/consumer.properties'"
echo "-> docker-compose exec kafka bash -c 'kinit -k -t /var/lib/secret/kafka.key kafka_consumer && kafka-console-consumer --bootstrap-server kafka:9093 --topic test --consumer.config /etc/kafka/consumer.properties --from-beginning'"
================================================
FILE: kerberos-multi-node/zookeeper/Dockerfile
================================================
FROM centos:centos8
# MAINTAINER is deprecated since Docker 1.13; LABEL is the supported form.
LABEL maintainer="d.gasparina@gmail.com"
ENV container docker
# 0. Fix mirror list for CentOS 8 (EOL): point yum at vault.centos.org
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Add the Confluent repository and its signing key
RUN rpm --import https://packages.confluent.io/rpm/6.0/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install Java and Confluent Platform in a single layer
RUN yum install -y java-11-openjdk confluent-platform-2.12
# 3. Configure zookeeper for Kerberos
RUN yum install -y krb5-workstation krb5-libs
COPY zookeeper.properties /etc/kafka/zookeeper.properties
COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf
EXPOSE 2181
CMD zookeeper-server-start /etc/kafka/zookeeper.properties
================================================
FILE: kerberos-multi-node/zookeeper/confluent.repo
================================================
# Yum repository definitions for Confluent Platform 6.0.
# GPG verification is enabled for both repositories using Confluent's
# published archive key.
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/6.0/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/6.0/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/6.0
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/6.0/archive.key
enabled=1
================================================
FILE: kerberos-multi-node/zookeeper/zookeeper.properties
================================================
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# the directory where the snapshot is stored.
dataDir=/var/lib/zookeeper
# the port at which the clients will connect
clientPort=2181
# disable the per-ip limit on the number of connections since this is a non-production config
maxClientCnxns=0
# Enable SASL authentication of ZooKeeper clients and reject clients
# that fail or skip SASL authentication.
authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
zookeeper.allowSaslFailedClients=false
requireClientAuthScheme=sasl
# Strip the host part (but keep the realm) when mapping a Kerberos
# principal to a ZooKeeper identity, so kafka/<host>@REALM maps to kafka@REALM.
kerberos.removeHostFromPrincipal = true
kerberos.removeRealmFromPrincipal = false
================================================
FILE: kerberos-multi-node/zookeeper/zookeeper.sasl.jaas.config
================================================
// JAAS login configuration for the ZooKeeper server (Kerberos / GSSAPI).
// Context used by ZooKeeper to accept SASL client connections;
// authenticates from the shared keytab with the zkservice principal.
Server {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
useTicketCache=false
keyTab="/var/lib/secret/kafka.key"
principal="zkservice/zookeeper.kerberos-multi-node_default@TEST.CONFLUENT.IO";
};
// Client context relies on the ticket cache populated by a prior kinit.
Client {
com.sun.security.auth.module.Krb5LoginModule required
useTicketCache=true;
};
================================================
FILE: kerberos-multi-sasl/README.md
================================================
# Kerberos configuration demo
This demo sets up a basic Kafka cluster secured with Kerberos authentication, and sets up some basic ACLs to demonstrate authorisation.
The documentation below introduces the relevant components you need to understand to set up Kerberos in a Linux / JVM environment.
## Kerberos authentication process
Before configuring Kafka for Kerberos authentication, it is useful to understand the basics of Kerberos; the authentication process and some key terms.
_A note on what Kerberos is and is not: Kerberos is a *network authentication protocol* which allows a client application to connect to a network service in a way that allows the components to mutually verify each other's identities._
_It is put to good use in and integrated with network directory services, notably Windows Active Directory._
_Here, Kerberos identities are bound to network accounts and access privileges and, in the case of Windows, the SSPI API supports single sign-on and privilege impersonation natively in the OS._
_This is enabled by Kerberos, but Kerberos itself is not bound to such accounts and does not provide any such capability._
With that, let's work through the process for a client application making a connection into Kafka.
Kerberos involves three parties:
- a Kerberos Client, in this case our client application.
- a Kerberized Service, in this case Kafka.
- the Kerberos **Key Distribution Center (KDC)**
An important point to understand in this process is that the Client and Service each share their own cryptographic key with the KDC.
By using this key to encrypt/decrypt tokens passed over the network, two network systems can verify each other's identities.
The Client and Service trust that they have only shared their secret with the KDC and so any correctly signed token must have originated from the KDC.
This is crucial.
During the Kerberos process the Client requests a token from the KDC _signed with the Service's key_ and presents this when making a connection.
The Service can then trust that the Client has valid credentials with the KDC and can be authenticated.
Other information is shared during the process to enable integrity checking and protection against various spoofing attacks.
For example, each signed token is:
* timestamped to bound the window for which it is valid
* linked to a network IP so that it is valid only from a single host
The first stage is that the Client application must authenticate itself with the KDC by proving that the Client knows the private credentials relating to the Client's Kerberos **Principal**.
The Principal is a unique identity in the form {primary}/{instance}@{REALM} (more on these later).
The KDC authenticates the client using their shared cryptographic key and results in the client receiving a **Ticket Granting Ticket (TGT)**.
This is a cryptographic token that the Client may now use to prove that it has recently authenticated with the KDC.
The TGT is timestamped and includes an expiry time, typically a day.
The TGT is cached by the client to avoid having to re-authenticate unnecessarily.
Next, the Client wants to authenticate itself to the Kerberized Service.
For this to happen, the client must get a cryptographic token encrypted with the Kerberized Service's key - this token is a **Service Ticket** and is requested by the client from the KDC using the TGT and the requested service's principal name.
Including the TGT in this request is sufficient to prove that the client has already authenticated with the KDC allowing the service ticket to be returned.
Here is an important point to note - how does the client know the service principal name?
Very simply, it builds the principal with:
- {primary} = a client-side configured name for the service
- {instance} = the network address used to connect to the service
- {REALM} = the realm of the client and KDC.
In our example, our Client attempts to connect to the `kafka` Service on the host `kafka.kerberos-demo.local` in the realm `TEST.CONFLUENT.IO`.
Therefore, the service must be configured with a Service Principal Name of `kafka/kafka.kerberos-demo.local@TEST.CONFLUENT.IO`.
Now the Client can connect directly to the Kerberized Service, and include the Service Ticket.
As the Service ticket is signed with the Service principal's key, the Service can decrypt the token to authenticate the request.
Based on the above, each connection in the cluster must be established with the following in place:
* On the Kerberos Client:
* A client principal and key to authenticate with the KDC, `{client name}@REALM`
* This is the *User Principal Name*.
* a configured name for the service to connect to, `{service name}`
* the network address for the service, `{network address}`.
* On the server:
* a principal name & key in the form `{service name}/{network address}@REALM`.
* This is the *Service Principal Name*.
As can be seen, the service principal must be constructed correctly to work.
However, the `{client name}` format is not mandated in the same way and is not bound to a network address.
Often the client name is a simple alphanumeric username, let's say 'john'.
However, you may sometimes see a client principal such as 'john/admin'.
In this form, 'admin' is called an _instance_ of the 'john' principal and can be used by 'john' to run services on the system with different credentials and privileges from the main account.
From the Kerberos perspective, the two principals are completely separate, but it can nonetheless be convenient to use this naming convention.
# Technical Components
## KDC
The KDC could be provided by MIT Kerberos, Windows Active Directory, Redhat Identity Manager and many others.
In this demo we use MIT kerberos.
## Kerberos libraries and tools
All the hosts must include Kerberos libraries and a shared configuration (krb5.conf) in order to use and trust the same KDC.
`kinit` is used to authenticate to the Kerberos server as principal, or if none is given, a system generated default (typically your login name at the default realm), and acquire a ticket grant
gitextract_67i_esfb/
├── .gitignore
├── KerberosCheatsheet.md
├── README.md
├── TlsCheatsheet.md
├── acls/
│ ├── docker-compose.yaml
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── admin.conf
│ │ ├── consumer.conf
│ │ ├── kafka.conf
│ │ ├── kafka.sasl.jaas.conf
│ │ ├── kafkacat.conf
│ │ ├── log4j.properties.template
│ │ └── producer.conf
│ ├── up
│ └── zookeeper.sasl.jaas.conf
├── apache-kafka-with-zk3.5-and-tls/
│ ├── .gitignore
│ ├── README.md
│ ├── docker-compose.yml
│ ├── kafka/
│ │ ├── Dockerfile
│ │ └── server.properties
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── tlsZkCli.sh
│ └── zoo.cfg
├── auditlog/
│ ├── README.md
│ ├── config/
│ │ └── delete-records.json
│ ├── data/
│ │ └── my_msgs.txt
│ ├── docker-compose.yml
│ ├── example-config.json
│ ├── kafka/
│ │ ├── consumer-user.properties
│ │ ├── kafka-user.properties
│ │ ├── kafka.properties
│ │ ├── kafka.sasl.jaas.config
│ │ ├── log4j.properties
│ │ ├── producer-user.properties
│ │ └── tools-log4j.properties
│ ├── scripts/
│ │ ├── create-topics.sh
│ │ ├── delete-records.sh
│ │ ├── describe-topics.sh
│ │ ├── explore-audit-topic.sh
│ │ └── write-msg.sh
│ ├── up
│ └── zookeeper/
│ ├── log4j.properties
│ ├── tools-log4j.properties
│ ├── zookeeper.properties
│ └── zookeeper.sasl.jaas.config
├── ca-builder-scripts/
│ ├── .gitignore
│ ├── README.md
│ ├── build-a-batch-of-certs.sh
│ ├── build-a-batch-of-stores.sh
│ ├── configs/
│ │ ├── batch-of-certs.txt
│ │ ├── batch-of-stores.txt
│ │ ├── ca-config-vars
│ │ ├── ca.config
│ │ └── intermediate-ca.config
│ ├── create-crl.sh
│ ├── create-pair-certs.sh
│ ├── del-cert.sh
│ ├── revoke-cert.sh
│ ├── setup-ca-with-intermediate-ca.sh
│ ├── support-scripts/
│ │ ├── build-ca.sh
│ │ └── create-cert.sh
│ └── utils/
│ ├── build-ca.sh
│ ├── build-intermediate-ca.sh
│ └── functions.sh
├── delegation_tokens/
│ ├── .gitignore
│ ├── ca.cnf
│ ├── client.cnf
│ ├── docker-compose.yml
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── confluent.repo
│ │ ├── consumer.properties
│ │ ├── create_client_properties.sh
│ │ ├── kafka_server_jaas.conf
│ │ └── server.properties
│ ├── server.cnf
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── confluent.repo
│ ├── zookeeper.properties
│ └── zookeeper.sasl.jaas.config
├── kafka-connect-mtls/
│ ├── .gitignore
│ ├── README.md
│ ├── check-ssl-client-auth.sh
│ ├── connect/
│ │ ├── config/
│ │ │ ├── ca.cnf
│ │ │ └── client.cnf
│ │ └── secrets/
│ │ ├── ca-chain.cert.pem
│ │ ├── connect.cert.pem
│ │ ├── connect.key.pem
│ │ ├── server.keystore
│ │ └── server.truststore
│ ├── docker-compose.yml
│ └── up
├── kerberos/
│ ├── README.md
│ ├── client/
│ │ ├── Dockerfile
│ │ ├── client.sasl.jaas.config
│ │ ├── command.properties
│ │ ├── confluent.repo
│ │ ├── consumer.properties
│ │ └── producer.properties
│ ├── docker-compose.yml
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── confluent.repo
│ │ ├── kafka.sasl.jaas.config
│ │ └── server.properties
│ ├── kdc/
│ │ ├── Dockerfile
│ │ └── krb5.conf
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── confluent.repo
│ ├── zookeeper.properties
│ └── zookeeper.sasl.jaas.config
├── kerberos-multi-node/
│ ├── README.md
│ ├── docker-compose.yml
│ ├── down
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── confluent.repo
│ │ ├── consumer.properties
│ │ ├── kafka.sasl.jaas.config
│ │ └── server.properties
│ ├── kafka1/
│ │ ├── Dockerfile
│ │ ├── confluent.repo
│ │ ├── consumer.properties
│ │ ├── kafka.sasl.jaas.config
│ │ └── server.properties
│ ├── kdc/
│ │ ├── Dockerfile
│ │ └── krb5.conf
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── confluent.repo
│ ├── zookeeper.properties
│ └── zookeeper.sasl.jaas.config
├── kerberos-multi-sasl/
│ ├── README.md
│ ├── client/
│ │ ├── Dockerfile
│ │ ├── client.sasl.jaas.config
│ │ ├── command.properties
│ │ ├── confluent.repo
│ │ ├── consumer.properties
│ │ ├── producer.properties
│ │ └── scram.properties
│ ├── docker-compose.yml
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── confluent.repo
│ │ ├── kafka.sasl.jaas.config
│ │ └── server.properties
│ ├── kdc/
│ │ ├── Dockerfile
│ │ └── krb5.conf
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── confluent.repo
│ ├── zookeeper.properties
│ └── zookeeper.sasl.jaas.config
├── kraft/
│ └── none/
│ ├── docker-compose.yml
│ ├── image/
│ │ └── kafka-images/
│ │ └── kafka/
│ │ ├── Dockerfile
│ │ ├── Dockerfile.ubi8
│ │ ├── README.md
│ │ ├── include/
│ │ │ └── etc/
│ │ │ └── confluent/
│ │ │ └── docker/
│ │ │ ├── configure
│ │ │ ├── ensure
│ │ │ ├── kafka.properties.template
│ │ │ ├── launch
│ │ │ ├── log4j.properties.template
│ │ │ ├── run
│ │ │ └── tools-log4j.properties.template
│ │ ├── pom.xml
│ │ ├── requirements.txt
│ │ ├── setup.py
│ │ ├── test/
│ │ │ ├── fixtures/
│ │ │ │ ├── cluster-bridged-plain.yml
│ │ │ │ ├── cluster-bridged-sasl.yml
│ │ │ │ ├── cluster-bridged-ssl.yml
│ │ │ │ ├── cluster-host-plain.yml
│ │ │ │ ├── cluster-host-sasl.yml
│ │ │ │ ├── cluster-host-ssl.yml
│ │ │ │ ├── secrets/
│ │ │ │ │ ├── bridged.consumer.ssl.config
│ │ │ │ │ ├── bridged.consumer.ssl.sasl.config
│ │ │ │ │ ├── bridged.producer.ssl.config
│ │ │ │ │ ├── bridged.producer.ssl.sasl.config
│ │ │ │ │ ├── bridged_broker1_jaas.conf
│ │ │ │ │ ├── bridged_broker2_jaas.conf
│ │ │ │ │ ├── bridged_broker3_jaas.conf
│ │ │ │ │ ├── bridged_consumer_jaas.conf
│ │ │ │ │ ├── bridged_krb.conf
│ │ │ │ │ ├── bridged_producer_jaas.conf
│ │ │ │ │ ├── broker1-ca1-signed.crt
│ │ │ │ │ ├── broker1_keystore_creds
│ │ │ │ │ ├── broker1_sslkey_creds
│ │ │ │ │ ├── broker1_truststore_creds
│ │ │ │ │ ├── broker2-ca1-signed.crt
│ │ │ │ │ ├── broker2_keystore_creds
│ │ │ │ │ ├── broker2_sslkey_creds
│ │ │ │ │ ├── broker2_truststore_creds
│ │ │ │ │ ├── broker3-ca1-signed.crt
│ │ │ │ │ ├── broker3_keystore_creds
│ │ │ │ │ ├── broker3_sslkey_creds
│ │ │ │ │ ├── broker3_truststore_creds
│ │ │ │ │ ├── client-plain.config
│ │ │ │ │ ├── config_krb.conf
│ │ │ │ │ ├── config_server1_jaas.conf
│ │ │ │ │ ├── consumer-ca1-signed.crt
│ │ │ │ │ ├── consumer_keystore_creds
│ │ │ │ │ ├── consumer_sslkey_creds
│ │ │ │ │ ├── consumer_truststore_creds
│ │ │ │ │ ├── create-certs.sh
│ │ │ │ │ ├── host.consumer.ssl.config
│ │ │ │ │ ├── host.consumer.ssl.sasl.config
│ │ │ │ │ ├── host.producer.ssl.config
│ │ │ │ │ ├── host.producer.ssl.sasl.config
│ │ │ │ │ ├── host_broker1_jaas.conf
│ │ │ │ │ ├── host_broker2_jaas.conf
│ │ │ │ │ ├── host_broker3_jaas.conf
│ │ │ │ │ ├── host_consumer_jaas.conf
│ │ │ │ │ ├── host_krb.conf
│ │ │ │ │ ├── host_producer_jaas.conf
│ │ │ │ │ ├── host_zookeeper_1_jaas.conf
│ │ │ │ │ ├── host_zookeeper_2_jaas.conf
│ │ │ │ │ ├── host_zookeeper_3_jaas.conf
│ │ │ │ │ ├── kafka.broker1.keystore.jks
│ │ │ │ │ ├── kafka.broker1.truststore.jks
│ │ │ │ │ ├── kafka.broker2.keystore.jks
│ │ │ │ │ ├── kafka.broker2.truststore.jks
│ │ │ │ │ ├── kafka.broker3.keystore.jks
│ │ │ │ │ ├── kafka.broker3.truststore.jks
│ │ │ │ │ ├── kafka.consumer.keystore.jks
│ │ │ │ │ ├── kafka.consumer.truststore.jks
│ │ │ │ │ ├── kafka.producer.keystore.jks
│ │ │ │ │ ├── kafka.producer.truststore.jks
│ │ │ │ │ ├── kafkacat-ca1-signed.pem
│ │ │ │ │ ├── kafkacat.client.key
│ │ │ │ │ ├── krb_server.conf
│ │ │ │ │ ├── producer-ca1-signed.crt
│ │ │ │ │ ├── producer-ssl.config
│ │ │ │ │ ├── producer_keystore_creds
│ │ │ │ │ ├── producer_sslkey_creds
│ │ │ │ │ ├── producer_truststore_creds
│ │ │ │ │ ├── snakeoil-ca-1.crt
│ │ │ │ │ └── snakeoil-ca-1.key
│ │ │ │ ├── standalone-config.yml
│ │ │ │ └── standalone-network.yml
│ │ │ └── test_kafka.py
│ │ └── tox.ini
│ └── up
├── ldap/
│ ├── acls/
│ │ └── acls.csv
│ ├── add-user
│ ├── custom/
│ │ ├── 01_base.ldif
│ │ ├── 02_KafkaDevelopers.ldif
│ │ ├── 10_alice.ldif
│ │ ├── 11_barnie.ldif
│ │ ├── 12_charlie.ldif
│ │ └── 20_group_add.ldif
│ ├── docker-compose-with-ssl.yaml
│ ├── docker-compose.yaml
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── alice.properties
│ │ ├── barnie.properties
│ │ ├── charlie.properties
│ │ ├── confluent.repo
│ │ ├── consumer.properties
│ │ ├── jks/
│ │ │ └── .gitignore
│ │ ├── kafka.jaas.config
│ │ ├── kafka.properties
│ │ ├── log4j.properties
│ │ ├── server-with-ssl.properties
│ │ ├── server.properties
│ │ └── users/
│ │ └── purbon.properties
│ ├── ldap/
│ │ ├── certs/
│ │ │ └── .gitignore
│ │ └── custom/
│ │ ├── 01_base.ldif
│ │ ├── 02_KafkaDevelopers.ldif
│ │ ├── 10_alice.ldif
│ │ ├── 11_barnie.ldif
│ │ ├── 12_charlie.ldif
│ │ └── 20_group_add.ldif
│ ├── scripts/
│ │ ├── .gitignore
│ │ └── certs-create.sh
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── confluent.repo
│ ├── zookeeper.properties
│ └── zookeeper.sasl.jaas.config
├── ldap-auth/
│ ├── docker-compose.yaml
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── alice.properties
│ │ ├── barnie.properties
│ │ ├── charlie.properties
│ │ ├── confluent.repo
│ │ ├── kafka.jaas.config
│ │ ├── kafka.properties
│ │ ├── log4j.properties
│ │ └── server.properties
│ ├── ldap/
│ │ └── custom/
│ │ ├── 01_base.ldif
│ │ ├── 02_KafkaDevelopers.ldif
│ │ ├── 03_ProjectA.ldif
│ │ ├── 04_ProjectB.ldif
│ │ ├── 10_alice.ldif
│ │ ├── 11_barnie.ldif
│ │ ├── 12_charlie.ldif
│ │ ├── 13_donald.ldif
│ │ ├── 14_eva.ldif
│ │ ├── 15_fritz.ldif
│ │ ├── 16_greta.ldif
│ │ ├── 17_kafka.ldif
│ │ └── 20_group_add.ldif
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── confluent.repo
│ ├── zookeeper.properties
│ └── zookeeper.sasl.jaas.config
├── multi-sasl/
│ ├── docker-compose.yml
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── confluent.repo
│ │ ├── consumer.plain.properties
│ │ ├── consumer.properties
│ │ ├── kafka.sasl.jaas.config
│ │ └── server.properties
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── confluent.repo
│ ├── zookeeper.properties
│ └── zookeeper.sasl.jaas.config
├── none/
│ ├── docker-compose.yml
│ └── up
├── oauth/
│ ├── .gitignore
│ ├── ca.cnf
│ ├── docker-compose.yml
│ ├── generate_certs.sh
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── client.properties
│ │ ├── confluent.repo
│ │ ├── kafka_server_jaas.conf
│ │ ├── oauthcallbackhandlers/
│ │ │ ├── .gitignore
│ │ │ ├── pom.xml
│ │ │ └── src/
│ │ │ ├── main/
│ │ │ │ └── java/
│ │ │ │ └── io/
│ │ │ │ └── confluent/
│ │ │ │ └── examples/
│ │ │ │ └── authentication/
│ │ │ │ └── oauth/
│ │ │ │ ├── JwtHelper.java
│ │ │ │ ├── MyOauthBearerToken.java
│ │ │ │ ├── OauthBearerLoginCallbackHandler.java
│ │ │ │ └── OauthBearerValidatorCallbackHandler.java
│ │ │ └── test/
│ │ │ └── java/
│ │ │ └── io/
│ │ │ └── confluent/
│ │ │ └── examples/
│ │ │ └── authentication/
│ │ │ └── oauth/
│ │ │ ├── JwtHelperTest.java
│ │ │ └── ProduceDataTest.java
│ │ ├── server.properties
│ │ └── test_produce_and_consume.sh
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── confluent.repo
│ └── zookeeper.properties
├── plain/
│ ├── consumer.properties
│ ├── docker-compose.yml
│ ├── producer.properties
│ └── up
├── quotas/
│ ├── Client/
│ │ ├── Dockerfile
│ │ └── confluent.repo
│ ├── Grafana/
│ │ └── provisioning/
│ │ ├── dashboards/
│ │ │ ├── grafana-dashboard.json
│ │ │ └── one-quota.yaml
│ │ └── datasources/
│ │ └── prometheus.yaml
│ ├── JMX_Exporter/
│ │ ├── jmx_prometheus_javaagent-0.11.0.jar
│ │ ├── kafka_config.yml
│ │ └── zookeeper_config.yml
│ ├── Prometheus/
│ │ └── prometheus.yml
│ ├── docker-compose.yml
│ ├── secrets/
│ │ ├── admin.properties
│ │ ├── kafka_server_jaas.conf
│ │ ├── noquota.properties
│ │ └── quota.properties
│ └── up
├── rbac/
│ ├── README.md
│ ├── client-configs/
│ │ ├── alice.properties
│ │ ├── barnie.properties
│ │ ├── charlie.properties
│ │ ├── copy-props.sh
│ │ ├── donald.properties
│ │ ├── eva.properties
│ │ ├── fritz.properties
│ │ └── greta.properties
│ ├── conf/
│ │ ├── keypair.pem
│ │ └── public.pem
│ ├── create-role-bindings.sh
│ ├── docker-compose.yml
│ ├── functions.sh
│ ├── kafka-registered.sh
│ ├── ldap/
│ │ └── custom/
│ │ ├── 01_base.ldif
│ │ ├── 02_KafkaDevelopers.ldif
│ │ ├── 03_ProjectA.ldif
│ │ ├── 04_ProjectB.ldif
│ │ ├── 10_alice.ldif
│ │ ├── 11_barnie.ldif
│ │ ├── 12_charlie.ldif
│ │ ├── 13_donald.ldif
│ │ ├── 14_eva.ldif
│ │ ├── 15_fritz.ldif
│ │ ├── 16_greta.ldif
│ │ └── 20_group_add.ldif
│ └── up
├── schema-registry/
│ ├── with-basic-auth/
│ │ ├── docker-compose.yml
│ │ ├── jaas_config.file
│ │ ├── password-file
│ │ └── up
│ ├── with-basic-auth-and-ccloud/
│ │ ├── README.md
│ │ ├── docker-compose.yml
│ │ ├── jaas_config.file
│ │ ├── password-file
│ │ └── up
│ └── with-http_and_https/
│ ├── .gitignore
│ ├── README.md
│ ├── docker-compose.yml
│ ├── schema-registry/
│ │ ├── config/
│ │ │ ├── ca.cnf
│ │ │ └── client.cnf
│ │ └── secrets/
│ │ ├── ca-chain.cert.pem
│ │ ├── schema-registry.cert.pem
│ │ ├── schema-registry.key.pem
│ │ ├── schema-registry.keystore
│ │ └── schema-registry.truststore
│ ├── up
│ └── verify.sh
├── scram/
│ ├── admin.properties
│ ├── consumer.properties
│ ├── docker-compose.yml
│ ├── jline-2.14.6.jar
│ ├── kafka.sasl.jaas.config
│ ├── producer.properties
│ ├── up
│ └── zookeeper.sasl.jaas.config
├── secure-jmx/
│ ├── README.md
│ ├── docker-compose.yml
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── confluent.repo
│ │ ├── consumer.properties
│ │ └── server.properties
│ ├── pull-jmx-kafka.sh
│ ├── pull-jmx-zookeeper.sh
│ ├── secrets/
│ │ ├── client.keystore
│ │ ├── client.truststore
│ │ ├── jmxremote.access
│ │ ├── jmxremote.password
│ │ ├── jmxremote.properties
│ │ ├── kafka.keystore
│ │ └── kafka.truststore
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── confluent.repo
│ ├── jmxremote.access
│ ├── jmxremote.password
│ ├── jmxremote.properties
│ └── zookeeper.properties
├── tls/
│ ├── .gitignore
│ ├── ca.cnf
│ ├── client.cnf
│ ├── docker-compose.yml
│ ├── kafka/
│ │ ├── Dockerfile
│ │ ├── confluent.repo
│ │ ├── consumer.properties
│ │ ├── kafkacat
│ │ ├── kafkacat.conf
│ │ └── server.properties
│ ├── kafkacat.conf
│ ├── local-client.cnf
│ ├── schema-registry/
│ │ ├── Dockerfile
│ │ ├── confluent.repo
│ │ └── schema-registry.properties
│ ├── schema-registry-client.cnf
│ ├── server.cnf
│ ├── up
│ └── zookeeper/
│ ├── Dockerfile
│ ├── confluent.repo
│ └── zookeeper.properties
└── tls-with-ocrl/
├── .gitignore
├── README.md
├── certs/
│ ├── broker.keystore
│ ├── broker.truststore
│ ├── client.keystore
│ └── client.truststore
├── docker-compose.yml
├── kafka/
│ ├── Dockerfile
│ ├── confluent.repo
│ ├── consumer.properties
│ └── server.properties
├── up
├── web/
│ └── crls.pem
└── zookeeper/
├── Dockerfile
├── confluent.repo
└── zookeeper.properties
SYMBOL INDEX (73 symbols across 7 files)
FILE: kraft/none/image/kafka-images/kafka/test/test_kafka.py
class ConfigTest (line 77) | class ConfigTest(unittest.TestCase):
method setUpClass (line 80) | def setUpClass(cls):
method tearDownClass (line 102) | def tearDownClass(cls):
method is_kafka_healthy_for_service (line 108) | def is_kafka_healthy_for_service(cls, service, port, num_brokers, host...
method test_required_config_failure (line 112) | def test_required_config_failure(self):
method test_default_config (line 130) | def test_default_config(self):
method test_default_logging_config (line 150) | def test_default_logging_config(self):
method test_full_config (line 182) | def test_full_config(self):
method test_full_logging_config (line 194) | def test_full_logging_config(self):
method test_volumes (line 227) | def test_volumes(self):
method test_random_user (line 230) | def test_random_user(self):
method test_kitchen_sink (line 233) | def test_kitchen_sink(self):
method test_ssl_config (line 247) | def test_ssl_config(self):
method test_sasl_config (line 270) | def test_sasl_config(self):
class StandaloneNetworkingTest (line 297) | class StandaloneNetworkingTest(unittest.TestCase):
method setUpClass (line 300) | def setUpClass(cls):
method tearDownClass (line 307) | def tearDownClass(cls):
method is_kafka_healthy_for_service (line 311) | def is_kafka_healthy_for_service(cls, service, port, num_brokers, host...
method test_bridged_network (line 315) | def test_bridged_network(self):
method test_host_network (line 329) | def test_host_network(self):
method test_jmx_host_network (line 343) | def test_jmx_host_network(self):
method test_jmx_bridged_network (line 352) | def test_jmx_bridged_network(self):
class ClusterBridgedNetworkTest (line 362) | class ClusterBridgedNetworkTest(unittest.TestCase):
method setUpClass (line 364) | def setUpClass(cls):
method tearDownClass (line 370) | def tearDownClass(cls):
method test_cluster_running (line 373) | def test_cluster_running(self):
method is_kafka_healthy_for_service (line 377) | def is_kafka_healthy_for_service(cls, service, port, num_brokers, host...
method test_bridge_network (line 381) | def test_bridge_network(self):
class ClusterSSLBridgedNetworkTest (line 406) | class ClusterSSLBridgedNetworkTest(ClusterBridgedNetworkTest):
method setUpClass (line 408) | def setUpClass(cls):
method tearDownClass (line 422) | def tearDownClass(cls):
method test_bridge_network (line 426) | def test_bridge_network(self):
class ClusterSASLBridgedNetworkTest (line 460) | class ClusterSASLBridgedNetworkTest(ClusterBridgedNetworkTest):
method setUpClass (line 462) | def setUpClass(cls):
method tearDownClass (line 485) | def tearDownClass(cls):
method test_bridge_network (line 489) | def test_bridge_network(self):
class ClusterHostNetworkTest (line 532) | class ClusterHostNetworkTest(unittest.TestCase):
method setUpClass (line 534) | def setUpClass(cls):
method tearDownClass (line 540) | def tearDownClass(cls):
method test_cluster_running (line 543) | def test_cluster_running(self):
method is_kafka_healthy_for_service (line 547) | def is_kafka_healthy_for_service(cls, service, port, num_brokers, host...
method test_host_network (line 551) | def test_host_network(self):
class ClusterSSLHostNetworkTest (line 576) | class ClusterSSLHostNetworkTest(ClusterHostNetworkTest):
method setUpClass (line 578) | def setUpClass(cls):
method tearDownClass (line 593) | def tearDownClass(cls):
method test_host_network (line 597) | def test_host_network(self):
class ClusterSASLHostNetworkTest (line 631) | class ClusterSASLHostNetworkTest(ClusterHostNetworkTest):
method setUpClass (line 633) | def setUpClass(cls):
method tearDownClass (line 670) | def tearDownClass(cls):
method test_host_network (line 674) | def test_host_network(self):
FILE: oauth/kafka/oauthcallbackhandlers/src/main/java/io/confluent/examples/authentication/oauth/JwtHelper.java
class JwtHelper (line 13) | public class JwtHelper {
method createJwt (line 15) | String createJwt() throws UnsupportedEncodingException {
method validate (line 29) | MyOauthBearerToken validate(String jwt) throws UnsupportedEncodingExce...
FILE: oauth/kafka/oauthcallbackhandlers/src/main/java/io/confluent/examples/authentication/oauth/MyOauthBearerToken.java
class MyOauthBearerToken (line 9) | @Data
method MyOauthBearerToken (line 18) | MyOauthBearerToken() { }
method MyOauthBearerToken (line 20) | MyOauthBearerToken(String value) {
method value (line 25) | @Override
method scope (line 30) | @Override
method lifetimeMs (line 35) | @Override
method principalName (line 40) | @Override
method startTimeMs (line 45) | @Override
FILE: oauth/kafka/oauthcallbackhandlers/src/main/java/io/confluent/examples/authentication/oauth/OauthBearerLoginCallbackHandler.java
class OauthBearerLoginCallbackHandler (line 15) | public class OauthBearerLoginCallbackHandler implements AuthenticateCall...
method configure (line 21) | @Override
method close (line 26) | @Override
method handle (line 31) | @Override
FILE: oauth/kafka/oauthcallbackhandlers/src/main/java/io/confluent/examples/authentication/oauth/OauthBearerValidatorCallbackHandler.java
class OauthBearerValidatorCallbackHandler (line 15) | public class OauthBearerValidatorCallbackHandler implements Authenticate...
method configure (line 21) | @Override
method close (line 26) | @Override
method handle (line 31) | @Override
FILE: oauth/kafka/oauthcallbackhandlers/src/test/java/io/confluent/examples/authentication/oauth/JwtHelperTest.java
class JwtHelperTest (line 12) | public class JwtHelperTest {
method test (line 14) | @Test
FILE: oauth/kafka/oauthcallbackhandlers/src/test/java/io/confluent/examples/authentication/oauth/ProduceDataTest.java
class ProduceDataTest (line 16) | public class ProduceDataTest {
method test (line 19) | @Test
Condensed preview — 476 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (578K chars).
[
{
"path": ".gitignore",
"chars": 52,
"preview": ".envrc\n.java-version\nkerberos-multi-node/TODO\n.idea\n"
},
{
"path": "KerberosCheatsheet.md",
"chars": 3476,
"preview": "# Kerberos Cheat Sheet\n\n## Introduction \n\nThis cheat sheet contains common commands regarding Kerberos administration an"
},
{
"path": "README.md",
"chars": 18075,
"preview": "# Kafka security playbook\n\nThis repository contains a set of docker images to demonstrate the security configuration of "
},
{
"path": "TlsCheatsheet.md",
"chars": 4471,
"preview": "# TLS Cheat Sheet\n\n## Introduction \n\nThis cheat sheet contains common commands regarding TLS certificate generation and "
},
{
"path": "acls/docker-compose.yaml",
"chars": 3432,
"preview": "---\nversion: '3'\nservices:\n zookeeper:\n image: confluentinc/cp-zookeeper:5.4.0\n hostname: zookeeper\n container"
},
{
"path": "acls/kafka/Dockerfile",
"chars": 365,
"preview": "FROM confluentinc/cp-enterprise-kafka:5.4.0\n\nMAINTAINER sven@confluent.io\n\n# Make sure the log directory is world-writab"
},
{
"path": "acls/kafka/admin.conf",
"chars": 192,
"preview": "sasl.mechanism=SCRAM-SHA-256\nsecurity.protocol=SASL_PLAINTEXT\nsasl.jaas.config=org.apache.kafka.common.security.scram.Sc"
},
{
"path": "acls/kafka/consumer.conf",
"chars": 198,
"preview": "sasl.mechanism=SCRAM-SHA-256\nsecurity.protocol=SASL_PLAINTEXT\nsasl.jaas.config=org.apache.kafka.common.security.scram.Sc"
},
{
"path": "acls/kafka/kafka.conf",
"chars": 192,
"preview": "sasl.mechanism=SCRAM-SHA-256\nsecurity.protocol=SASL_PLAINTEXT\nsasl.jaas.config=org.apache.kafka.common.security.scram.Sc"
},
{
"path": "acls/kafka/kafka.sasl.jaas.conf",
"chars": 382,
"preview": "KafkaServer {\n org.apache.kafka.common.security.scram.ScramLoginModule required\n username=\"kafka\"\n password=\"kafka"
},
{
"path": "acls/kafka/kafkacat.conf",
"chars": 108,
"preview": "security.protocol=SASL_PLAINTEXT\nsasl.mechanisms=SCRAM-SHA-256\nsasl.username=kafka\nsasl.password=kafka-pass\n"
},
{
"path": "acls/kafka/log4j.properties.template",
"chars": 1239,
"preview": "log4j.rootLogger={{ env[\"KAFKA_LOG4J_ROOT_LOGLEVEL\"] | default('INFO') }}, stdout\n\nlog4j.appender.stdout=org.apache.log4"
},
{
"path": "acls/kafka/producer.conf",
"chars": 198,
"preview": "sasl.mechanism=SCRAM-SHA-256\nsecurity.protocol=SASL_PLAINTEXT\nsasl.jaas.config=org.apache.kafka.common.security.scram.Sc"
},
{
"path": "acls/up",
"chars": 2009,
"preview": "#!/bin/sh\n \ndocker-compose up -d --build\n\n# Creating the user kafka\n# kafka is configured as a super user, no need for "
},
{
"path": "acls/zookeeper.sasl.jaas.conf",
"chars": 220,
"preview": "Server {\n org.apache.zookeeper.server.auth.DigestLoginModule required\n user_admin=\"password\";\n};\nClient {\n org.apa"
},
{
"path": "apache-kafka-with-zk3.5-and-tls/.gitignore",
"chars": 60,
"preview": "bin/\ncerts/\ncerts-old/\ntmp-dir\nimages/\nzookeeper.properties\n"
},
{
"path": "apache-kafka-with-zk3.5-and-tls/README.md",
"chars": 2993,
"preview": "# Apache Kafka 2.4 (trunk) with Zookeeper 3.5.5\n\nThis playbook show the current (as of August 2019) necessary steps to e"
},
{
"path": "apache-kafka-with-zk3.5-and-tls/docker-compose.yml",
"chars": 891,
"preview": "version: '3'\nservices:\n zookeeper:\n build: zookeeper/\n container_name: zookeeper\n hostname: zookeeper\n rest"
},
{
"path": "apache-kafka-with-zk3.5-and-tls/kafka/Dockerfile",
"chars": 271,
"preview": "FROM purbon/kafka\nMAINTAINER pere.urbon@gmail.com\nENV container docker\n\n# 1. Install openjdk\nRUN yum install -y java-11-"
},
{
"path": "apache-kafka-with-zk3.5-and-tls/kafka/server.properties",
"chars": 8856,
"preview": " # Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE"
},
{
"path": "apache-kafka-with-zk3.5-and-tls/up",
"chars": 2785,
"preview": "#!/usr/bin/env bash\nset -e\n\nfunction gencert() {\n if [ -a $1.jks ];\n then\n echo \"The keystore $1.jks already exists"
},
{
"path": "apache-kafka-with-zk3.5-and-tls/zookeeper/Dockerfile",
"chars": 366,
"preview": "FROM purbon/zookeeper:3.5.5\nMAINTAINER pere.urbon@gmail.com\nENV container docker\n\n# 2. Install zookeeper and kafka\nRUN y"
},
{
"path": "apache-kafka-with-zk3.5-and-tls/zookeeper/tlsZkCli.sh",
"chars": 401,
"preview": "##!/usr/bin/env bash\n\nexport CLIENT_JVMFLAGS=\"-Dzookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty -"
},
{
"path": "apache-kafka-with-zk3.5-and-tls/zookeeper/zoo.cfg",
"chars": 1394,
"preview": "# The number of milliseconds of each tick\ntickTime=2000\n# The number of ticks that the initial\n# synchronization phase c"
},
{
"path": "auditlog/README.md",
"chars": 11634,
"preview": "# Kafka Audit Log\n\nThis playbook add an example of using the confluent audit log trail.\nThe present example works with S"
},
{
"path": "auditlog/config/delete-records.json",
"chars": 148,
"preview": "{\n \"partitions\": [\n {\n \"topic\": \"bar\",\n \"partition\": 0,\n \"offset\": 3\n "
},
{
"path": "auditlog/data/my_msgs.txt",
"chars": 54,
"preview": "This is a message\nThis is another message\nAbracadabra\n"
},
{
"path": "auditlog/docker-compose.yml",
"chars": 2228,
"preview": "version: \"2\"\nservices:\n zookeeper:\n image: confluentinc/cp-zookeeper:5.5.0\n hostname: zookeeper\n container_nam"
},
{
"path": "auditlog/example-config.json",
"chars": 696,
"preview": "{\n\t\"routes\": {\n\t\t\"crn:///kafka=*/group=*\": {\n\t\t\t\"consume\": {\n\t\t\t\t\"allowed\": \"confluent-audit-log-events\",\n\t\t\t\t\"denied\": "
},
{
"path": "auditlog/kafka/consumer-user.properties",
"chars": 198,
"preview": "sasl.mechanism=SCRAM-SHA-256\nsecurity.protocol=SASL_PLAINTEXT\nsasl.jaas.config=org.apache.kafka.common.security.scram.Sc"
},
{
"path": "auditlog/kafka/kafka-user.properties",
"chars": 188,
"preview": "sasl.mechanism=SCRAM-SHA-256\nsecurity.protocol=SASL_PLAINTEXT\nsasl.jaas.config=org.apache.kafka.common.security.scram.Sc"
},
{
"path": "auditlog/kafka/kafka.properties",
"chars": 1235,
"preview": "broker.id=1\nadvertised.listeners=SASL_PLAINTEXT://kafka:9092\noffsets.topic.replication.factor=1\nallow.everyone.if.no.acl"
},
{
"path": "auditlog/kafka/kafka.sasl.jaas.config",
"chars": 245,
"preview": "KafkaServer {\n org.apache.kafka.common.security.scram.ScramLoginModule required\n username=\"kafka\"\n password=\"kafka"
},
{
"path": "auditlog/kafka/log4j.properties",
"chars": 541,
"preview": "\nlog4j.rootLogger=INFO, stdout\n\nlog4j.appender.stdout=org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.layout=org."
},
{
"path": "auditlog/kafka/producer-user.properties",
"chars": 198,
"preview": "sasl.mechanism=SCRAM-SHA-256\nsecurity.protocol=SASL_PLAINTEXT\nsasl.jaas.config=org.apache.kafka.common.security.scram.Sc"
},
{
"path": "auditlog/kafka/tools-log4j.properties",
"chars": 251,
"preview": "\nlog4j.rootLogger=WARN, stderr\n\nlog4j.appender.stderr=org.apache.log4j.ConsoleAppender\nlog4j.appender.stderr.layout=org."
},
{
"path": "auditlog/scripts/create-topics.sh",
"chars": 1459,
"preview": "#!/usr/bin/env bash\n\necho \"Create topic foo with User:kafka\"\necho \"NOTE: this topic creation will be ignored because use"
},
{
"path": "auditlog/scripts/delete-records.sh",
"chars": 227,
"preview": "#!/usr/bin/env bash\n\ndocker exec kafka kafka-delete-records --bootstrap-server kafka:9092 \\\n --command-con"
},
{
"path": "auditlog/scripts/describe-topics.sh",
"chars": 143,
"preview": "#!/usr/bin/env bash\n\ndocker exec kafka kafka-topics --bootstrap-server kafka:9092 --command-config /etc/kafka/kafka-user"
},
{
"path": "auditlog/scripts/explore-audit-topic.sh",
"chars": 252,
"preview": "#!/usr/bin/env bash\n\nTOPIC=\"confluent-audit-log-events\"\ndocker-compose exec kafka kafka-console-consumer --bootstrap-ser"
},
{
"path": "auditlog/scripts/write-msg.sh",
"chars": 526,
"preview": "#!/usr/bin/env bash\n\nPWD=`pwd`\ntopic=$1\nnetwork=\"auditlog_default\"\n\nUSERNAME=producer\nPASSWORD=producerpass\n\necho \"Write"
},
{
"path": "auditlog/up",
"chars": 1627,
"preview": "#!/usr/bin/env bash\n\ndocker-compose up -d\n\ndocker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --"
},
{
"path": "auditlog/zookeeper/log4j.properties",
"chars": 213,
"preview": "\nlog4j.rootLogger=INFO, stdout\n\nlog4j.appender.stdout=org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.layout=org."
},
{
"path": "auditlog/zookeeper/tools-log4j.properties",
"chars": 251,
"preview": "\nlog4j.rootLogger=WARN, stderr\n\nlog4j.appender.stderr=org.apache.log4j.ConsoleAppender\nlog4j.appender.stderr.layout=org."
},
{
"path": "auditlog/zookeeper/zookeeper.properties",
"chars": 86,
"preview": "\ndataDir=/var/lib/zookeeper/data\ndataLogDir=/var/lib/zookeeper/log\n\nclientPort=2181\n\n\n"
},
{
"path": "auditlog/zookeeper/zookeeper.sasl.jaas.config",
"chars": 220,
"preview": "Server {\n org.apache.zookeeper.server.auth.DigestLoginModule required\n user_admin=\"password\";\n};\nClient {\n org.apa"
},
{
"path": "ca-builder-scripts/.gitignore",
"chars": 74,
"preview": "tmp-certs/\nstores\nlegacy/\n\n## remove from git the generated CA files\n\nca/\n"
},
{
"path": "ca-builder-scripts/README.md",
"chars": 4856,
"preview": "# Building a CA with OpenSSL\n\nThis is a collection is scripts useful to generated a local CA setup. While the PKI could "
},
{
"path": "ca-builder-scripts/build-a-batch-of-certs.sh",
"chars": 242,
"preview": "#!/usr/bin/env bash\n\ninput=$1\nwhile IFS= read -r line\ndo\n fields=($(echo $line | tr \",\" \"\\n\"))\n #./support-scripts/cre"
},
{
"path": "ca-builder-scripts/build-a-batch-of-stores.sh",
"chars": 2221,
"preview": "#!/usr/bin/env bash\n\nDEFAULT_PASSWORD=${2:-confluent}\n\nif [ -z \"${CA_ROOT_DIR+x}\" ];\nthen\nCA_ROOT_DIR='.'\nfi\n\nCA_CERT=\"$"
},
{
"path": "ca-builder-scripts/configs/batch-of-certs.txt",
"chars": 118,
"preview": "consumer,machine0.example.com\nproducer,machine1.example.com\nkafka,machine2.example.com\nzookeeper,machine3.example.com\n"
},
{
"path": "ca-builder-scripts/configs/batch-of-stores.txt",
"chars": 118,
"preview": "consumer,machine0.example.com\nproducer,machine1.example.com\nkafka,machine2.example.com\nzookeeper,machine3.example.com\n"
},
{
"path": "ca-builder-scripts/configs/ca-config-vars",
"chars": 35,
"preview": "DE\nBerlin\nBerlin\nConfluent Germany\n"
},
{
"path": "ca-builder-scripts/configs/ca.config",
"chars": 4117,
"preview": "[ ca ]\ndefault_ca = CA_default\n\n[ CA_default ]\n# Directory and file locations.\ndir = .\ncerts ="
},
{
"path": "ca-builder-scripts/configs/intermediate-ca.config",
"chars": 4356,
"preview": "[defaults]\ncrl_url = http://httpd:80/crls.pem # CRL distribution point\n\n[ ca ]\n# `man ca`\ndefault_ca = CA_"
},
{
"path": "ca-builder-scripts/create-crl.sh",
"chars": 209,
"preview": "#!/usr/bin/env bash\n\n\nDEFAULT_PASSWORD=${1:-confluent}\n\nif [ -z \"${CA_ROOT_DIR+x}\" ];\nthen\nCA_ROOT_DIR='.'\nfi\n\nsource $C"
},
{
"path": "ca-builder-scripts/create-pair-certs.sh",
"chars": 845,
"preview": "#!/usr/bin/env bash\n\n#HOSTNAME=\"www.example.com\"\n#EXTENSION=\"server_cert\" #usr_cert for client auth, server_cert for for"
},
{
"path": "ca-builder-scripts/del-cert.sh",
"chars": 276,
"preview": "#!/usr/bin/env bash\n\nNAME=$1\n\nif [ -z \"${CA_ROOT_DIR+x}\" ];\nthen\nCA_ROOT_DIR='.'\nfi\n\necho \"Deleting CERT $NAME\"\n\nrm \"$CA"
},
{
"path": "ca-builder-scripts/revoke-cert.sh",
"chars": 199,
"preview": "#!/usr/bin/env bash\n\nCERT=$1\nDEFAULT_PASSWORD=${2:-confluent}\n\nif [ -z \"${CA_ROOT_DIR+x}\" ];\nthen\nCA_ROOT_DIR='.'\nfi\n\nso"
},
{
"path": "ca-builder-scripts/setup-ca-with-intermediate-ca.sh",
"chars": 565,
"preview": "#!/usr/bin/env bash\n\n##\n# This script builds a Certificate Authority of the form:\n# Root CA -> intermediate CA\n#\n# In th"
},
{
"path": "ca-builder-scripts/support-scripts/build-ca.sh",
"chars": 1301,
"preview": "#!/usr/bin/expect\n\nproc slurp {file} {\n set fh [open $file r]\n set ret [read $fh]\n close $fh\n return $ret\n}\n"
},
{
"path": "ca-builder-scripts/support-scripts/create-cert.sh",
"chars": 1097,
"preview": "#!/usr/bin/expect -f\n\nproc slurp {file} {\n set fh [open $file r]\n set ret [read $fh]\n close $fh\n return $ret"
},
{
"path": "ca-builder-scripts/utils/build-ca.sh",
"chars": 423,
"preview": "#!/usr/bin/env bash\n\nDEFAULT_PASSWORD=${1:-confluent}\n\nif [ -z \"${CA_ROOT_DIR+x}\" ];\nthen\nCA_ROOT_DIR='.'\nfi\n\nsource $CA"
},
{
"path": "ca-builder-scripts/utils/build-intermediate-ca.sh",
"chars": 555,
"preview": "#!/usr/bin/env bash\n\nDEFAULT_PASSWORD=${1:-confluent}\n\nif [ -z \"${CA_ROOT_DIR+x}\" ];\nthen\nCA_ROOT_DIR='.'\nfi\nITERMEDIATE"
},
{
"path": "ca-builder-scripts/utils/functions.sh",
"chars": 3605,
"preview": "#!/usr/bin/env bash\n\ngenerate_ca_keys_and_certs () {\n\nopenssl genrsa -aes256 -passout pass:$DEFAULT_PASSWORD -out privat"
},
{
"path": "delegation_tokens/.gitignore",
"chars": 7,
"preview": "certs/\n"
},
{
"path": "delegation_tokens/ca.cnf",
"chars": 553,
"preview": "[ policy_match ]\ncountryName = match\nstateOrProvinceName = match\norganizationName = match\norganizationalUnitName = optio"
},
{
"path": "delegation_tokens/client.cnf",
"chars": 646,
"preview": "[req]\nprompt = no\ndistinguished_name = dn\ndefault_md = sha256\ndefault_bits = 4096\nreq_extensions = v3_req\n\n[ dn ]\ncountr"
},
{
"path": "delegation_tokens/docker-compose.yml",
"chars": 878,
"preview": "version: '3'\nservices:\n zookeeper:\n build: zookeeper/\n container_name: zookeeper\n hostname: zookeeper\n doma"
},
{
"path": "delegation_tokens/kafka/Dockerfile",
"chars": 683,
"preview": "FROM centos\nMAINTAINER d.gasparina@gmail.com\nENV container docker\n\n# 1. Adding Confluent repository\nRUN rpm --import htt"
},
{
"path": "delegation_tokens/kafka/confluent.repo",
"chars": 340,
"preview": "[Confluent.dist]\nname=Confluent repository (dist)\nbaseurl=https://packages.confluent.io/rpm/5.4/7\ngpgcheck=1\ngpgkey=http"
},
{
"path": "delegation_tokens/kafka/consumer.properties",
"chars": 444,
"preview": "sasl.mechanism=SCRAM-SHA-256\n# Configure SASL_SSL if SSL encryption is enabled, otherwise configure SASL_PLAINTEXT\nsecur"
},
{
"path": "delegation_tokens/kafka/create_client_properties.sh",
"chars": 937,
"preview": "#!/bin/bash\n\nset -e\nset -u\n\nRESPONSE=$(kafka-delegation-tokens \\\n --bootstrap-server kafka.confluent.local:9093 \\"
},
{
"path": "delegation_tokens/kafka/kafka_server_jaas.conf",
"chars": 531,
"preview": "// Username and password are used by the broker to initiate connections to other brokers\n// admin is another user allowe"
},
{
"path": "delegation_tokens/kafka/server.properties",
"chars": 845,
"preview": "############################# Server Basics #############################\nbroker.id=0\nlisteners=SASL_SSL://kafka.conflue"
},
{
"path": "delegation_tokens/server.cnf",
"chars": 658,
"preview": "[req]\nprompt = no\ndistinguished_name = dn\ndefault_md = sha256\ndefault_bits = 4096\nreq_extensions = v3_req\n\n[ dn ]\ncountr"
},
{
"path": "delegation_tokens/up",
"chars": 2894,
"preview": "#!/bin/sh\nset -e\n\n# Creating TLS CA, Certificates and keystore / truststore\nrm -rf certs \nmkdir -p certs\n# Generate CA c"
},
{
"path": "delegation_tokens/zookeeper/Dockerfile",
"chars": 580,
"preview": "FROM centos\nMAINTAINER d.gasparina@gmail.com\nENV container docker\n\n# 1. Adding Confluent repository\nRUN rpm --import htt"
},
{
"path": "delegation_tokens/zookeeper/confluent.repo",
"chars": 340,
"preview": "[Confluent.dist]\nname=Confluent repository (dist)\nbaseurl=https://packages.confluent.io/rpm/5.4/7\ngpgcheck=1\ngpgkey=http"
},
{
"path": "delegation_tokens/zookeeper/zookeeper.properties",
"chars": 167,
"preview": "dataDir=/var/lib/zookeeper\nclientPort=2181\nmaxClientCnxns=0\nauthProvider.1 = org.apache.zookeeper.server.auth.SASLAuthen"
},
{
"path": "delegation_tokens/zookeeper/zookeeper.sasl.jaas.config",
"chars": 98,
"preview": "Server {\n org.apache.zookeeper.server.auth.DigestLoginModule required\n user_kafka=\"kafka\";\n};\n"
},
{
"path": "kafka-connect-mtls/.gitignore",
"chars": 56,
"preview": "connect/secrets/client-*.pem\nconnect/secrets/client.p12\n"
},
{
"path": "kafka-connect-mtls/README.md",
"chars": 1284,
"preview": "# Kafka Connect REST api ssl client auth\n\nOne of the common question regarding security on Kafka Connect REST api is how"
},
{
"path": "kafka-connect-mtls/check-ssl-client-auth.sh",
"chars": 2191,
"preview": "#!/usr/bin/env bash\n\nverify_ok_ssl_client_auth () {\n cp -f ../ca-builder-scripts/ca/intermediate/private/$1.key.pem con"
},
{
"path": "kafka-connect-mtls/connect/config/ca.cnf",
"chars": 555,
"preview": "[ policy_match ]\ncountryName = match\nstateOrProvinceName = match\norganizationName = match\norganizationalUnitName = optio"
},
{
"path": "kafka-connect-mtls/connect/config/client.cnf",
"chars": 627,
"preview": "[req]\nprompt = no\ndistinguished_name = dn\ndefault_md = sha256\ndefault_bits = 4096\nreq_extensions = v3_req\n\n[ dn ]\ncountr"
},
{
"path": "kafka-connect-mtls/connect/secrets/ca-chain.cert.pem",
"chars": 4208,
"preview": "-----BEGIN CERTIFICATE-----\nMIIF4TCCA8mgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwgYExCzAJBgNVBAYTAkRF\nMQ8wDQYDVQQIDAZCZXJsaW4xDzA"
},
{
"path": "kafka-connect-mtls/connect/secrets/connect.cert.pem",
"chars": 2183,
"preview": "-----BEGIN CERTIFICATE-----\nMIIGIDCCBAigAwIBAgICEAEwDQYJKoZIhvcNAQELBQAwfTELMAkGA1UEBhMCREUx\nDzANBgNVBAgMBkJlcmxpbjEWMBQ"
},
{
"path": "kafka-connect-mtls/connect/secrets/connect.key.pem",
"chars": 1766,
"preview": "-----BEGIN RSA PRIVATE KEY-----\nProc-Type: 4,ENCRYPTED\nDEK-Info: AES-256-CBC,03578ECB28A28BA14408AF4EB2F82DCB\n\nhQkZlH8Vn"
},
{
"path": "kafka-connect-mtls/docker-compose.yml",
"chars": 3353,
"preview": "---\nversion: '3'\nservices:\n zookeeper:\n image: confluentinc/cp-zookeeper:5.3.1\n environment:\n ZOOKEEPER_CLIE"
},
{
"path": "kafka-connect-mtls/up",
"chars": 115,
"preview": "#!/usr/bin/env bash\n\ndocker-compose up -d\n\necho \"to verify the connection use the check-ssl-client-auth.sh script\"\n"
},
{
"path": "kerberos/README.md",
"chars": 16593,
"preview": "# Kerberos configuration demo\n\nThis demo sets up a basic Kafka cluster secured with Kerberos authentication, and sets up"
},
{
"path": "kerberos/client/Dockerfile",
"chars": 821,
"preview": "FROM centos:centos7\nMAINTAINER d.gasparina@gmail.com\nENV container docker\n\n# 1. Adding Confluent repository\nRUN rpm --im"
},
{
"path": "kerberos/client/client.sasl.jaas.config",
"chars": 393,
"preview": "/*\n* Credentials to use when connecting to ZooKeeper directly.\n*\n* Whenever possible you should use the Kafka AdminClien"
},
{
"path": "kerberos/client/command.properties",
"chars": 194,
"preview": "bootstrap.servers=kafka:9093\nsecurity.protocol=SASL_PLAINTEXT\nsasl.jaas.config=com.sun.security.auth.module.Krb5LoginMod"
},
{
"path": "kerberos/client/confluent.repo",
"chars": 339,
"preview": "[Confluent.dist]\nname=Confluent repository (dist)\nbaseurl=https://packages.confluent.io/rpm/5.4/7\ngpgcheck=1\ngpgkey=http"
},
{
"path": "kerberos/client/consumer.properties",
"chars": 198,
"preview": "bootstrap.servers=kafka:9093\nsecurity.protocol=SASL_PLAINTEXT\nsasl.kerberos.service.name=kafka\nsasl.jaas.config=com.sun."
},
{
"path": "kerberos/client/producer.properties",
"chars": 198,
"preview": "bootstrap.servers=kafka:9093\nsecurity.protocol=SASL_PLAINTEXT\nsasl.kerberos.service.name=kafka\nsasl.jaas.config=com.sun."
},
{
"path": "kerberos/docker-compose.yml",
"chars": 1288,
"preview": "version: '3.5'\nservices:\n kdc:\n hostname: kdc.kerberos-demo.local\n #domainname: kerberos_default\n build: kdc/\n"
},
{
"path": "kerberos/kafka/Dockerfile",
"chars": 1011,
"preview": "FROM centos:centos8\nMAINTAINER d.gasparina@gmail.com\nENV container docker\n\n# 0. Fixing Mirror list for Centos\nRUN sed -i"
},
{
"path": "kerberos/kafka/confluent.repo",
"chars": 339,
"preview": "[Confluent.dist]\nname=Confluent repository (dist)\nbaseurl=https://packages.confluent.io/rpm/5.4/7\ngpgcheck=1\ngpgkey=http"
},
{
"path": "kerberos/kafka/kafka.sasl.jaas.config",
"chars": 508,
"preview": "/*\n* The service principal\n*/\nKafkaServer {\n com.sun.security.auth.module.Krb5LoginModule required\n useKeyTab=true"
},
{
"path": "kerberos/kafka/server.properties",
"chars": 1497,
"preview": "# Basic broker and listener configuration\nbroker.id=0\nlisteners=SASL_PLAINTEXT://kafka.kerberos-demo.local:9093\nzookeepe"
},
{
"path": "kerberos/kdc/Dockerfile",
"chars": 726,
"preview": "FROM centos:centos8\nMAINTAINER d.gasparina@gmail.com\nENV container docker\n\n# 0. Fixing Mirror list for Centos\nRUN sed -i"
},
{
"path": "kerberos/kdc/krb5.conf",
"chars": 550,
"preview": "[libdefaults]\n default_realm = TEST.CONFLUENT.IO\n forwardable = true\n rdns = false\n dns_lookup_kdc = no\n "
},
{
"path": "kerberos/up",
"chars": 4184,
"preview": "#!/bin/sh\nset -e\n\n# Starting kerberos,\n# Avoiding starting up all services at the begining to generate the keytab first\n"
},
{
"path": "kerberos/zookeeper/Dockerfile",
"chars": 951,
"preview": "FROM centos:centos8\nMAINTAINER d.gasparina@gmail.com\nENV container docker\n\n# 0. Fixing Mirror list for Centos\nRUN sed -i"
},
{
"path": "kerberos/zookeeper/confluent.repo",
"chars": 339,
"preview": "[Confluent.dist]\nname=Confluent repository (dist)\nbaseurl=https://packages.confluent.io/rpm/5.4/7\ngpgcheck=1\ngpgkey=http"
},
{
"path": "kerberos/zookeeper/zookeeper.properties",
"chars": 205,
"preview": "dataDir=/var/lib/zookeeper\nclientPort=2181\nmaxClientCnxns=0\nauthProvider.1 = org.apache.zookeeper.server.auth.SASLAuthen"
},
{
"path": "kerberos/zookeeper/zookeeper.sasl.jaas.config",
"chars": 364,
"preview": "Server {\n com.sun.security.auth.module.Krb5LoginModule required\n useKeyTab=true\n keyTab=\"/var/lib/secr"
},
{
"path": "kerberos-multi-node/README.md",
"chars": 679,
"preview": "# Kerberos multi-node deployment example\n\nThis example shows how-to deploy multiple kafka nodes in an example kerberos e"
},
{
"path": "kerberos-multi-node/docker-compose.yml",
"chars": 2277,
"preview": "version: '3.8'\nservices:\n kdc:\n hostname: kdc\n domainname: kerberos-multi-node_default\n build: kdc/\n contai"
},
{
"path": "kerberos-multi-node/down",
"chars": 759,
"preview": "#!/bin/bash\nDESTROY=no\nif [ ! -f \"${PWD}/docker-compose.yml\" ]; then\n echo \"No docker-compose found. Exiting.\"\n ex"
},
{
"path": "kerberos-multi-node/kafka/Dockerfile",
"chars": 943,
"preview": "FROM centos:centos8\nMAINTAINER d.gasparina@gmail.com\nENV container docker\n\n# 0. Fixing Mirror list for Centos\nRUN sed -i"
},
{
"path": "kerberos-multi-node/kafka/confluent.repo",
"chars": 339,
"preview": "[Confluent.dist]\nname=Confluent repository (dist)\nbaseurl=https://packages.confluent.io/rpm/6.0/7\ngpgcheck=1\ngpgkey=http"
},
{
"path": "kerberos-multi-node/kafka/consumer.properties",
"chars": 198,
"preview": "bootstrap.servers=kafka:9093\nsecurity.protocol=SASL_PLAINTEXT\nsasl.kerberos.service.name=kafka\nsasl.jaas.config=com.sun."
},
{
"path": "kerberos-multi-node/kafka/kafka.sasl.jaas.config",
"chars": 632,
"preview": "KafkaServer {\n com.sun.security.auth.module.Krb5LoginModule required\n useKeyTab=true\n storeKey=true\n keyTab="
},
{
"path": "kerberos-multi-node/kafka/server.properties",
"chars": 1305,
"preview": "broker.id=0\nlisteners=SASL_PLAINTEXT://kafka:9093\nadvertised.listeners=SASL_PLAINTEXT://kafka:9093\nsecurity.inter.broker"
},
{
"path": "kerberos-multi-node/kafka1/Dockerfile",
"chars": 943,
"preview": "FROM centos:centos8\nMAINTAINER d.gasparina@gmail.com\nENV container docker\n\n# 0. Fixing Mirror list for Centos\nRUN sed -i"
},
{
"path": "kerberos-multi-node/kafka1/confluent.repo",
"chars": 339,
"preview": "[Confluent.dist]\nname=Confluent repository (dist)\nbaseurl=https://packages.confluent.io/rpm/6.0/7\ngpgcheck=1\ngpgkey=http"
},
{
"path": "kerberos-multi-node/kafka1/consumer.properties",
"chars": 198,
"preview": "bootstrap.servers=kafka:9093\nsecurity.protocol=SASL_PLAINTEXT\nsasl.kerberos.service.name=kafka\nsasl.jaas.config=com.sun."
},
{
"path": "kerberos-multi-node/kafka1/kafka.sasl.jaas.config",
"chars": 633,
"preview": "KafkaServer {\n com.sun.security.auth.module.Krb5LoginModule required\n useKeyTab=true\n storeKey=true\n keyTab="
},
{
"path": "kerberos-multi-node/kafka1/server.properties",
"chars": 1305,
"preview": "broker.id=1\nlisteners=SASL_PLAINTEXT://kafka1:9093\nadvertised.listeners=SASL_PLAINTEXT://kafka1:9093\nsecurity.inter.brok"
},
{
"path": "kerberos-multi-node/kdc/Dockerfile",
"chars": 726,
"preview": "FROM centos:centos8\nMAINTAINER d.gasparina@gmail.com\nENV container docker\n\n# 0. Fixing Mirror list for Centos\nRUN sed -i"
},
{
"path": "kerberos-multi-node/kdc/krb5.conf",
"chars": 575,
"preview": "[libdefaults]\n\tdefault_realm = TEST.CONFLUENT.IO\n ticket_lifetime = 24h\n renew_lifetime = 7d\n forwardable = true\n rd"
},
{
"path": "kerberos-multi-node/up",
"chars": 2497,
"preview": "#!/bin/sh\n\n# Starting kerberos,\n# Avoiding starting up all services at the begining to generate the keytab first \n\ndocke"
},
{
"path": "kerberos-multi-node/zookeeper/Dockerfile",
"chars": 869,
"preview": "FROM centos:centos8\nMAINTAINER d.gasparina@gmail.com\nENV container docker\n\n# 0. Fixing Mirror list for Centos\nRUN sed -i"
},
{
"path": "kerberos-multi-node/zookeeper/confluent.repo",
"chars": 340,
"preview": "[Confluent.dist]\nname=Confluent repository (dist)\nbaseurl=https://packages.confluent.io/rpm/6.0/7\ngpgcheck=1\ngpgkey=http"
},
{
"path": "kerberos-multi-node/zookeeper/zookeeper.properties",
"chars": 1252,
"preview": "# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE f"
},
{
"path": "kerberos-multi-node/zookeeper/zookeeper.sasl.jaas.config",
"chars": 348,
"preview": "Server {\n com.sun.security.auth.module.Krb5LoginModule required\n useKeyTab=true\n storeKey=true\n\t\tuseTicketCache"
},
{
"path": "kerberos-multi-sasl/README.md",
"chars": 16593,
"preview": "# Kerberos configuration demo\n\nThis demo sets up a basic Kafka cluster secured with Kerberos authentication, and sets up"
},
{
"path": "kerberos-multi-sasl/client/Dockerfile",
"chars": 874,
"preview": "FROM centos:centos7\nMAINTAINER d.gasparina@gmail.com\nENV container docker\n\n# 1. Adding Confluent repository\nRUN rpm --im"
},
{
"path": "kerberos-multi-sasl/client/client.sasl.jaas.config",
"chars": 393,
"preview": "/*\n* Credentials to use when connecting to ZooKeeper directly.\n*\n* Whenever possible you should use the Kafka AdminClien"
},
{
"path": "kerberos-multi-sasl/client/command.properties",
"chars": 194,
"preview": "bootstrap.servers=kafka:9093\nsecurity.protocol=SASL_PLAINTEXT\nsasl.jaas.config=com.sun.security.auth.module.Krb5LoginMod"
},
{
"path": "kerberos-multi-sasl/client/confluent.repo",
"chars": 339,
"preview": "[Confluent.dist]\nname=Confluent repository (dist)\nbaseurl=https://packages.confluent.io/rpm/5.4/7\ngpgcheck=1\ngpgkey=http"
},
{
"path": "kerberos-multi-sasl/client/consumer.properties",
"chars": 198,
"preview": "bootstrap.servers=kafka:9093\nsecurity.protocol=SASL_PLAINTEXT\nsasl.kerberos.service.name=kafka\nsasl.jaas.config=com.sun."
},
{
"path": "kerberos-multi-sasl/client/producer.properties",
"chars": 198,
"preview": "bootstrap.servers=kafka:9093\nsecurity.protocol=SASL_PLAINTEXT\nsasl.kerberos.service.name=kafka\nsasl.jaas.config=com.sun."
},
{
"path": "kerberos-multi-sasl/client/scram.properties",
"chars": 187,
"preview": "sasl.mechanism=SCRAM-SHA-512\nsecurity.protocol=SASL_PLAINTEXT\nsasl.jaas.config=org.apache.kafka.common.security.scram.Sc"
},
{
"path": "kerberos-multi-sasl/docker-compose.yml",
"chars": 1288,
"preview": "version: '3.5'\nservices:\n kdc:\n hostname: kdc.kerberos-demo.local\n #domainname: kerberos_default\n build: kdc/\n"
},
{
"path": "kerberos-multi-sasl/kafka/Dockerfile",
"chars": 971,
"preview": "FROM centos:centos8\nMAINTAINER d.gasparina@gmail.com\nENV container docker\n\n# 0. Fixing Mirror list for Centos\nRUN sed -i"
},
{
"path": "kerberos-multi-sasl/kafka/confluent.repo",
"chars": 339,
"preview": "[Confluent.dist]\nname=Confluent repository (dist)\nbaseurl=https://packages.confluent.io/rpm/5.4/7\ngpgcheck=1\ngpgkey=http"
},
{
"path": "kerberos-multi-sasl/kafka/kafka.sasl.jaas.config",
"chars": 514,
"preview": "/*\n* The service principal\n*/\n/*\nKafkaServer {\n com.sun.security.auth.module.Krb5LoginModule required\n useKeyTab=t"
},
{
"path": "kerberos-multi-sasl/kafka/server.properties",
"chars": 1937,
"preview": "# Basic broker and listener configuration\nbroker.id=0\nlisteners=SASL_PLAINTEXT://kafka.kerberos-demo.local:9093\nzookeepe"
},
{
"path": "kerberos-multi-sasl/kdc/Dockerfile",
"chars": 726,
"preview": "FROM centos:centos8\nMAINTAINER d.gasparina@gmail.com\nENV container docker\n\n# 0. Fixing Mirror list for Centos\nRUN sed -i"
},
{
"path": "kerberos-multi-sasl/kdc/krb5.conf",
"chars": 581,
"preview": "[libdefaults]\n\tdefault_realm = TEST.CONFLUENT.IO\n ticket_lifetime = 24h\n renew_lifetime = 7d\n forwardable = true\n rd"
},
{
"path": "kerberos-multi-sasl/up",
"chars": 4552,
"preview": "#!/bin/sh\nset -e\n\n# Starting kerberos,\n# Avoiding starting up all services at the begining to generate the keytab first\n"
},
{
"path": "kerberos-multi-sasl/zookeeper/Dockerfile",
"chars": 954,
"preview": "FROM centos:centos8\nMAINTAINER d.gasparina@gmail.com\nENV container docker\n\n# 0. Fixing Mirror list for Centos\nRUN sed -i"
},
{
"path": "kerberos-multi-sasl/zookeeper/confluent.repo",
"chars": 339,
"preview": "[Confluent.dist]\nname=Confluent repository (dist)\nbaseurl=https://packages.confluent.io/rpm/5.4/7\ngpgcheck=1\ngpgkey=http"
},
{
"path": "kerberos-multi-sasl/zookeeper/zookeeper.properties",
"chars": 205,
"preview": "dataDir=/var/lib/zookeeper\nclientPort=2181\nmaxClientCnxns=0\nauthProvider.1 = org.apache.zookeeper.server.auth.SASLAuthen"
},
{
"path": "kerberos-multi-sasl/zookeeper/zookeeper.sasl.jaas.config",
"chars": 364,
"preview": "Server {\n com.sun.security.auth.module.Krb5LoginModule required\n useKeyTab=true\n keyTab=\"/var/lib/secr"
},
{
"path": "kraft/none/docker-compose.yml",
"chars": 4045,
"preview": "---\nversion: '3'\nservices:\n kafka-controller-1:\n build: ./image/kafka-images/kafka/\n hostname: kafka-controller-1"
},
{
"path": "kraft/none/image/kafka-images/kafka/Dockerfile",
"chars": 2790,
"preview": "#\n# Copyright 2019 Confluent Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use "
},
{
"path": "kraft/none/image/kafka-images/kafka/Dockerfile.ubi8",
"chars": 2790,
"preview": "#\n# Copyright 2019 Confluent Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use "
},
{
"path": "kraft/none/image/kafka-images/kafka/README.md",
"chars": 2195,
"preview": "# Confluent Community Docker Image for Apache Kafka\n\nDocker image for deploying and running the Community Version of Kaf"
},
{
"path": "kraft/none/image/kafka-images/kafka/include/etc/confluent/docker/configure",
"chars": 1732,
"preview": "#!/usr/bin/env bash\n#\n# Copyright 2016 Confluent Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\")"
},
{
"path": "kraft/none/image/kafka-images/kafka/include/etc/confluent/docker/ensure",
"chars": 790,
"preview": "#!/usr/bin/env bash\n#\n# Copyright 2020 Confluent Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\")"
},
{
"path": "kraft/none/image/kafka-images/kafka/include/etc/confluent/docker/kafka.properties.template",
"chars": 1159,
"preview": "{% set excluded_props = ['KAFKA_VERSION',\n 'KAFKA_HEAP_OPTS'\n 'KAFKA_LOG"
},
{
"path": "kraft/none/image/kafka-images/kafka/include/etc/confluent/docker/launch",
"chars": 2027,
"preview": "#!/usr/bin/env bash\n#\n# Copyright 2016 Confluent Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\")"
},
{
"path": "kraft/none/image/kafka-images/kafka/include/etc/confluent/docker/log4j.properties.template",
"chars": 801,
"preview": "\nlog4j.rootLogger={{ env[\"KAFKA_LOG4J_ROOT_LOGLEVEL\"] | default('INFO') }}, stdout\n\nlog4j.appender.stdout=org.apache.log"
},
{
"path": "kraft/none/image/kafka-images/kafka/include/etc/confluent/docker/run",
"chars": 1025,
"preview": "#!/usr/bin/env bash\n#\n# Copyright 2016 Confluent Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\")"
},
{
"path": "kraft/none/image/kafka-images/kafka/include/etc/confluent/docker/tools-log4j.properties.template",
"chars": 305,
"preview": "\nlog4j.rootLogger={{ env[\"KAFKA_TOOLS_LOG4J_LOGLEVEL\"] | default('WARN') }}, stderr\n\nlog4j.appender.stderr=org.apache.lo"
},
{
"path": "kraft/none/image/kafka-images/kafka/pom.xml",
"chars": 1984,
"preview": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!--~\n ~ Copyright 2019 Confluent Inc.\n ~\n ~ Licensed under the Apache License"
},
{
"path": "kraft/none/image/kafka-images/kafka/requirements.txt",
"chars": 67,
"preview": "git+https://github.com/confluentinc/confluent-docker-utils@v0.0.32\n"
},
{
"path": "kraft/none/image/kafka-images/kafka/setup.py",
"chars": 451,
"preview": "import setuptools\n\n\nsetuptools.setup(\n name='kafka-tests',\n version='0.0.1',\n author=\"Confluent, Inc.\",\n aut"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/cluster-bridged-plain.yml",
"chars": 2461,
"preview": "---\nversion: '2'\nnetworks:\n zk:\n driver: bridge\nservices:\n zookeeper-1:\n image: confluentinc/cp-zookeeper:latest"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/cluster-bridged-sasl.yml",
"chars": 5629,
"preview": "---\nversion: '2'\nnetworks:\n zk:\n driver: bridge\nservices:\n zookeeper-1:\n image: confluentinc/cp-zookeeper:latest"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/cluster-bridged-ssl.yml",
"chars": 3778,
"preview": "---\nversion: '2'\nnetworks:\n zk:\n driver: bridge\nservices:\n zookeeper-1:\n image: confluentinc/cp-zookeeper:latest"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/cluster-host-plain.yml",
"chars": 2147,
"preview": "---\nversion: '2'\nservices:\n zookeeper-1:\n image: confluentinc/cp-zookeeper:latest\n environment:\n ZOOKEEPER_S"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/cluster-host-sasl.yml",
"chars": 6653,
"preview": "---\nversion: '2'\nservices:\n zookeeper-sasl-1:\n image: confluentinc/cp-zookeeper:latest\n # This is required becaus"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/cluster-host-ssl.yml",
"chars": 3446,
"preview": "---\nversion: '2'\nservices:\n zookeeper-1:\n image: confluentinc/cp-zookeeper:latest\n environment:\n ZOOKEEPER_S"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged.consumer.ssl.config",
"chars": 280,
"preview": "group.id=ssl-bridged\nssl.truststore.location=/etc/kafka/secrets/kafka.consumer.truststore.jks\nssl.truststore.password=co"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged.consumer.ssl.sasl.config",
"chars": 345,
"preview": "group.id=ssl-sasl-bridged\nssl.truststore.location=/etc/kafka/secrets/kafka.consumer.truststore.jks\nssl.truststore.passwo"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged.producer.ssl.config",
"chars": 259,
"preview": "ssl.truststore.location=/etc/kafka/secrets/kafka.producer.truststore.jks\nssl.truststore.password=confluent\n\nssl.keystore"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged.producer.ssl.sasl.config",
"chars": 319,
"preview": "ssl.truststore.location=/etc/kafka/secrets/kafka.producer.truststore.jks\nssl.truststore.password=confluent\n\nssl.keystore"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged_broker1_jaas.conf",
"chars": 451,
"preview": "KafkaServer {\n com.sun.security.auth.module.Krb5LoginModule required\n useKeyTab=true\n storeKey=true\n keyTab="
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged_broker2_jaas.conf",
"chars": 451,
"preview": "KafkaServer {\n com.sun.security.auth.module.Krb5LoginModule required\n useKeyTab=true\n storeKey=true\n keyTab="
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged_broker3_jaas.conf",
"chars": 451,
"preview": "KafkaServer {\n com.sun.security.auth.module.Krb5LoginModule required\n useKeyTab=true\n storeKey=true\n keyTab="
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged_consumer_jaas.conf",
"chars": 244,
"preview": "KafkaClient {\n com.sun.security.auth.module.Krb5LoginModule required\n useKeyTab=true\n storeKey=true\n keyTab="
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged_krb.conf",
"chars": 926,
"preview": "[logging]\n default = FILE:/var/log/kerberos/krb5libs.log\n kdc = FILE:/var/log/kerberos/krb5kdc.log\n admin_server = FILE:"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged_producer_jaas.conf",
"chars": 244,
"preview": "KafkaClient {\n com.sun.security.auth.module.Krb5LoginModule required\n useKeyTab=true\n storeKey=true\n keyTab="
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker1-ca1-signed.crt",
"chars": 1038,
"preview": "-----BEGIN CERTIFICATE-----\nMIIC0jCCAjsCCQC4Ge6Xmxv2ajANBgkqhkiG9w0BAQUFADBjMR4wHAYDVQQDExVj\nYTEudGVzdC5jb25mbHVlbnQuaW8"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker1_keystore_creds",
"chars": 10,
"preview": "confluent\n"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker1_sslkey_creds",
"chars": 10,
"preview": "confluent\n"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker1_truststore_creds",
"chars": 10,
"preview": "confluent\n"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker2-ca1-signed.crt",
"chars": 1038,
"preview": "-----BEGIN CERTIFICATE-----\nMIIC0jCCAjsCCQC4Ge6Xmxv2azANBgkqhkiG9w0BAQUFADBjMR4wHAYDVQQDExVj\nYTEudGVzdC5jb25mbHVlbnQuaW8"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker2_keystore_creds",
"chars": 10,
"preview": "confluent\n"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker2_sslkey_creds",
"chars": 10,
"preview": "confluent\n"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker2_truststore_creds",
"chars": 10,
"preview": "confluent\n"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker3-ca1-signed.crt",
"chars": 1038,
"preview": "-----BEGIN CERTIFICATE-----\nMIIC0jCCAjsCCQC4Ge6Xmxv2bDANBgkqhkiG9w0BAQUFADBjMR4wHAYDVQQDExVj\nYTEudGVzdC5jb25mbHVlbnQuaW8"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker3_keystore_creds",
"chars": 10,
"preview": "confluent\n"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker3_sslkey_creds",
"chars": 10,
"preview": "confluent\n"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker3_truststore_creds",
"chars": 10,
"preview": "confluent\n"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/client-plain.config",
"chars": 0,
"preview": ""
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/config_krb.conf",
"chars": 489,
"preview": "[logging]\n default = FILE:/var/log/kerberos/krb5libs.log\n kdc = FILE:/var/log/kerberos/krb5kdc.log\n admin_server = FILE:"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/config_server1_jaas.conf",
"chars": 432,
"preview": "KafkaServer {\n com.sun.security.auth.module.Krb5LoginModule required\n useKeyTab=true\n storeKey=true\n keyTab="
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/consumer-ca1-signed.crt",
"chars": 1042,
"preview": "-----BEGIN CERTIFICATE-----\nMIIC0zCCAjwCCQC4Ge6Xmxv2bjANBgkqhkiG9w0BAQUFADBjMR4wHAYDVQQDExVj\nYTEudGVzdC5jb25mbHVlbnQuaW8"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/consumer_keystore_creds",
"chars": 10,
"preview": "confluent\n"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/consumer_sslkey_creds",
"chars": 10,
"preview": "confluent\n"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/consumer_truststore_creds",
"chars": 10,
"preview": "confluent\n"
},
{
"path": "kraft/none/image/kafka-images/kafka/test/fixtures/secrets/create-certs.sh",
"chars": 2171,
"preview": "#!/bin/bash\n\nset -o nounset \\\n -o errexit \\\n -o verbose \\\n -o xtrace\n\n# Generate CA key\nopenssl req -new -x509 "
}
]
// ... and 276 more files (download for full content)
About this extraction
This page contains the full source code of the Dabz/kafka-security-playbook GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 476 files (515.5 KB), approximately 167.8k tokens, and a symbol index with 73 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.