/tls/certs/truststore.jks
-Djavax.net.ssl.keyStorePassword=test1234
-Djavax.net.ssl.trustStorePassword=test1234
```
### Important configuration files
kafka server.properties
listeners=SSL://kafka.confluent.local:9093
advertised.listeners=SSL://kafka.confluent.local:9093
security.inter.broker.protocol=SSL
ssl.truststore.location=/var/lib/secret/truststore.jks
ssl.truststore.password=test1234
ssl.keystore.location=/var/lib/secret/server.keystore.jks
ssl.keystore.password=test1234
ssl.client.auth=required
# To use TLS based authorization
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
super.users=User:CN=kafka.confluent.local,L=London,O=Confluent,C=UK
kafka consumer and producer configuration
bootstrap.servers=kafka.confluent.local:9093
security.protocol=SSL
ssl.truststore.location=/var/lib/secret/truststore.jks
ssl.truststore.password=test1234
ssl.keystore.location=/var/lib/secret/client.keystore.jks
ssl.keystore.password=test1234
ssl.key.password=test1234
#### For further information
* [kafka documentation on TLS](http://kafka.apache.org/documentation.html#security_ssl)
* [Confluent documentation on TLS authentication](https://docs.confluent.io/current/kafka/authentication_ssl.html)
* [Confluent documentation on TLS key generation](https://docs.confluent.io/current/tutorials/security_tutorial.html#generating-keys-certs)
## Kerberos (GSSAPI) authentication without TLS
This example contains a basic KDC server and configures both zookeeper and kafka with Kerberos authentication and authorization. Credentials are created without a password; a keytab containing the credentials is available in a Docker volume named "secret". The following credentials are automatically created in the KDC database:
1. __kafka/admin__ - to access zookeeper
2. __kafka_producer/producer__ - to access kafka as a producer
3. __kafka_consumer/consumer__ - to access kafka as a consumer
### Usage
```bash
cd kerberos
# Scripts orchestrating the docker-compose services
./up
# Using kinit with a keytab for authentication then invoking kafka interfaces
docker-compose exec kafka bash -c 'kinit -k -t /var/lib/secret/kafka.key kafka_producer/producer && kafka-console-producer --broker-list kafka:9093 --topic test --producer.config /etc/kafka/producer.properties'
docker-compose exec kafka bash -c 'kinit -k -t /var/lib/secret/kafka.key kafka_consumer/consumer && kafka-console-consumer --bootstrap-server kafka:9093 --topic test --consumer.config /etc/kafka/consumer.properties --from-beginning'
```
### Important configuration files
zookeeper properties
authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider
requireClientAuthScheme=sasl
zookeeper server and client jaas configuration
Server {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
useTicketCache=false
keyTab="/var/lib/secret/kafka.key"
principal="zookeeper/zookeeper.kerberos_default@TEST.CONFLUENT.IO";
};
kafka server.properties
listeners=SASL_PLAINTEXT://kafka:9093
advertised.listeners=SASL_PLAINTEXT://kafka:9093
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.enabled.mechanisms=GSSAPI
sasl.mechanism.inter.broker.protocol=GSSAPI
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.kerberos.service.name=kafka
allow.everyone.if.no.acl.found=false
super.users=User:admin;User:kafka
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
kafka server and client jaas configuration
/*
* Cluster kerberos services
*/
KafkaServer {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/var/lib/secret/kafka.key"
principal="kafka/kafka.kerberos_default@TEST.CONFLUENT.IO";
};
/*
 * For client and broker identification
*/
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/var/lib/secret/kafka.key"
principal="admin/kafka.kerberos_default@TEST.CONFLUENT.IO";
};
/*
* For Zookeeper authentication
*/
Client {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
useTicketCache=false
keyTab="/var/lib/secret/kafka.key"
principal="kafka/kafka.kerberos_default@TEST.CONFLUENT.IO";
};
kafka consumer and producer configuration
bootstrap.servers=kafka:9093
security.protocol=SASL_PLAINTEXT
sasl.kerberos.service.name=kafka
sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
useTicketCache=true
#### For further information
* [Confluent documentation on GSSAPI authentication](https://docs.confluent.io/current/kafka/authentication_sasl_gssapi.html)
* [Confluent documentation on ACL](https://docs.confluent.io/current/kafka/authorization.html)
## Oauth authentication via TLS encryption
Kafka supports SASL authentication via Oauth bearer tokens. A sample playbook for secured oauth token authentication is contained in the oauth subfolder of this repository.
### Usage
Prerequisites: jdk8, maven, docker-compose, openssl.
```bash
cd oauth
./up
```
In this sample playbook both the identity of brokers (`sasl.mechanism.inter.broker.protocol=OAUTHBEARER` within server.properties) and the identity of clients (`sasl.mechanism=OAUTHBEARER` within consumer.properties) are verified by the brokers using oauth bearer tokens.
Within this sample playbook oauth bearer tokens are generated and validated using the `jjwt` library without communication to an authorization server. In real life, this would be different.
The class `OauthBearerLoginCallbackHandler` is used by the clients and by brokers to generate a JWT token using a shared secret. This class is configured within the `client.properties` file:
Note that the client does not need to have a keystore configured, since client authentication is achieved using bearer tokens.
Still it needs a truststore to store the brokers' certificate authorities.
kafka consumer and producer configuration
security.protocol=SASL_SSL
sasl.mechanism=OAUTHBEARER
sasl.login.callback.handler.class=io.confluent.examples.authentication.oauth.OauthBearerLoginCallbackHandler
ssl.truststore.location=/etc/kafka/kafka.client.truststore.jks
ssl.truststore.password=secret
The `OauthBearerLoginCallbackHandler` class is also configured for broker clients within the `server.properties` file (see below). The `server.properties` file must also include a reference to the token validator class (`OauthBearerValidatorCallbackHandler`):
kafka broker configuration
listeners=SASL_SSL://kafka.confluent.local:9093
advertised.listeners=SASL_SSL://kafka.confluent.local:9093
security.inter.broker.protocol=SASL_SSL
sasl.mechanism.inter.broker.protocol=OAUTHBEARER
sasl.enabled.mechanisms=OAUTHBEARER
listener.name.sasl_ssl.oauthbearer.sasl.server.callback.handler.class=io.confluent.examples.authentication.oauth.OauthBearerValidatorCallbackHandler
listener.name.sasl_ssl.oauthbearer.sasl.login.callback.handler.class=io.confluent.examples.authentication.oauth.OauthBearerLoginCallbackHandler
ssl.truststore.location=/etc/kafka/kafka.server.truststore.jks
ssl.truststore.password=secret
ssl.keystore.location=/etc/kafka/kafka.server.keystore.jks
ssl.keystore.password=secret
ssl.key.password=secret
Kafka brokers need a keystore to store their private certificate as well as a truststore to verify the identity of other brokers.
### Further information
* [Confluent documentation on Oauth authentication](https://docs.confluent.io/current/kafka/authentication_sasl/authentication_sasl_oauth.html)
* [Blog Post](https://medium.com/@jairsjunior/how-to-setup-oauth2-mechanism-to-a-kafka-broker-e42e72839fe)
## Schema registry basic security
According to the documentation, the schema registry plugin only supports SSL principals, but there is an undocumented separate authentication possibility via Jetty Authentication.
```bash
cd schema-registry-basic-auth
./up
```
Now you can access the schema registry REST interface on `http://localhost:8089`
Note that in order to test the schema registry properly, you need to either `curl` into it, or use the `kafka-avro-console-producer` and consumer. The latter requires special considerations.
First, access via `curl`:
```
curl -X GET http://localhost:8089 -u admin:admin
```
If you want to try out the console producer, you need to exec into the schema-registry docker image and then run the producer:
```
docker-compose exec schema-registry bash
kafka-avro-console-producer --broker-list kafka:9092 --topic avro-test --property \
value.schema='{"type":"record","name":"myrecord","fields":[{"name":"f1","type":"string"}]}' \
--property basic.auth.credentials.source=USER_INFO \
--property schema.registry.basic.auth.user.info=write:write
> {"f1": "value1"}
> {"f1": "value2"}
> ^D
```
Note that the official documentation is wrong on two accounts. First, to define the source, you need to use `basic.auth.credentials.source` without the `schema.registry` in front of it.
Second, user authentication via a property file gets ignored, you need to pass the credentials via `--property`.
## Schema registry semi-open security
This playbook is an example of configuration where Schema Registry is configured for accepting request on `http` and `https`.
Requests on the `http` endpoint are actually identified as the `ANONYMOUS` user. This is possible thanks to the `confluent.schema.registry.anonymous.principal=true` option.
The following ACLs are configured:
- `sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -s '*' -p 'ANONYMOUS' -o 'SUBJECT_READ'`
- `sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -p 'ANONYMOUS' -o 'GLOBAL_SUBJECTS_READ'`
- `sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -p 'ANONYMOUS' -o 'GLOBAL_COMPATIBILITY_READ'`
- `sr-acl-cli --config /etc/schema-registry/schema-registry.properties --add -s '*' -p 'C=UK,O=Confluent,L=London,CN=schema-registry' -o '*'`
With this configuration, ` curl -X GET http://localhost:8089/subjects/` is successful, but the `ANONYMOUS` user does not have the privileges to write new schemas.
Only the client with the TLS client certificate `C=UK,O=Confluent,L=London,CN=schema-registry` can write new schemas, this could be for example your CI tool or an admin user.
================================================
FILE: TlsCheatsheet.md
================================================
# TLS Cheat Sheet
## Introduction
This cheat sheet contains common commands regarding TLS certificate generation and TLS troubleshooting. If you are looking for a script to generate a keystore, certificate authority and certificates, I recommend you to check out the [confluent kafka-generate-ssl.sh script](https://github.com/confluentinc/confluent-platform-security-tools/blob/master/kafka-generate-ssl.sh)
## Generating self-signed certificates or a new Certificate Authority
```bash
openssl req -new -nodes -x509 -days 3650 -newkey rsa:2048 -keyout server.key -out certs/server.crt -config $CONFIG_PATH
```
## Generating certificate signing request
```bash
openssl req -new -newkey rsa:2048 -keyout server.key -out server.csr -config $CONFIG_PATH -nodes
```
## Displaying content of a signing request
```bash
openssl req -text -in $CERT
```
## Displaying content of a certificate that a server presents
```bash
openssl s_client -showcerts -connect www.example.com:443
```
## Verifying that server certificate was signed by a CA
```bash
curl --cacert /var/lib/secret/ca.pem https://www.example.com:8443
```
## Signing certificate signing request
```bash
openssl x509 -req -days $DURATION -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt -extfile $CONFIG_PATH
```
## Generate a signed certificate with keytool
```bash
# First create the keystore
keytool -keystore keystore.server.jks -alias server -validity $DURATION -genkey -keyalg RSA
# Generate a certificate signing request and export it to a file
keytool -keystore keystore.server.jks -alias server -certreq -file $KEYSTORE_SIGN_REQUEST
# Sign the certificate request with OpenSSL and a CA
openssl x509 -req -CA ca.crt -CAkey ca.key -in $KEYSTORE_SIGN_REQUEST -out $KEYSTORE_SIGNED_CERT -days $VALIDITY_IN_DAYS -CAcreateserial
# Importing the signed certificate to the keystore
keytool -keystore $KEYSTORE_WORKING_DIRECTORY/$KEYSTORE_FILENAME -alias localhost -import -file $KEYSTORE_SIGNED_CERT
```
## Displaying content of a signed certificate
```bash
openssl x509 -text -in $CERT
```
## Importing signed certificate with its private key into a keystore
```bash
# Exporting certificate to PKCS12 format
openssl pkcs12 -export -in server.crt -inkey server.key -chain -CAfile ca.pem -name "kafka.confluent.local" -out server.p12 -password pass:$PASSWORD
# Importing PKCS12 into another keystore (or create it)
keytool -importkeystore -deststorepass $PASSWORD -destkeystore server.keystore.jks -srckeystore server.p12 -deststoretype PKCS12 -srcstoretype PKCS12 -noprompt -srcstorepass $PASSWORD
```
## Import certificate into a keystore
```bash
keytool -keystore truststore.jks -alias $ALIAS -import -file $CRT_FILE -storepass $PASSWORD -noprompt -storetype PKCS12
```
## Example of OpenSSL configuration file to generate a CA
```
[ policy_match ]
countryName = match
stateOrProvinceName = match
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
prompt = no
distinguished_name = dn
default_md = sha256
default_bits = 4096
x509_extensions = v3_ca
[ dn ]
countryName = UK
organizationName = Confluent
localityName = London
commonName = kafka.confluent.local
[ v3_ca ]
subjectKeyIdentifier=hash
basicConstraints = critical,CA:true
authorityKeyIdentifier=keyid:always,issuer:always
keyUsage = critical,keyCertSign,cRLSign
```
## Example of OpenSSL configuration file to generate a server certificate
```
[req]
prompt = no
distinguished_name = dn
default_md = sha256
default_bits = 4096
req_extensions = v3_req
[ dn ]
countryName = UK
organizationName = Confluent
localityName = London
commonName=kafka.confluent.local
[ v3_req ]
subjectKeyIdentifier = hash
basicConstraints = CA:FALSE
nsComment = "OpenSSL Generated Certificate"
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth, clientAuth
subjectAltName = @alt_names
[ alt_names ]
DNS.1=kafka.confluent.local
```
## Example of OpenSSL configuration file to generate a client certificate
```
[req]
prompt = no
distinguished_name = dn
default_md = sha256
default_bits = 4096
req_extensions = v3_req
[ dn ]
countryName = UK
organizationName = Confluent
localityName = London
commonName=kafka.confluent.local
[ v3_req ]
subjectKeyIdentifier = hash
basicConstraints = CA:FALSE
nsComment = "OpenSSL Generated Certificate"
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
```
================================================
FILE: acls/docker-compose.yaml
================================================
---
version: '3'
services:
zookeeper:
image: confluentinc/cp-zookeeper:5.4.0
hostname: zookeeper
container_name: zookeeper
environment:
ZOOKEEPER_SERVER_ID: 1
ZOOKEEPER_CLIENT_PORT: 2182
ZOOKEEPER_TICK_TIME: "2000"
KAFKA_JMX_PORT: 9999
KAFKA_JMX_HOSTNAME: localhost
KAFKA_OPTS: -Djava.security.auth.login.config=/tmp/zookeeper.sasl.jaas.conf
-Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
ports:
- 2182:2182
volumes:
- "$PWD/zookeeper.sasl.jaas.conf:/tmp/zookeeper.sasl.jaas.conf"
kafka-1:
build: kafka/
hostname: kafka-1
container_name: kafka-1
depends_on:
- zookeeper
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2182
KAFKA_LISTENERS: INTERNAL://kafka-1:19093, EXTERNAL://0.0.0.0:9093
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:SASL_PLAINTEXT,EXTERNAL:SASL_PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka-1:19093, EXTERNAL://localhost:9093
KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
KAFKA_CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: kafka-1:19093
KAFKA_CONFLUENT_METRICS_REPORTER_SASL_MECHANISM: "SCRAM-SHA-256"
KAFKA_CONFLUENT_METRICS_REPORTER_SECURITY_PROTOCOL: SASL_PLAINTEXT
KAFKA_CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_SASL_ENABLED_MECHANISMS: "SCRAM-SHA-256"
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: "SCRAM-SHA-256"
KAFKA_ZOOKEEPER_SET_ACL: "true"
KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "false"
KAFKA_SUPER_USERS: "User:kafka;User:admin"
KAFKA_JMX_PORT: 9999
KAFKA_JMX_HOSTNAME: kafka-1
KAFKA_OPTS: "-Djava.security.auth.login.config=/tmp/kafka.sasl.jaas.conf"
ports:
- 9093:9093
kafka-2:
build: kafka/
hostname: kafka-2
container_name: kafka-2
depends_on:
- zookeeper
environment:
KAFKA_BROKER_ID: 2
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2182
KAFKA_LISTENERS: INTERNAL://kafka-2:19094, EXTERNAL://0.0.0.0:9094
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:SASL_PLAINTEXT,EXTERNAL:SASL_PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka-2:19094, EXTERNAL://localhost:9094
KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
KAFKA_CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: kafka-2:19094
KAFKA_CONFLUENT_METRICS_REPORTER_SASL_MECHANISM: "SCRAM-SHA-256"
KAFKA_CONFLUENT_METRICS_REPORTER_SECURITY_PROTOCOL: SASL_PLAINTEXT
KAFKA_CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_SASL_ENABLED_MECHANISMS: "SCRAM-SHA-256"
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: "SCRAM-SHA-256"
KAFKA_ZOOKEEPER_SET_ACL: "true"
KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "false"
KAFKA_SUPER_USERS: "User:kafka;User:admin"
KAFKA_JMX_PORT: 9999
KAFKA_JMX_HOSTNAME: kafka-2
KAFKA_OPTS: "-Djava.security.auth.login.config=/tmp/kafka.sasl.jaas.conf"
ports:
- 9094:9094
================================================
FILE: acls/kafka/Dockerfile
================================================
FROM confluentinc/cp-enterprise-kafka:5.4.0
MAINTAINER sven@confluent.io
# Make sure the log directory is world-writable
RUN echo "===> Creating authorizer logs dir ..." \
&& mkdir -p /var/log/kafka-auth-logs \
&& chmod -R ag+w /var/log/kafka-auth-logs
COPY log4j.properties.template /etc/confluent/docker/log4j.properties.template
COPY *.conf /tmp/
================================================
FILE: acls/kafka/admin.conf
================================================
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="admin" \
password="admin-pass";
================================================
FILE: acls/kafka/consumer.conf
================================================
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="consumer" \
password="consumer-pass";
================================================
FILE: acls/kafka/kafka.conf
================================================
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="kafka" \
password="kafka-pass";
================================================
FILE: acls/kafka/kafka.sasl.jaas.conf
================================================
KafkaServer {
org.apache.kafka.common.security.scram.ScramLoginModule required
username="kafka"
password="kafka-pass";
};
KafkaClient {
org.apache.kafka.common.security.scram.ScramLoginModule required
username="kafka"
password="kafka-pass";
};
Client {
org.apache.zookeeper.server.auth.DigestLoginModule required
username="admin"
password="password";
};
================================================
FILE: acls/kafka/kafkacat.conf
================================================
security.protocol=SASL_PLAINTEXT
sasl.mechanisms=SCRAM-SHA-256
sasl.username=kafka
sasl.password=kafka-pass
================================================
FILE: acls/kafka/log4j.properties.template
================================================
log4j.rootLogger={{ env["KAFKA_LOG4J_ROOT_LOGLEVEL"] | default('INFO') }}, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.authorizerAppender.File=/var/log/kafka-auth-logs/kafka-authorizer.log
log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.additivity.kafka.authorizer.logger=false
{% set loggers = {
'kafka': 'INFO',
'kafka.network.RequestChannel$': 'WARN',
'kafka.producer.async.DefaultEventHandler': 'DEBUG',
'kafka.request.logger': 'WARN',
'kafka.controller': 'TRACE',
'kafka.log.LogCleaner': 'INFO',
'state.change.logger': 'TRACE',
'kafka.authorizer.logger': 'DEBUG, authorizerAppender'
} -%}
{% if env['KAFKA_LOG4J_LOGGERS'] %}
{% set loggers = parse_log4j_loggers(env['KAFKA_LOG4J_LOGGERS'], loggers) %}
{% endif %}
{% for logger,loglevel in loggers.iteritems() %}
log4j.logger.{{logger}}={{loglevel}}
{% endfor %}
================================================
FILE: acls/kafka/producer.conf
================================================
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="producer" \
password="producer-pass";
================================================
FILE: acls/up
================================================
#!/bin/sh
# Bring up the SASL/SCRAM-secured cluster (zookeeper, kafka-1, kafka-2),
# provision SCRAM credentials and ACLs, then print example client commands.
docker-compose up -d --build
# Creating the user kafka
# kafka is configured as a super user, no need for additional ACL
# SCRAM credentials are written to ZooKeeper via kafka-configs; both the
# SHA-256 and SHA-512 variants are registered for every user.
docker-compose exec kafka-1 kafka-configs --zookeeper zookeeper:2182 --alter --add-config 'SCRAM-SHA-256=[password=kafka-pass],SCRAM-SHA-512=[password=kafka-pass]' --entity-type users --entity-name kafka
docker-compose exec kafka-1 kafka-configs --zookeeper zookeeper:2182 --alter --add-config 'SCRAM-SHA-256=[password=admin-pass],SCRAM-SHA-512=[password=admin-pass]' --entity-type users --entity-name admin
docker-compose exec kafka-1 kafka-configs --zookeeper zookeeper:2182 --alter --add-config 'SCRAM-SHA-256=[password=producer-pass],SCRAM-SHA-512=[password=producer-pass]' --entity-type users --entity-name producer
docker-compose exec kafka-1 kafka-configs --zookeeper zookeeper:2182 --alter --add-config 'SCRAM-SHA-256=[password=consumer-pass],SCRAM-SHA-512=[password=consumer-pass]' --entity-type users --entity-name consumer
# ACLs
# producer: write access to all topics; consumer: read access to all topics
# and all consumer groups.
# NOTE(review): the unquoted '*' relies on no matching files in the current
# directory (shell glob expansion) — consider quoting '--topic=*' / '--group=*'.
docker-compose exec kafka-1 kafka-acls --authorizer-properties zookeeper.connect=zookeeper:2182 --add --allow-principal User:producer --producer --topic=*
docker-compose exec kafka-1 kafka-acls --authorizer-properties zookeeper.connect=zookeeper:2182 --add --allow-principal User:consumer --consumer --topic=* --group=*
# Print ready-to-use example commands for clients running on the host
# (ports 9093/9094) or inside the containers.
echo "Example configuration:"
echo "-> kafka-console-producer --broker-list localhost:9093 --producer.config kafka/producer.conf --topic test"
echo "-> kafka-console-consumer --bootstrap-server localhost:9094 --consumer.config kafka/consumer.conf --topic test --from-beginning"
echo "ZooKeeper shell with authorization from host:"
echo "-> KAFKA_OPTS=\"-Djava.security.auth.login.config=zookeeper.sasl.jaas.conf\" zookeeper-shell localhost:2182"
echo "ZooKeeper shell with authorization within container (KAFKA_OPTS already set):"
echo "-> docker-compose exec kafka-1 zookeeper-shell zookeeper:2182"
echo "Kafkacat with authorization from host:"
echo "-> kafkacat -L -b localhost:9094 -F kafka/kafkacat.conf"
================================================
FILE: acls/zookeeper.sasl.jaas.conf
================================================
Server {
org.apache.zookeeper.server.auth.DigestLoginModule required
user_admin="password";
};
Client {
org.apache.zookeeper.server.auth.DigestLoginModule required
username="admin"
password="password";
};
================================================
FILE: apache-kafka-with-zk3.5-and-tls/.gitignore
================================================
bin/
certs/
certs-old/
tmp-dir
images/
zookeeper.properties
================================================
FILE: apache-kafka-with-zk3.5-and-tls/README.md
================================================
# Apache Kafka 2.4 (trunk) with Zookeeper 3.5.5
This playbook shows the current (as of August 2019) necessary steps to enable a secured TLS connection between an Apache Kafka broker and its corresponding
Apache Zookeeper counterpart.
As of today, this only covers using Zookeeper 3.5.5 with the upcoming Apache Kafka 2.4 version. Using it in earlier versions is not properly tested.
## Run the playbook.
To run the playbook you need docker and docker-compose installed on your machine.
The playbook can be started by running the _$> ./up_ script.
### Configuration on Apache ZooKeeper
Required environment variables:
```bash
SERVER_JVMFLAGS=-Dzookeeper.serverCnxnFactory=org.apache.zookeeper.server.NettyServerCnxnFactory
````
zoo.cfg file:
```bash
secureClientPort=2182
authProvider.1=org.apache.zookeeper.server.auth.X509AuthenticationProvider
ssl.trustStore.location=/var/lib/secret/truststore.jks
ssl.trustStore.password=test1234
ssl.keyStore.location=/var/lib/secret/zookeeper.jks
ssl.keyStore.password=test1234
ssl.clientAuth=true
```
### Configuration for Apache Kafka
Required environment variables:
```bash
KAFKA_OPTS=-Dzookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty -Dzookeeper.client.secure=true -Dzookeeper.ssl.keyStore.location=/var/lib/secret/kafka.jks -Dzookeeper.ssl.keyStore.password=confluent -Dzookeeper.ssl.trustStore.location=/var/lib/secret/truststore.jks -Dzookeeper.ssl.trustStore.password=confluent
```
server.properties file:
```
zookeeper.connect=zookeeper:2182
```
to use the secure port. A user can use both ports at once (but I would certainly not recommend it, as it waters down security)
## Things pending..
* The current zookeeper migration tool works based on JAAS files, there is currently no option to set authentication in a different way. There is an issue open with Apache Kafka (https://issues.apache.org/jira/browse/KAFKA-8843) to fix this, as well as the required overall KIP https://cwiki.apache.org/confluence/display/KAFKA/KIP-515%3A+Enable+ZK+client+to+use+the+new+TLS+supported+authentication, currently under discussion.
* The https://cwiki.apache.org/confluence/display/KAFKA/KIP-515%3A+Enable+ZK+client+to+use+the+new+TLS+supported+authentication covers as well the challenge of configuring zookeeper TLS access, for the brokers, using environment variables. There is a change proposed to make things better.
*NOTE*: This playbook utilises a custom-made Apache Kafka docker image, built from a trunk snapshot on 22 August 2019. Currently Apache Kafka 2.4 is still not released. Changing base images will be easy when an official confluent image is released.
## Reference
* https://cwiki.apache.org/confluence/display/ZOOKEEPER/ZooKeeper+SSL+User+Guide
* https://cwiki.apache.org/confluence/display/KAFKA/KIP-515%3A+Enable+ZK+client+to+use+the+new+TLS+supported+authentication
* https://issues.apache.org/jira/browse/KAFKA-8843
* https://github.com/apache/kafka/commit/d67495d6a7f4c5f7e8736a25d6a11a1c1bef8d87
================================================
FILE: apache-kafka-with-zk3.5-and-tls/docker-compose.yml
================================================
version: '3'
services:
zookeeper:
build: zookeeper/
container_name: zookeeper
hostname: zookeeper
restart: on-failure
environment:
- SERVER_JVMFLAGS=-Dzookeeper.serverCnxnFactory=org.apache.zookeeper.server.NettyServerCnxnFactory
volumes:
- ./certs/zk-stores:/var/lib/secret
kafka:
build: kafka/
container_name: kafka
hostname: kafka
depends_on:
- zookeeper
restart: on-failure
volumes:
- ./certs/kafka-stores:/var/lib/secret
environment:
- KAFKA_OPTS=-Dzookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty -Dzookeeper.client.secure=true -Dzookeeper.ssl.keyStore.location=/var/lib/secret/kafka.jks -Dzookeeper.ssl.keyStore.password=confluent -Dzookeeper.ssl.trustStore.location=/var/lib/secret/truststore.jks -Dzookeeper.ssl.trustStore.password=confluent
ports:
- 29092:29092
================================================
FILE: apache-kafka-with-zk3.5-and-tls/kafka/Dockerfile
================================================
FROM purbon/kafka
MAINTAINER pere.urbon@gmail.com
ENV container docker
# 1. Install openjdk
RUN yum install -y java-11-openjdk
# 2. Configure Kafka
COPY server.properties /etc/kafka/server.properties
EXPOSE 9092
CMD kafka-server-start.sh /etc/kafka/server.properties
================================================
FILE: apache-kafka-with-zk3.5-and-tls/kafka/server.properties
================================================
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# see kafka.server.KafkaConfig for additional details and defaults
############################# Server Basics #############################
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0
############################# Socket Server Settings #############################
# The address the socket server listens on. It will get the value returned from
# java.net.InetAddress.getCanonicalHostName() if not configured.
# FORMAT:
# listeners = listener_name://host_name:port
# EXAMPLE:
# listeners = PLAINTEXT://your.host.name:9092
listeners=PLAINTEXT://kafka:9092,EXT_PLAINTEXT://localhost:29092
# Hostname and port the broker will advertise to producers and consumers. If not set,
# it uses the value for "listeners" if configured. Otherwise, it will use the value
# returned from java.net.InetAddress.getCanonicalHostName().
advertised.listeners=PLAINTEXT://kafka:9092,EXT_PLAINTEXT://localhost:29092
# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
listener.security.protocol.map=PLAINTEXT:PLAINTEXT,EXT_PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
#security.inter.broker.protocol=SSL
# The number of threads that the server uses for receiving requests from the network and sending responses to the network
num.network.threads=3
# The number of threads that the server uses for processing requests, which may include disk I/O
num.io.threads=8
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400
# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600
############################# Log Basics #############################
# A comma separated list of directories under which to store log files
log.dirs=/var/lib/kafka
# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=1
# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1
############################# Internal Topic Settings #############################
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1 is recommended for to ensure availability such as 3.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
############################# Log Flush Policy #############################
# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.
# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000
# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000
############################# Log Retention Policy #############################
# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.
# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours=168
# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
#log.retention.bytes=1073741824
# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824
# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=zookeeper:2182
# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=6000
##################### Confluent Metrics Reporter #######################
# Confluent Control Center and Confluent Auto Data Balancer integration
#
# Uncomment the following lines to publish monitoring data for
# Confluent Control Center and Confluent Auto Data Balancer
# If you are using a dedicated metrics cluster, also adjust the settings
# to point to your metrics kafka cluster.
#metric.reporters=io.confluent.metrics.reporter.ConfluentMetricsReporter
#confluent.metrics.reporter.bootstrap.servers=localhost:9092
#
# Uncomment the following line if the metrics cluster has a single broker
#confluent.metrics.reporter.topic.replicas=1
##################### Confluent Proactive Support ######################
# If set to true, and confluent-support-metrics package is installed
# then the feature to collect and report support metrics
# ("Metrics") is enabled. If set to false, the feature is disabled.
#
#confluent.support.metrics.enable=false
# The customer ID under which support metrics will be collected and
# reported.
#
# When the customer ID is set to "anonymous" (the default), then only a
# reduced set of metrics is being collected and reported.
#
# Confluent customers
# -------------------
# If you are a Confluent customer, then you should replace the default
# value with your actual Confluent customer ID. Doing so will ensure
# that additional support metrics will be collected and reported.
#
#confluent.support.customer.id=anonymous
############################# Group Coordinator Settings #############################
# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
# The default value for this is 3 seconds.
# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
group.initial.rebalance.delay.ms=0
# TLS Configuration
#ssl.truststore.location=/var/lib/secret/truststore.jks
#ssl.truststore.password=test1234
#ssl.keystore.location=/var/lib/secret/server.keystore.jks
#ssl.keystore.password=test1234
#ssl.client.auth=required
#authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
#super.users=User:CN=kafka.confluent.local,L=London,O=Confluent,C=UK;User:CN=schema-registry.confluent.local,L=London,O=Confluent,C=UK
================================================
FILE: apache-kafka-with-zk3.5-and-tls/up
================================================
#!/usr/bin/env bash
set -e
# Generate a CA-signed keystore named $1.jks for host/CN $1.
# Relies on the globals VALIDITY and PASSWORD and on the CA files
# ca-cert / ca-key produced by gentruststore.
function gencert() {
# Use -e (the supported file-existence test; -a is a deprecated synonym)
# and quote the expansion so unusual names cannot break the test.
if [ -e "$1.jks" ];
then
echo "The keystore $1.jks already exists";
exit;
fi
echo "Creating keystore $1.jks with a certificate and a key-pair for CN $1"
# Quote all variable expansions so passwords/paths with special characters survive.
keytool -keystore "$1.jks" -alias "$1" -validity "$VALIDITY" -genkey -storepass "$PASSWORD" -keypass "$PASSWORD" -dname "CN=$1,OU=kafka,O=confluent,L=MS,ST=Berlin,C=DE"
echo "Creating a Certificate-Signing-Request for the generated certificate"
keytool -keystore "$1.jks" -alias "$1" -certreq -file cert-file -storepass "$PASSWORD"
echo "Signing the Certificate-Signing-Request and adding an additional DNS-entry for localhost"
openssl x509 -req -CA ca-cert -CAkey ca-key -in cert-file -out cert-signed -days "$VALIDITY" -CAcreateserial -passin pass:"$PASSWORD" -extensions SAN -extfile <(printf "\n[SAN]\nsubjectAltName=DNS:$1,DNS:localhost")
echo "Importing the root-certificate for the CA into the keystore $1.jks"
keytool -keystore "$1.jks" -alias CARoot -import -file ca-cert -storepass "$PASSWORD" -noprompt
echo "Importing the signed certificate for CN $1 into the keystore $1.jks"
keytool -keystore "$1.jks" -alias "$1" -import -file cert-signed -storepass "$PASSWORD"
echo "Removing obsolete files..."
rm -v cert-file cert-signed
}
# Create the CA key/certificate pair (ca-key / ca-cert) if missing and
# import the CA certificate into truststore.jks.
# Relies on the globals VALIDITY and PASSWORD.
function gentruststore() {
# -e replaces the deprecated -a file-existence operator.
if [ -e ca-cert ];
then
echo "The root-certificate for the CA already exists...";
else
echo "Creating a x509-certificate for the CA...";
# Quote expansions so passwords with special characters survive.
openssl req -new -x509 -subj "/C=DE/ST=Berlin/L=MS/O=confluent/OU=kafka/CN=Root-CA" -keyout ca-key -out ca-cert -days "$VALIDITY" -passout pass:"$PASSWORD"
fi
#
if [ -e truststore.jks ];
then
echo "The keystore truststore.jks already exists!";
else
echo "Importing the root-certificate of the CA into truststore.jks..."
keytool -keystore truststore.jks -storepass "$PASSWORD" -alias CARoot -import -file ca-cert -noprompt
fi
}
# Recreate the working directories from scratch so stale certificates from a
# previous run cannot leak into the generated stores.
rm -rf certs
rm -rf tmp-dir
mkdir tmp-dir
mkdir -p certs/kafka-stores
mkdir -p certs/zk-stores
# Certificate validity (in days) and the password shared by all stores.
VALIDITY=365
PASSWORD=confluent
# Build the CA and the truststore inside tmp-dir (the subshell keeps the cwd intact).
(cd tmp-dir; gentruststore)
# One CA-signed keystore per component that needs a TLS identity.
hosts=( "zookeeper" "client" "kafka")
for host in "${hosts[@]}"
do
(cd tmp-dir; gencert $host )
done
# Distribute the generated stores into the per-service secret directories.
cp tmp-dir/truststore.jks certs/kafka-stores
cp tmp-dir/truststore.jks certs/zk-stores
cp tmp-dir/zookeeper.jks certs/zk-stores
cp tmp-dir/kafka.jks certs/kafka-stores
# Starting docker-compose services
docker-compose up -d --build
echo "Example configuration to access kafka:"
echo "-> docker-compose exec kafka kafka-topics.sh --bootstrap-server kafka:9092 --create --topic foo --partitions 1 --replication-factor 1"
echo "-> docker-compose exec kafka kafka-console-producer.sh --broker-list kafka:9092 --topic foo"
echo "-> docker-compose exec kafka kafka-console-consumer.sh --bootstrap-server kafka:9092 --topic foo --from-beginning"
================================================
FILE: apache-kafka-with-zk3.5-and-tls/zookeeper/Dockerfile
================================================
FROM purbon/zookeeper:3.5.5
# MAINTAINER is deprecated; LABEL is the supported replacement.
LABEL maintainer="pere.urbon@gmail.com"
ENV container docker
# Install a Java 11 runtime (the base image provides zookeeper itself).
RUN yum install -y java-11-openjdk
# Configure zookeeper with the TLS-enabled configuration (see zoo.cfg).
COPY zoo.cfg "${ZK_HOME}/conf/zoo.cfg"
# Add extra utility scripts. PATH entries must be directories, so add /opt
# (where the script is copied), not the script file itself.
ENV PATH="/opt:${PATH}"
COPY tlsZkCli.sh /opt/tlsZkCli.sh
# Secure (TLS) client port configured in zoo.cfg.
EXPOSE 2182
CMD zkServer.sh start-foreground
================================================
FILE: apache-kafka-with-zk3.5-and-tls/zookeeper/tlsZkCli.sh
================================================
#!/usr/bin/env bash
# Wrapper around zkCli.sh that enables the Netty TLS client socket and points
# it at the zookeeper key/trust stores mounted under /var/lib/secret.
# Usage: tlsZkCli.sh <host:port>
# NOTE: the original first line was "##!" — a comment, not a shebang — so the
# script had no interpreter directive; fixed to a proper shebang.
export CLIENT_JVMFLAGS="-Dzookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty -Dzookeeper.client.secure=true
-Dzookeeper.ssl.keyStore.location=/var/lib/secret/zookeeper.jks
-Dzookeeper.ssl.keyStore.password=confluent
-Dzookeeper.ssl.trustStore.location=/var/lib/secret/truststore.jks
-Dzookeeper.ssl.trustStore.password=confluent"
# Quote $1 so a missing/odd argument is still passed as a single token.
zkCli.sh -server "$1"
================================================
FILE: apache-kafka-with-zk3.5-and-tls/zookeeper/zoo.cfg
================================================
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage; /tmp here is used
# only for example's sake.
dataDir=/tmp/zookeeper
# the port at which the clients will connect
#clientPort=2181
secureClientPort=2182
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
authProvider.1=org.apache.zookeeper.server.auth.X509AuthenticationProvider
ssl.trustStore.location=/var/lib/secret/truststore.jks
ssl.trustStore.password=confluent
ssl.keyStore.location=/var/lib/secret/zookeeper.jks
ssl.keyStore.password=confluent
# This option is commented out here only to illustrate what is possible for
# TLS client authentication. In a production environment it should be
# enabled, i.e. ssl.clientAuth=need
#ssl.clientAuth=need
================================================
FILE: auditlog/README.md
================================================
# Kafka Audit Log
This playbook adds an example of using the Confluent audit log trail.
The present example works with SASL/SCRAM but this example can be extended to other authentication methods such as RBAC, other SASL flavours or TLS.
## Playbook.
1.- start all the components running the _./up_ script.
```bash
./up
Creating zookeeper ... done
Creating kafka ... done
Completed updating config for entity: user-principal 'kafka'.
Completed updating config for entity: user-principal 'consumer'.
Completed updating config for entity: user-principal 'producer'.
[2020-05-12 12:20:50,405] WARN The configuration 'sasl.jaas.config' was supplied but isn't a known config. (org.apache.kafka.clients.admin.AdminClientConfig)
Adding ACLs for resource `ResourcePattern(resourceType=TOPIC, name=*, patternType=LITERAL)`:
(principal=User:producer, host=*, operation=DESCRIBE, permissionType=ALLOW)
(principal=User:producer, host=*, operation=WRITE, permissionType=ALLOW)
(principal=User:producer, host=*, operation=CREATE, permissionType=ALLOW)
[2020-05-12 12:20:51,026] WARN The configuration 'sasl.jaas.config' was supplied but isn't a known config. (org.apache.kafka.clients.admin.AdminClientConfig)
Current ACLs for resource `ResourcePattern(resourceType=TOPIC, name=*, patternType=LITERAL)`:
(principal=User:producer, host=*, operation=DESCRIBE, permissionType=ALLOW)
(principal=User:producer, host=*, operation=CREATE, permissionType=ALLOW)
(principal=User:producer, host=*, operation=WRITE, permissionType=ALLOW)
[2020-05-12 12:20:53,986] WARN The configuration 'sasl.jaas.config' was supplied but isn't a known config. (org.apache.kafka.clients.admin.AdminClientConfig)
Adding ACLs for resource `ResourcePattern(resourceType=TOPIC, name=*, patternType=LITERAL)`:
(principal=User:consumer, host=*, operation=DESCRIBE, permissionType=ALLOW)
(principal=User:consumer, host=*, operation=READ, permissionType=ALLOW)
Adding ACLs for resource `ResourcePattern(resourceType=GROUP, name=*, patternType=LITERAL)`:
(principal=User:consumer, host=*, operation=READ, permissionType=ALLOW)
[2020-05-12 12:20:54,538] WARN The configuration 'sasl.jaas.config' was supplied but isn't a known config. (org.apache.kafka.clients.admin.AdminClientConfig)
Current ACLs for resource `ResourcePattern(resourceType=TOPIC, name=*, patternType=LITERAL)`:
(principal=User:producer, host=*, operation=CREATE, permissionType=ALLOW)
(principal=User:producer, host=*, operation=DESCRIBE, permissionType=ALLOW)
(principal=User:consumer, host=*, operation=DESCRIBE, permissionType=ALLOW)
(principal=User:producer, host=*, operation=WRITE, permissionType=ALLOW)
(principal=User:consumer, host=*, operation=READ, permissionType=ALLOW)
Current ACLs for resource `ResourcePattern(resourceType=GROUP, name=*, patternType=LITERAL)`:
(principal=User:consumer, host=*, operation=READ, permissionType=ALLOW)
[2020-05-12 12:20:57,354] WARN The configuration 'sasl.jaas.config' was supplied but isn't a known config. (org.apache.kafka.clients.admin.AdminClientConfig)
Adding ACLs for resource `ResourcePattern(resourceType=TOPIC, name=confluent-audit-log-events, patternType=PREFIXED)`:
(principal=User:confluent-audit, host=*, operation=DESCRIBE, permissionType=ALLOW)
(principal=User:confluent-audit, host=*, operation=WRITE, permissionType=ALLOW)
(principal=User:confluent-audit, host=*, operation=CREATE, permissionType=ALLOW)
[2020-05-12 12:20:57,928] WARN The configuration 'sasl.jaas.config' was supplied but isn't a known config. (org.apache.kafka.clients.admin.AdminClientConfig)
Current ACLs for resource `ResourcePattern(resourceType=TOPIC, name=confluent-audit-log-events, patternType=PREFIXED)`:
(principal=User:confluent-audit, host=*, operation=WRITE, permissionType=ALLOW)
(principal=User:confluent-audit, host=*, operation=DESCRIBE, permissionType=ALLOW)
(principal=User:confluent-audit, host=*, operation=CREATE, permissionType=ALLOW)
Example configuration:
-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9092 --producer.config /etc/kafka/producer-user.properties --topic test
-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9092 --consumer.config /etc/kafka/consumer-user.properties --topic test --from-beginning
```
2.- Explore the currently created topics.
```
./scripts/describe-topics.sh
[2020-05-12 12:21:55,868] WARN The configuration 'sasl.jaas.config' was supplied but isn't a known config. (org.apache.kafka.clients.admin.AdminClientConfig)
Topic: _confluent-license PartitionCount: 1 ReplicationFactor: 1 Configs: min.insync.replicas=1,cleanup.policy=compact
Topic: _confluent-license Partition: 0 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: __confluent.support.metrics PartitionCount: 1 ReplicationFactor: 1 Configs: retention.ms=31536000000
Topic: __confluent.support.metrics Partition: 0 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events PartitionCount: 12 ReplicationFactor: 1 Configs: retention.ms=7776000000,message.timestamp.type=CreateTime,retention.bytes=-1,segment.ms=14400000
Topic: confluent-audit-log-events Partition: 0 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 1 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 2 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 3 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 4 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 5 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 6 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 7 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 8 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 9 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 10 Leader: 1 Replicas: 1 Isr: 1 Offline:
Topic: confluent-audit-log-events Partition: 11 Leader: 1 Replicas: 1 Isr: 1 Offline:
```
3.- Explore the audit log topics
```
./scripts/explore-audit-topic.sh
```
The topic is empty at the beginning.
Keep this consumer open; it will start showing the generated events as we issue them.
4.- Create some topics and acls.
```
./scripts/create-topics.sh
Create topic foo with User:kafka
NOTE: this topic creation will be ignored because uses a user inside the ignore list.
Created topic foo.
Create topic bar with User:producer
NOTE: This action will be noted in the audit log.
Created topic bar.
Adding ACLs for resource `ResourcePattern(resourceType=TOPIC, name=bar, patternType=LITERAL)`:
(principal=User:producer, host=*, operation=ALTER_CONFIGS, permissionType=ALLOW)
Current ACLs for resource `ResourcePattern(resourceType=TOPIC, name=bar, patternType=LITERAL)`:
(principal=User:producer, host=*, operation=ALTER_CONFIGS, permissionType=ALLOW)
Adding ACLs for resource `ResourcePattern(resourceType=TOPIC, name=bar, patternType=LITERAL)`:
(principal=User:producer, host=*, operation=DELETE, permissionType=ALLOW)
Current ACLs for resource `ResourcePattern(resourceType=TOPIC, name=bar, patternType=LITERAL)`:
(principal=User:producer, host=*, operation=ALTER_CONFIGS, permissionType=ALLOW)
(principal=User:producer, host=*, operation=DELETE, permissionType=ALLOW)
Change of a configuration
NOTE: This action will be noted in the audit log.
Completed updating config for topic bar.
```
Now the audit log topic should reflect the information about the generated actions.
```
./scripts/explore-audit-topic.sh
{"data":{"serviceName":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA","methodName":"kafka.CreateTopics","resourceName":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA/topic=bar","authenticationInfo":{"principal":"User:producer"},"authorizationInfo":{"granted":true,"operation":"Create","resourceType":"Topic","resourceName":"bar","patternType":"LITERAL","aclAuthorization":{"permissionType":"ALLOW","host":"*"}},"request":{"correlation_id":"4","client_id":"adminclient-1"},"requestMetadata":{"client_address":"/172.27.0.3"}},"id":"b17cc9c7-96f0-413a-b94c-124e21834a55","source":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA","specversion":"0.3","type":"io.confluent.kafka.server/authorization","time":"2020-05-12T12:24:37.838Z","datacontenttype":"application/json","subject":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA/topic=bar","confluentRouting":{"route":"confluent-audit-log-events"}}
{"data":{"serviceName":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA","methodName":"kafka.IncrementalAlterConfigs","resourceName":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA/topic=bar","authenticationInfo":{"principal":"User:producer"},"authorizationInfo":{"granted":true,"operation":"AlterConfigs","resourceType":"Topic","resourceName":"bar","patternType":"LITERAL","aclAuthorization":{"permissionType":"ALLOW","host":"*"}},"request":{"correlation_id":"4","client_id":"adminclient-1"},"requestMetadata":{"client_address":"/172.27.0.3"}},"id":"93e94659-8b45-4f44-b691-f284192ebe42","source":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA","specversion":"0.3","type":"io.confluent.kafka.server/authorization","time":"2020-05-12T12:24:47.700Z","datacontenttype":"application/json","subject":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA/topic=bar","confluentRouting":{"route":"confluent-audit-log-events"}}
```
5.- Write some messages
```
./scripts/write-msg.sh bar
Write messages to topic bar
```
More messages coming into the audit log.
```
{"data":{"serviceName":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA","methodName":"kafka.Produce","resourceName":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA/topic=bar","authenticationInfo":{"principal":"User:producer"},"authorizationInfo":{"granted":true,"operation":"Write","resourceType":"Topic","resourceName":"bar","patternType":"LITERAL","aclAuthorization":{"permissionType":"ALLOW","host":"*"}},"request":{"correlation_id":"6","client_id":"rdkafka"},"requestMetadata":{"client_address":"/172.27.0.4"}},"id":"7789d492-df1c-404a-b494-3dc44fb01b24","source":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA","specversion":"0.3","type":"io.confluent.kafka.server/authorization","time":"2020-05-12T12:26:49.353Z","datacontenttype":"application/json","subject":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA/topic=bar","confluentRouting":{"route":"confluent-audit-log-events"}}
```
6.- Delete of messages
```
./scripts/delete-records.sh
Executing records delete operation
Records delete operation completed:
partition: bar-0 low_watermark: 3
```
New messages appear in the audit trail.
```
{"data":{"serviceName":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA","methodName":"kafka.DeleteRecords","resourceName":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA/topic=bar","authenticationInfo":{"principal":"User:producer"},"authorizationInfo":{"granted":true,"operation":"Delete","resourceType":"Topic","resourceName":"bar","patternType":"LITERAL","aclAuthorization":{"permissionType":"ALLOW","host":"*"}},"request":{"correlation_id":"4","client_id":"adminclient-1"},"requestMetadata":{"client_address":"/172.27.0.3"}},"id":"f6664ede-fbd4-4425-873a-d31df5eb0b7f","source":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA","specversion":"0.3","type":"io.confluent.kafka.server/authorization","time":"2020-05-12T12:27:34.425Z","datacontenttype":"application/json","subject":"crn:///kafka=STOiZ_jWTxqgum3T5zEoqA/topic=bar","confluentRouting":{"route":"confluent-audit-log-events"}}
```
## More information
This is only a summary and playbook of this functionality; more information can be found in the reference documentation.
1.- https://docs.confluent.io/current/security/audit-logs.html
================================================
FILE: auditlog/config/delete-records.json
================================================
{
"partitions": [
{
"topic": "bar",
"partition": 0,
"offset": 3
}
],
"version": 1
}
================================================
FILE: auditlog/data/my_msgs.txt
================================================
This is a message
This is another message
Abracadabra
================================================
FILE: auditlog/docker-compose.yml
================================================
version: "2"
services:
zookeeper:
image: confluentinc/cp-zookeeper:5.5.0
hostname: zookeeper
container_name: zookeeper
volumes:
- ./zookeeper:/etc/kafka
ports:
- 2181:2181
environment:
ZOOKEEPER_CLIENT_PORT: 2181
KAFKA_OPTS: -Djava.security.auth.login.config=/etc/kafka/zookeeper.sasl.jaas.config
-Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
kafka:
image: confluentinc/cp-server:5.5.0
container_name: kafka
hostname: kafka
depends_on:
- zookeeper
volumes:
- ./kafka:/etc/kafka
- ./config:/tmp/config
ports:
- 9092:9092
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
KAFKA_ADVERTISED_LISTENERS: "SASL_PLAINTEXT://kafka:9092"
KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS: "1"
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "1"
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: "1"
KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: "1"
KAFKA_SASL_ENABLED_MECHANISMS: "SCRAM-SHA-256"
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: "SCRAM-SHA-256"
KAFKA_SECURITY_INTER_BROKER_PROTOCOL: "SASL_PLAINTEXT"
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "false"
KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/kafka.sasl.jaas.config"
KAFKA_SUPER_USERS: "User:kafka"
KAFKA_ZOOKEEPER_SET_ACL: "true"
KAFKA_AUTHORIZER_CLASS_NAME: io.confluent.kafka.security.authorizer.ConfluentServerAuthorizer
KAFKA_CONFLUENT_SECURITY_EVENT_ROUTER_CONFIG: "{\"routes\":{\"crn:///kafka=*/group=*\":{\"consume\":{\"allowed\":\"confluent-audit-log-events\",\"denied\":\"confluent-audit-log-events\"}},\"crn:///kafka=*/topic=*\":{\"produce\":{\"allowed\":\"confluent-audit-log-events\",\"denied\":\"confluent-audit-log-events\"},\"consume\":{\"allowed\":\"confluent-audit-log-events\",\"denied\":\"confluent-audit-log-events\"}}},\"destinations\":{\"topics\":{\"confluent-audit-log-events\":{\"retention_ms\":7776000000}}},\"default_topics\":{\"allowed\":\"confluent-audit-log-events\",\"denied\":\"confluent-audit-log-events\"},\"excluded_principals\":[\"User:kafka\",\"User:ANONYMOUS\"]}"
================================================
FILE: auditlog/example-config.json
================================================
{
"routes": {
"crn:///kafka=*/group=*": {
"consume": {
"allowed": "confluent-audit-log-events",
"denied": "confluent-audit-log-events"
}
},
"crn:///kafka=*/topic=*": {
"produce": {
"allowed": "confluent-audit-log-events",
"denied": "confluent-audit-log-events"
},
"consume": {
"allowed": "confluent-audit-log-events",
"denied": "confluent-audit-log-events"
}
}
},
"destinations": {
"topics": {
"confluent-audit-log-events": {
"retention_ms": 7776000000
}
}
},
"default_topics": {
"allowed": "confluent-audit-log-events",
"denied": "confluent-audit-log-events"
},
"excluded_principals": ["User:kafka", "User:ANONYMOUS"]
}
================================================
FILE: auditlog/kafka/consumer-user.properties
================================================
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="consumer" \
password="consumerpass";
================================================
FILE: auditlog/kafka/kafka-user.properties
================================================
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="kafka" \
password="kafka";
================================================
FILE: auditlog/kafka/kafka.properties
================================================
broker.id=1
advertised.listeners=SASL_PLAINTEXT://kafka:9092
offsets.topic.replication.factor=1
allow.everyone.if.no.acl.found=false
zookeeper.connect=zookeeper:2181
security.inter.broker.protocol=SASL_PLAINTEXT
authorizer.class.name=io.confluent.kafka.security.authorizer.ConfluentServerAuthorizer
log.dirs=/var/lib/kafka/data
confluent.security.event.router.config={"routes":{"crn:///kafka=*/group=*":{"consume":{"allowed":"confluent-audit-log-events","denied":"confluent-audit-log-events"}},"crn:///kafka=*/topic=*":{"produce":{"allowed":"confluent-audit-log-events","denied":"confluent-audit-log-events"},"consume":{"allowed":"confluent-audit-log-events","denied":"confluent-audit-log-events"}}},"destinations":{"topics":{"confluent-audit-log-events":{"retention_ms":7776000000}}},"default_topics":{"allowed":"confluent-audit-log-events","denied":"confluent-audit-log-events"},"excluded_principals":["User:kafka","User:ANONYMOUS"]}
listeners=SASL_PLAINTEXT://0.0.0.0:9092
zookeeper.set.acl=true
super.users=User:kafka
offsets.topic.num.partitions=1
sasl.enabled.mechanisms=SCRAM-SHA-256
transaction.state.log.replication.factor=1
sasl.mechanism.inter.broker.protocol=SCRAM-SHA-256
confluent.license.topic.replication.factor=1
================================================
FILE: auditlog/kafka/kafka.sasl.jaas.config
================================================
KafkaServer {
org.apache.kafka.common.security.scram.ScramLoginModule required
username="kafka"
password="kafka";
};
Client {
org.apache.zookeeper.server.auth.DigestLoginModule required
username="admin"
password="password";
};
================================================
FILE: auditlog/kafka/log4j.properties
================================================
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.logger.kafka.authorizer.logger=WARN
log4j.logger.kafka.log.LogCleaner=INFO
log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG
log4j.logger.kafka.controller=TRACE
log4j.logger.kafka.network.RequestChannel$=WARN
log4j.logger.kafka.request.logger=WARN
log4j.logger.state.change.logger=TRACE
log4j.logger.kafka=INFO
================================================
FILE: auditlog/kafka/producer-user.properties
================================================
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="producer" \
password="producerpass";
================================================
FILE: auditlog/kafka/tools-log4j.properties
================================================
log4j.rootLogger=WARN, stderr
log4j.appender.stderr=org.apache.log4j.ConsoleAppender
log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.stderr.Target=System.err
================================================
FILE: auditlog/scripts/create-topics.sh
================================================
#!/usr/bin/env bash
# Exercise the audit log: create topics as different principals and alter a
# topic configuration so that both audited and ignored events are produced.
echo "Create topic foo with User:kafka"
echo "NOTE: this topic creation will be ignored because uses a user inside the ignore list."
echo
# User:kafka is in excluded_principals, so this action is NOT audited.
docker exec kafka kafka-topics --bootstrap-server kafka:9092 \
--command-config /etc/kafka/kafka-user.properties \
--create --topic foo --replication-factor 1 --partitions 1
sleep 1
echo "Create topic bar with User:producer"
echo "NOTE: This action will be noted in the audit log."
echo
# User:producer is not excluded, so this creation appears in the audit topic.
docker exec kafka kafka-topics --bootstrap-server kafka:9092 \
--command-config /etc/kafka/producer-user.properties \
--create --topic bar --replication-factor 1 --partitions 1
## Add the extra ACLs needed to manage the topic bar (alter configs, delete records).
docker exec kafka kafka-acls --bootstrap-server kafka:9092 \
--command-config /etc/kafka/kafka-user.properties \
--add --allow-principal User:producer --operation AlterConfigs \
--topic "bar"
docker exec kafka kafka-acls --bootstrap-server kafka:9092 \
--command-config /etc/kafka/kafka-user.properties \
--add --allow-principal User:producer --operation Delete \
--topic "bar"
sleep 1
echo "Change of a configuration"
echo "NOTE: This action will be noted in the audit log."
echo
# Configuration change performed by User:producer — also recorded in the audit log.
docker exec kafka kafka-configs --bootstrap-server kafka:9092 \
--topic bar --add-config retention.ms=2592000001 \
--alter --command-config /etc/kafka/producer-user.properties
================================================
FILE: auditlog/scripts/delete-records.sh
================================================
#!/usr/bin/env bash
# Delete records from topic "bar" up to the offsets listed in
# delete-records.json (mounted into the container under /tmp/config),
# authenticating as the SCRAM "producer" user.
docker exec kafka kafka-delete-records --bootstrap-server kafka:9092 \
--command-config /etc/kafka/producer-user.properties \
--offset-json-file /tmp/config/delete-records.json
================================================
FILE: auditlog/scripts/describe-topics.sh
================================================
#!/usr/bin/env bash
# List and describe all topics, authenticating as the privileged "kafka" user.
docker exec kafka kafka-topics --bootstrap-server kafka:9092 --command-config /etc/kafka/kafka-user.properties --describe
================================================
FILE: auditlog/scripts/explore-audit-topic.sh
================================================
#!/usr/bin/env bash
# Tail the Confluent audit-log topic from the beginning, authenticating with
# the privileged "kafka" user's SASL credentials.
TOPIC="confluent-audit-log-events"
# Quote the expansion so the topic name is always passed as a single argument.
docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9092 \
--consumer.config /etc/kafka/kafka-user.properties \
--topic "$TOPIC" --from-beginning
================================================
FILE: auditlog/scripts/write-msg.sh
================================================
#!/usr/bin/env bash
# Produce the sample messages from data/my_msgs.txt to the topic given as $1,
# authenticating as the SCRAM "producer" user via kafkacat.
# Use a dedicated variable instead of clobbering the shell's special PWD,
# and quote expansions so paths containing spaces survive the volume mount.
basedir="$(pwd)"
topic=$1
network="auditlog_default"
USERNAME=producer
PASSWORD=producerpass
echo "Write messages to topic $1"
docker run --network "$network" \
--volume "$basedir/data/my_msgs.txt":/data/my_msgs.txt \
confluentinc/cp-kafkacat \
kafkacat -b kafka:9092 \
-t "$topic" \
-X security.protocol=SASL_PLAINTEXT -X sasl.mechanisms=SCRAM-SHA-256 -X sasl.username="$USERNAME" -X sasl.password="$PASSWORD" \
-P -l /data/my_msgs.txt
================================================
FILE: auditlog/up
================================================
#!/usr/bin/env bash
# Bring the auditlog stack up, then seed SCRAM credentials and ACLs.
docker-compose up -d
# SCRAM credentials are stored in zookeeper; create them for the broker user
# and for the producer/consumer test users.
docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=kafka],SCRAM-SHA-512=[password=kafka]' --entity-type users --entity-name kafka
docker-compose exec zookeeper kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=consumerpass],SCRAM-SHA-512=[password=consumerpass]' --entity-type users --entity-name consumer
docker-compose exec zookeeper kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=producerpass],SCRAM-SHA-512=[password=producerpass]' --entity-type users --entity-name producer
# ACLs
# Grant produce rights to User:producer, consume rights to User:consumer, and
# prefixed produce rights on the audit-log topics to User:confluent-audit.
docker-compose exec kafka kafka-acls --bootstrap-server kafka:9092 --command-config /etc/kafka/kafka-user.properties --add --allow-principal User:producer --producer --topic=*
docker-compose exec kafka kafka-acls --bootstrap-server kafka:9092 --command-config /etc/kafka/kafka-user.properties --add --allow-principal User:consumer --consumer --topic=* --group=*
docker-compose exec kafka kafka-acls --bootstrap-server kafka:9092 --command-config /etc/kafka/kafka-user.properties --add --allow-principal User:confluent-audit --producer --topic confluent-audit-log-events --resource-pattern-type prefixed
echo "Example configuration:"
echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9092 --producer.config /etc/kafka/producer-user.properties --topic test"
echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9092 --consumer.config /etc/kafka/consumer-user.properties --topic test --from-beginning"
================================================
FILE: auditlog/zookeeper/log4j.properties
================================================
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
================================================
FILE: auditlog/zookeeper/tools-log4j.properties
================================================
log4j.rootLogger=WARN, stderr
log4j.appender.stderr=org.apache.log4j.ConsoleAppender
log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.stderr.Target=System.err
================================================
FILE: auditlog/zookeeper/zookeeper.properties
================================================
dataDir=/var/lib/zookeeper/data
dataLogDir=/var/lib/zookeeper/log
clientPort=2181
================================================
FILE: auditlog/zookeeper/zookeeper.sasl.jaas.config
================================================
Server {
org.apache.zookeeper.server.auth.DigestLoginModule required
user_admin="password";
};
Client {
org.apache.zookeeper.server.auth.DigestLoginModule required
username="admin"
password="password";
};
================================================
FILE: ca-builder-scripts/.gitignore
================================================
tmp-certs/
stores
legacy/
## remove from git the generated CA files
ca/
================================================
FILE: ca-builder-scripts/README.md
================================================
# Building a CA with OpenSSL
This is a collection of scripts useful to generate a local CA setup. While the PKI could be set up in different ways, for this example
we generate:
* A root CA identity, for example for your mother company.
* An intermediate CA identity, for example the one generated for your department or smaller company.
* And then the client certificates.
All the scripts are wrapping openssl to generate the required structures.
## Commands
A collection of scripts are provided to generate any of the required steps to build this CA.
*IMPORTANT:* These scripts set a default password for the CA certs; this password is: __confluent__ . You should change it.
### Building the root CA
To construct the root CA setup, you can run the script:
```bash
./utils/build-ca.sh
```
this script will generate the default directory structure for the CA, including the root certificate for your authority. After the execution you should see a directory structure like this:
```bash
➜ ca-builder-scripts git:(ca-builder-scripts) ✗ ls -la ca
total 48
drwxr-xr-x 13 pere staff 416 3 May 16:33 .
drwxr-xr-x 16 pere staff 512 3 May 16:32 ..
drwxr-xr-x 3 pere staff 96 3 May 16:33 certs
drwxr-xr-x 2 pere staff 64 3 May 16:32 crl
-rw-r--r-- 1 pere staff 97 3 May 16:33 index.txt
-rw-r--r-- 1 pere staff 21 3 May 16:33 index.txt.attr
drwxr-xr-x 3 pere staff 96 3 May 16:33 newcerts
-rw-r--r-- 1 pere staff 4117 3 May 16:32 openssl.cnf
drwx------ 3 pere staff 96 3 May 16:32 private
-rw-r--r-- 1 pere staff 5 3 May 16:33 serial
```
*NOTE*: This script sets a default password for the root certificate, change it if you require to have another one.
### Building the intermediate CA.
Once the main CA structure is created, you need to create the intermediate CA, for this you can use this script:
```bash
./utils/build-intermediate-ca.sh
```
Once the script is run, you should see a directory structure like this:
```bash
➜ ca-builder-scripts git:(ca-builder-scripts) ✗ ls -la ca/intermediate
total 80
drwxr-xr-x 16 pere staff 512 3 May 17:22 .
drwxr-xr-x 13 pere staff 416 3 May 16:33 ..
drwxr-xr-x 5 pere staff 160 3 May 16:34 certs
drwxr-xr-x 3 pere staff 96 3 May 17:21 crl
-rw-r--r-- 1 pere staff 5 3 May 17:22 crlnumber
drwxr-xr-x 4 pere staff 128 3 May 16:34 csr
-rw-r--r-- 1 pere staff 109 3 May 17:20 index.txt
-rw-r--r-- 1 pere staff 21 3 May 17:20 index.txt.attr
drwxr-xr-x 3 pere staff 96 3 May 16:34 newcerts
-rw-r--r-- 1 pere staff 4328 3 May 16:33 openssl.cnf
drwx------ 4 pere staff 128 3 May 16:33 private
-rw-r--r-- 1 pere staff 5 3 May 16:34 serial
```
*NOTE*: This script sets a default password for the certificate, change it if you require to have another one.
### Generating an end user certificate
Once the full CA is setup, next step is to generate end user certificates, to do this you can use a command that look like:
```bash
./create-pair-certs.sh kafka.confluent.local kafka-machine.example.com server_cert
```
where the first parameter is the certificate name, the second is the machine name added as a DNS subject-alternative-name, and the third is the extension being used. For this CA we support server_cert and usr_cert. See the [configs/](configs/) directory for details of the configuration.
### revoke certs
A common process in any CA is to revoke certificates; with these scripts you can do it like this:
```bash
./revoke-cert.sh kafka.confluent.local
```
this command will revoke a certificate with the name _kafka.confluent.local_.
Once this command is run, you should see an update in the intermediate CA text db like this:
```bash
➜ ca-builder-scripts git:(ca-builder-scripts) ✗ cat ca/intermediate/index.txt
R 200512143408Z 190503152037Z 1000 unknown /C=DE/ST=Berlin/L=Berlin/O=Confluent Ltd/CN=kafka.confluent.local
```
this means this cert is revoked, so no longer valid
## create certificate revocation lists
To revoke a cert is nice, but you need to announce this to the world, for this you need to create a list of revoked certificates. This you can do using this script:
```bash
./create-crl.sh
```
Once this is run, there will be a new file being created under
```bash
➜ ca-builder-scripts git:(ca-builder-scripts) ✗ ls ca/intermediate/crl
intermediate.crl.pem
```
that will contain the list of revoked certs, this can be used then as part of your distribution points list, to inform clients of the CA which identities are being revoked.
## Common errors
> error 20 at 0 depth lookup:unable to get local issuer certificate
OpenSSL could not find the issuer certificate; the path to the CA certificates is wrong.
> TXT_DB error number 2 failed to update database
Because you have generated your own self signed certificate with the same CN (Common Name) information that the CA certificate that you’ve generated before.
Enter another Common Name.
================================================
FILE: ca-builder-scripts/build-a-batch-of-certs.sh
================================================
#!/usr/bin/env bash
# Read a CSV file with one "cert_name,machine" entry per line and print the
# create-cert command for each entry.
#
# Usage: ./build-a-batch-of-certs.sh <csv-file>
input=$1
# Fix: split on the comma with read's IFS instead of word-splitting an
# unquoted $(echo | tr), which was subject to globbing and whitespace mangling.
while IFS=, read -r cert_name machine
do
    #./support-scripts/create-cert.sh "$cert_name" "$machine"
    echo "./support-scripts/create-cert.sh $cert_name $machine"
done < "$input"
================================================
FILE: ca-builder-scripts/build-a-batch-of-stores.sh
================================================
#!/usr/bin/env bash
# Build a PKCS12 keystore and truststore for every "cert_name,store_name"
# pair listed in the config file passed as $1. Stores are written under
# stores/, scratch PKCS12 bundles under tmp-certs/.
#
# Usage: ./build-a-batch-of-stores.sh <config-file> [password]
DEFAULT_PASSWORD=${2:-confluent}
if [ -z "${CA_ROOT_DIR+x}" ];
then
    CA_ROOT_DIR='.'
fi
CA_CERT="$CA_ROOT_DIR/ca/certs/ca.cert.pem"
CA_KEY="$CA_ROOT_DIR/ca/private/ca.key.pem"
INT_CA_CERT="$CA_ROOT_DIR/ca/intermediate/certs/intermediate.cert.pem"
INT_CA_KEY="$CA_ROOT_DIR/ca/intermediate/private/intermediate.key.pem"
# Build stores/<store_type>.keystore (the cert's key pair) and
# stores/<store_type>.truststore (root + intermediate CA certs).
function build_store {
    cert_name=$1
    store_type=$2
    CERT_PATH="$CA_ROOT_DIR/ca/intermediate/certs/$cert_name.cert.pem"
    KEY_PATH="$CA_ROOT_DIR/ca/intermediate/private/$cert_name.key.pem"
    openssl pkcs12 -export -in $CERT_PATH -inkey $KEY_PATH -passin pass:$DEFAULT_PASSWORD -passout pass:$DEFAULT_PASSWORD -name $cert_name -out tmp-certs/$cert_name.p12
    sleep 1
    ## build keystore and truststores
    keytool -noprompt -importkeystore -deststorepass $DEFAULT_PASSWORD -destkeystore stores/$store_type.keystore -srckeystore tmp-certs/$cert_name.p12 -srcstorepass $DEFAULT_PASSWORD -storepass $DEFAULT_PASSWORD -keypass $DEFAULT_PASSWORD -srcstoretype PKCS12 -deststoretype pkcs12
    openssl pkcs12 -export -in $CA_CERT -inkey $CA_KEY -passin pass:$DEFAULT_PASSWORD -passout pass:$DEFAULT_PASSWORD -name 'ca' -out tmp-certs/ca.p12
    sleep 1
    openssl pkcs12 -export -in $INT_CA_CERT -inkey $INT_CA_KEY -passin pass:$DEFAULT_PASSWORD -passout pass:$DEFAULT_PASSWORD -name 'intermediate-ca' -out tmp-certs/inter-ca.p12
    sleep 1
    keytool -noprompt -importkeystore -deststorepass $DEFAULT_PASSWORD -destkeystore stores/$store_type.truststore -srckeystore tmp-certs/ca.p12 -srcstorepass $DEFAULT_PASSWORD -srcstoretype PKCS12 -storepass $DEFAULT_PASSWORD -keypass $DEFAULT_PASSWORD -deststoretype pkcs12
    sleep 1
    keytool -noprompt -importkeystore -deststorepass $DEFAULT_PASSWORD -destkeystore stores/$store_type.truststore -srckeystore tmp-certs/inter-ca.p12 -srcstorepass $DEFAULT_PASSWORD -srcstoretype PKCS12 -storepass $DEFAULT_PASSWORD -keypass $DEFAULT_PASSWORD -deststoretype pkcs12
}
## building stores for the brokers
mkdir -p stores tmp-certs
CONFIG_FILE=$1
while read data; do
    fields=($(echo $data | tr "," "\n"))
    echo "Building a store for ${fields[0]} and ${fields[1]}"
    build_store "${fields[0]}" "${fields[1]}"
done <$CONFIG_FILE
# Fix: the scratch directory is tmp-certs, not temp-certs. The old typo left
# the PKCS12 bundles (which contain private keys) behind on disk.
rm -rf tmp-certs
================================================
FILE: ca-builder-scripts/configs/batch-of-certs.txt
================================================
consumer,machine0.example.com
producer,machine1.example.com
kafka,machine2.example.com
zookeeper,machine3.example.com
================================================
FILE: ca-builder-scripts/configs/batch-of-stores.txt
================================================
consumer,machine0.example.com
producer,machine1.example.com
kafka,machine2.example.com
zookeeper,machine3.example.com
================================================
FILE: ca-builder-scripts/configs/ca-config-vars
================================================
DE
Berlin
Berlin
Confluent Germany
================================================
FILE: ca-builder-scripts/configs/ca.config
================================================
[ ca ]
default_ca = CA_default
[ CA_default ]
# Directory and file locations.
dir = .
certs = $dir/certs
crl_dir = $dir/crl
new_certs_dir = $dir/newcerts
database = $dir/index.txt
serial = $dir/serial
RANDFILE = $dir/private/.rand
# The root key and root certificate.
private_key = $dir/private/ca.key.pem
certificate = $dir/certs/ca.cert.pem
# For certificate revocation lists.
crlnumber = $dir/crlnumber
crl = $dir/crl/ca.crl.pem
crl_extensions = crl_ext
default_crl_days = 30
# SHA-1 is deprecated, so use SHA-2 instead.
default_md = sha256
name_opt = ca_default
cert_opt = ca_default
default_days = 375
preserve = no
policy = policy_strict
[ policy_strict ]
# The root CA should only sign intermediate certificates that match.
# See the POLICY FORMAT section of `man ca`.
countryName = match
stateOrProvinceName = match
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ policy_loose ]
# Allow the intermediate CA to sign a more diverse range of certificates.
# See the POLICY FORMAT section of the `ca` man page.
countryName = optional
stateOrProvinceName = optional
localityName = optional
organizationName = optional
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
# Options for the `req` tool (`man req`).
default_bits = 2048
distinguished_name = req_distinguished_name
string_mask = utf8only
# SHA-1 is deprecated, so use SHA-2 instead.
default_md = sha256
# Extension to add when the -x509 option is used.
x509_extensions = v3_ca
[ req_distinguished_name ]
# See .
countryName = Country Name (2 letter code)
stateOrProvinceName = State or Province Name
localityName = Locality Name
0.organizationName = Organization Name
organizationalUnitName = Organizational Unit Name
commonName = Common Name
emailAddress = Email Address
# Optionally, specify some defaults.
countryName_default = DE
stateOrProvinceName_default = Berlin
localityName_default = Berlin
0.organizationName_default = Confluent Ltd
#organizationalUnitName_default =
#emailAddress_default =
[ v3_ca ]
# Extensions for a typical CA (`man x509v3_config`).
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer
basicConstraints = critical, CA:true
keyUsage = critical, digitalSignature, cRLSign, keyCertSign
[ v3_intermediate_ca ]
# Extensions for a typical intermediate CA (`man x509v3_config`).
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer
basicConstraints = critical, CA:true, pathlen:0
keyUsage = critical, digitalSignature, cRLSign, keyCertSign
[ usr_cert ]
# Extensions for client certificates (`man x509v3_config`).
basicConstraints = CA:FALSE
nsCertType = client, email
nsComment = "OpenSSL Generated Client Certificate"
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth, emailProtection
[ server_cert ]
# Extensions for server certificates (`man x509v3_config`).
basicConstraints = CA:FALSE
nsCertType = server
nsComment = "OpenSSL Generated Server Certificate"
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer:always
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
[ crl_ext ]
# Extension for CRLs (`man x509v3_config`).
authorityKeyIdentifier=keyid:always
[ ocsp ]
# Extension for OCSP signing certificates (`man ocsp`).
basicConstraints = CA:FALSE
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
keyUsage = critical, digitalSignature
extendedKeyUsage = critical, OCSPSigning
================================================
FILE: ca-builder-scripts/configs/intermediate-ca.config
================================================
[defaults]
crl_url = http://httpd:80/crls.pem # CRL distribution point
[ ca ]
# `man ca`
default_ca = CA_default
[ CA_default ]
# Directory and file locations.
dir = intermediate/
certs = $dir/certs
crl_dir = $dir/crl
new_certs_dir = $dir/newcerts
database = $dir/index.txt
serial = $dir/serial
RANDFILE = $dir/private/.rand
# The root key and root certificate.
private_key = $dir/private/intermediate.key.pem
certificate = $dir/certs/intermediate.cert.pem
# For certificate revocation lists.
crlnumber = $dir/crlnumber
crl = $dir/crl/intermediate.crl.pem
crl_extensions = crl_ext
default_crl_days = 30
# SHA-1 is deprecated, so use SHA-2 instead.
default_md = sha256
name_opt = ca_default
cert_opt = ca_default
default_days = 375
preserve = no
policy = policy_loose
[ policy_strict ]
# The root CA should only sign intermediate certificates that match.
# See the POLICY FORMAT section of `man ca`.
countryName = match
stateOrProvinceName = match
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ policy_loose ]
# Allow the intermediate CA to sign a more diverse range of certificates.
# See the POLICY FORMAT section of the `ca` man page.
countryName = optional
stateOrProvinceName = optional
localityName = optional
organizationName = optional
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
# Options for the `req` tool (`man req`).
default_bits = 2048
distinguished_name = req_distinguished_name
string_mask = utf8only
# SHA-1 is deprecated, so use SHA-2 instead.
default_md = sha256
# Extension to add when the -x509 option is used.
x509_extensions = v3_ca
[ req_distinguished_name ]
# See .
countryName = Country Name (2 letter code)
stateOrProvinceName = State or Province Name
localityName = Locality Name
0.organizationName = Organization Name
organizationalUnitName = Organizational Unit Name
commonName = Common Name
emailAddress = Email Address
# Optionally, specify some defaults.
countryName_default = DE
stateOrProvinceName_default = Berlin
localityName_default = Berlin
0.organizationName_default = Confluent Ltd
organizationalUnitName_default =
emailAddress_default =
[ v3_ca ]
# Extensions for a typical CA (`man x509v3_config`).
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer
basicConstraints = critical, CA:true
keyUsage = critical, digitalSignature, cRLSign, keyCertSign
[ v3_intermediate_ca ]
# Extensions for a typical intermediate CA (`man x509v3_config`).
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer
basicConstraints = critical, CA:true, pathlen:0
keyUsage = critical, digitalSignature, cRLSign, keyCertSign
[ usr_cert ]
# Extensions for client certificates (`man x509v3_config`).
basicConstraints = CA:FALSE
nsCertType = client, email
nsComment = "OpenSSL Generated Client Certificate"
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth, emailProtection
[ server_cert ]
# Extensions for server certificates (`man x509v3_config`).
basicConstraints = CA:FALSE
nsCertType = client, server
nsComment = "OpenSSL Generated Server Certificate"
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer:always
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth, serverAuth
crlDistributionPoints = URI:http://httpd:80/crls.pem
subjectAltName = @alt_names
[ crl_ext ]
# Extension for CRLs (`man x509v3_config`).
authorityKeyIdentifier=keyid:always
[ ocsp ]
# Extension for OCSP signing certificates (`man ocsp`).
basicConstraints = CA:FALSE
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
keyUsage = critical, digitalSignature
extendedKeyUsage = critical, OCSPSigning
================================================
FILE: ca-builder-scripts/create-crl.sh
================================================
#!/usr/bin/env bash
DEFAULT_PASSWORD=${1:-confluent}
if [ -z "${CA_ROOT_DIR+x}" ];
then
CA_ROOT_DIR='.'
fi
source $CA_ROOT_DIR/utils/functions.sh
(cd $CA_ROOT_DIR/ca; create_certificate_revokation_list )
================================================
FILE: ca-builder-scripts/create-pair-certs.sh
================================================
#!/usr/bin/env bash
# Generate a private key plus a certificate signed by the intermediate CA.
#
# Usage: ./create-pair-certs.sh <hostname> [machine] [extension] [password]
#   hostname  - name used for the key/csr/cert files
#   machine   - extra DNS subject-alternative-name added to the cert
#   extension - openssl extension section: server_cert (default) or usr_cert
#   password  - CA key password (default: confluent)
set -e
HOSTNAME=$1
MACHINE=${2:-""}
EXTENSION=${3:-server_cert}
DEFAULT_PASSWORD=${4:-confluent}
# Fix: message said "a part of certificates"; it builds a key/cert pair.
echo "Building a pair of certificates for $HOSTNAME using $EXTENSION"
if [ -z "${CA_ROOT_DIR+x}" ];
then
    CA_ROOT_DIR='.'
fi
ITERMEDIATE_CA_DIR=$CA_ROOT_DIR/ca/intermediate
CERT_FILE="$ITERMEDIATE_CA_DIR/certs/$HOSTNAME.cert.pem"
# Refuse to overwrite an existing certificate.
if test -f "$CERT_FILE"; then
    RED='\033[0;31m'
    NC='\033[0m' # No Color
    printf "${RED}Cert $CERT_FILE exists! exiting...${NC}\n"
    exit 1
fi
source $CA_ROOT_DIR/utils/functions.sh
# Reset the working openssl.cnf (drops alt_names appended by previous runs),
# then issue the certificate with $MACHINE as the extra SAN.
(cd $CA_ROOT_DIR; refresh_openssl_file "$CA_ROOT_DIR" "$ITERMEDIATE_CA_DIR" )
(cd $CA_ROOT_DIR/ca; generate_final_certificate "$MACHINE" )
================================================
FILE: ca-builder-scripts/del-cert.sh
================================================
#!/usr/bin/env bash
# Delete the private key, certificate and CSR generated for a given name.
#
# Usage: ./del-cert.sh <cert-name>
NAME=$1
# Fix: guard against a missing name, which would otherwise build paths like
# ".../.key.pem" and attempt to remove the wrong files.
if [ -z "$NAME" ]; then
    echo "Usage: $0 <cert-name>" >&2
    exit 1
fi
if [ -z "${CA_ROOT_DIR+x}" ];
then
    CA_ROOT_DIR='.'
fi
echo "Deleting CERT $NAME"
rm "$CA_ROOT_DIR/ca/intermediate/private/$NAME.key.pem"
rm "$CA_ROOT_DIR/ca/intermediate/certs/$NAME.cert.pem"
rm "$CA_ROOT_DIR/ca/intermediate/csr/$NAME.csr.pem"
================================================
FILE: ca-builder-scripts/revoke-cert.sh
================================================
#!/usr/bin/env bash
# Revoke a previously issued certificate in the intermediate CA database.
#
# Usage: ./revoke-cert.sh <cert-name> [password]
CERT=$1
DEFAULT_PASSWORD=${2:-confluent}
if [ -z "${CA_ROOT_DIR+x}" ]; then
    CA_ROOT_DIR='.'
fi
source "$CA_ROOT_DIR/utils/functions.sh"
(
    cd "$CA_ROOT_DIR/ca"
    revoke_cert "$CERT"
)
================================================
FILE: ca-builder-scripts/setup-ca-with-intermediate-ca.sh
================================================
#!/usr/bin/env bash
##
# This script builds a Certificate Authority of the form:
# Root CA -> intermediate CA
#
# In the CA_ROOT_DIR, this script will create the necessary directory structures
# and generate the certificates, all signed using the value provided as an
# argument to this script, or confluent by default.
##
DEFAULT_PASSWORD=${1:-confluent}
export CA_ROOT_DIR=`pwd`
echo -e "Building the CA root setup\n"
./utils/build-ca.sh $DEFAULT_PASSWORD
# Fix: user-facing message read "intemedite CA".
echo -e "Building the intermediate CA root setup:\n"
./utils/build-intermediate-ca.sh $DEFAULT_PASSWORD
================================================
FILE: ca-builder-scripts/support-scripts/build-ca.sh
================================================
#!/usr/bin/expect
# Drives ./setup-ca-with-intermediate-ca.sh non-interactively: answers the
# openssl distinguished-name prompts for the root CA, then for the
# intermediate CA, with values read from configs/ca-config-vars.
# Reads a whole file and returns its contents as a single string.
proc slurp {file} {
set fh [open $file r]
set ret [read $fh]
close $fh
return $ret
}
set timeout 20
# configs/ca-config-vars holds one DN field per line, in this order:
# country code, state/province, locality, organization.
set configslurp [slurp configs/ca-config-vars]
set lines [split $configslurp \n]
set COUNTRY_NAME [lrange $lines 0 0]
set STATE [lrange $lines 1 1]
set LOCALITY [lrange $lines 2 2]
set ORGANIZATION [lrange $lines 3 3]
eval spawn ./setup-ca-with-intermediate-ca.sh
## Generating the data for the CA setup.
expect "Country Name (2 letter code)"
send "$COUNTRY_NAME\r";
expect "State or Province Name"
send "$STATE\r";
expect "Locality Name"
send "$LOCALITY\r";
expect "Organization Name"
send "$ORGANIZATION\r";
expect "Organizational Unit Name"
send "\r";
expect "Common Name"
send "CA\r";
expect "Email Address"
send "\r";
## Generating the data for the Intermediate setup.
expect "Country Name (2 letter code)"
send "$COUNTRY_NAME\r";
expect "State or Province Name"
send "$STATE\r";
expect "Locality Name"
send "$LOCALITY\r";
expect "Organization Name"
send "$ORGANIZATION\r";
expect "Organizational Unit Name"
send "\r";
expect "Common Name"
send "Intermediate-CA\r";
expect "Email Address"
send "\r";
# Sign the certificate and commit
expect "Sign the certificate?"
send "y\r";
expect "1 out of 1 certificate requests certified, commit?"
send "y\r";
# Hand the session back to the user for any remaining output.
interact
================================================
FILE: ca-builder-scripts/support-scripts/create-cert.sh
================================================
#!/usr/bin/expect -f
# Drives ./create-pair-certs.sh non-interactively: answers the openssl DN
# prompts with values from configs/ca-config-vars and uses the first CLI
# argument as the certificate Common Name; remaining arguments are passed
# through as the machine name(s).
# Reads a whole file and returns its contents as a single string.
proc slurp {file} {
set fh [open $file r]
set ret [read $fh]
close $fh
return $ret
}
# Fix: removed the unused create_certs proc. Its parameter list
# {cert_name,machine} declared a single argument literally named
# "cert_name,machine", and the proc was never invoked (the script spawns
# ./create-pair-certs.sh directly below).
set timeout 20
set configslurp [slurp configs/ca-config-vars]
set lines [split $configslurp \n]
set COUNTRY_NAME [lrange $lines 0 0]
set STATE [lrange $lines 1 1]
set LOCALITY [lrange $lines 2 2]
set ORGANIZATION [lrange $lines 3 3]
set cert_name [lindex $argv 0]
set machine [lrange $argv 1 end]
spawn ./create-pair-certs.sh $cert_name $machine
## Generating the data for the CA setup.
expect "Country Name (2 letter code)"
send "$COUNTRY_NAME\r";
expect "State or Province Name"
send "$STATE\r";
expect "Locality Name"
send "$LOCALITY\r";
expect "Organization Name"
send "$ORGANIZATION\r";
expect "Organizational Unit Name"
send "\r";
expect "Common Name"
send "$cert_name\r";
expect "Email Address"
send "\r";
# Sign the certificate and commit
expect "Sign the certificate?"
send "y\r";
expect "1 out of 1 certificate requests certified, commit"
send "y\r";
interact
================================================
FILE: ca-builder-scripts/utils/build-ca.sh
================================================
#!/usr/bin/env bash
# Build the root CA: directory layout, openssl config, root key and the
# self-signed root certificate (which is printed at the end for inspection).
#
# Usage: ./utils/build-ca.sh [password]
DEFAULT_PASSWORD=${1:-confluent}
if [ -z "${CA_ROOT_DIR+x}" ];
then
    CA_ROOT_DIR='.'
fi
source $CA_ROOT_DIR/utils/functions.sh
# Fix: -p so a rerun does not spew a spurious "File exists" error (the script
# carried on after the error anyway, as there is no set -e).
mkdir -p $CA_ROOT_DIR/ca
setup_ca_dir_structure "$CA_ROOT_DIR/ca"
cp $CA_ROOT_DIR/configs/ca.config $CA_ROOT_DIR/ca/openssl.cnf
(cd $CA_ROOT_DIR/ca; generate_ca_keys_and_certs )
## Verify the CA certificate
openssl x509 -noout -text -in $CA_ROOT_DIR/ca/certs/ca.cert.pem
================================================
FILE: ca-builder-scripts/utils/build-intermediate-ca.sh
================================================
#!/usr/bin/env bash
# Build the intermediate CA under ca/intermediate: directory layout, openssl
# config, key pair and CSR, root-signed certificate, and the CA chain file.
#
# Usage: ./utils/build-intermediate-ca.sh [password]
DEFAULT_PASSWORD=${1:-confluent}
if [ -z "${CA_ROOT_DIR+x}" ]; then
    CA_ROOT_DIR='.'
fi
ITERMEDIATE_CA_DIR=$CA_ROOT_DIR/ca/intermediate
source "$CA_ROOT_DIR/utils/functions.sh"
mkdir -p "$ITERMEDIATE_CA_DIR"
setup_intermediate_ca_dir_structure "$ITERMEDIATE_CA_DIR"
cp "$CA_ROOT_DIR/configs/intermediate-ca.config" "$ITERMEDIATE_CA_DIR/openssl.cnf"
# Generate the intermediate key and CSR, then have the root CA sign it,
# verify the result, and assemble the chain file.
(cd "$ITERMEDIATE_CA_DIR"; generate_intermediate_keys_and_certs)
(cd "$CA_ROOT_DIR/ca"; sign_intermediate_cert_authority; verify_generate_intermediate_ca)
(cd "$CA_ROOT_DIR/ca"; create_ca_chain)
================================================
FILE: ca-builder-scripts/utils/functions.sh
================================================
#!/usr/bin/env bash
# Create the root CA private key (AES-256, protected by $DEFAULT_PASSWORD)
# and a self-signed root certificate valid ~20 years (7300 days).
# Must be run from the ca/ directory; reads the DEFAULT_PASSWORD global.
generate_ca_keys_and_certs () {
    openssl genrsa -aes256 -passout "pass:$DEFAULT_PASSWORD" -out private/ca.key.pem 4096
    chmod 400 private/ca.key.pem
    openssl req -config openssl.cnf \
        -key private/ca.key.pem \
        -new -x509 -days 7300 -sha256 -extensions v3_ca \
        -passin "pass:$DEFAULT_PASSWORD" -passout "pass:$DEFAULT_PASSWORD" \
        -out certs/ca.cert.pem
    chmod 444 certs/ca.cert.pem
}
# Create the openssl CA directory layout under $1: certs/, crl/, newcerts/,
# an owner-only private/ directory, an empty index.txt database, and a
# serial file starting at 1000.
setup_ca_dir_structure() {
    local base=$1
    mkdir -p "$base/certs" "$base/crl" "$base/newcerts" "$base/private"
    chmod 700 "$base/private"
    touch "$base/index.txt"
    echo 1000 > "$base/serial"
}
# Lay out the intermediate CA directory under $1: the standard CA layout
# (via setup_ca_dir_structure) plus a csr/ directory and a CRL number
# counter starting at 1000.
setup_intermediate_ca_dir_structure() {
    local base=$1
    setup_ca_dir_structure "$base"
    mkdir -p "$base/csr"
    echo 1000 > "$base/crlnumber"
}
# Create the intermediate CA key (AES-256, protected by $DEFAULT_PASSWORD)
# and a CSR for the root CA to sign.
# Must be run from the intermediate CA directory.
generate_intermediate_keys_and_certs () {
    openssl genrsa -aes256 -passout "pass:$DEFAULT_PASSWORD" -out private/intermediate.key.pem 4096
    chmod 400 private/intermediate.key.pem
    openssl req -config openssl.cnf -new -sha256 \
        -passin "pass:$DEFAULT_PASSWORD" -passout "pass:$DEFAULT_PASSWORD" \
        -key private/intermediate.key.pem \
        -out csr/intermediate.csr.pem
}
# Sign the intermediate CSR with the root CA (10 year validity) and make the
# resulting certificate world-readable.
# Must be run from the root ca/ directory.
sign_intermediate_cert_authority () {
    openssl ca -config openssl.cnf -extensions v3_intermediate_ca \
        -days 3650 -notext -md sha256 \
        -in intermediate/csr/intermediate.csr.pem \
        -passin "pass:$DEFAULT_PASSWORD" \
        -out intermediate/certs/intermediate.cert.pem
    chmod 444 intermediate/certs/intermediate.cert.pem
}
# Sanity-check the signed intermediate certificate: dump its contents and
# verify it chains to the root CA certificate. Run from the root ca/
# directory after sign_intermediate_cert_authority.
verify_generate_intermediate_ca () {
# verification
openssl x509 -noout -text -in intermediate/certs/intermediate.cert.pem
openssl verify -CAfile certs/ca.cert.pem intermediate/certs/intermediate.cert.pem
}
# Concatenate the intermediate and root certificates into the chain file
# clients use to validate the full trust path.
# Must be run from the root ca/ directory.
create_ca_chain () {
    cat intermediate/certs/intermediate.cert.pem certs/ca.cert.pem \
        > intermediate/certs/ca-chain.cert.pem
    chmod 444 intermediate/certs/ca-chain.cert.pem
}
# Re-copy the tracked intermediate CA config ($1/configs/intermediate-ca.config)
# over the working copy in $2/openssl.cnf, discarding any [ alt_names ]
# entries appended to it by earlier certificate generations.
refresh_openssl_file() {
    local ca_root=$1
    local intermediate=$2
    cp "$ca_root/configs/intermediate-ca.config" "$intermediate/openssl.cnf"
}
# Issue an end-entity certificate signed by the intermediate CA.
# $1 (alt_name): extra DNS subject-alternative-name added alongside localhost.
# Relies on globals set by the caller (create-pair-certs.sh):
#   HOSTNAME         - base name for the key/csr/cert files
#   EXTENSION        - openssl extension section (server_cert / usr_cert)
#   DEFAULT_PASSWORD - password protecting the CA and the new private key
# Must be run from the root ca/ directory.
generate_final_certificate () {
    alt_name=$1
    # Fix: removed `echo "$DEFAULT_PASSWORD"`, which leaked the CA password
    # to stdout (and therefore to any logs capturing this script).
    # create a private key
    openssl genrsa -aes256 -passout pass:$DEFAULT_PASSWORD -out intermediate/private/$HOSTNAME.key.pem 2048
    chmod 400 intermediate/private/$HOSTNAME.key.pem
    # append the SAN entries consumed by the extension's @alt_names section
    echo -e "" >> intermediate/openssl.cnf
    echo -e "[ alt_names ]" >> intermediate/openssl.cnf
    echo -e "DNS.1=localhost" >> intermediate/openssl.cnf
    echo -e "DNS.2=$alt_name" >> intermediate/openssl.cnf
    # create a csr
    # Fix: -passin used the hard-coded "confluent" instead of
    # $DEFAULT_PASSWORD, which broke issuance for any non-default password.
    openssl req -config intermediate/openssl.cnf \
        -passin pass:$DEFAULT_PASSWORD -passout pass:$DEFAULT_PASSWORD \
        -key intermediate/private/$HOSTNAME.key.pem \
        -new -sha256 -out intermediate/csr/$HOSTNAME.csr.pem
    # create the cert
    openssl ca -config intermediate/openssl.cnf -extensions $EXTENSION -days 375 -notext -md sha256 \
        -in intermediate/csr/$HOSTNAME.csr.pem \
        -passin pass:$DEFAULT_PASSWORD \
        -out intermediate/certs/$HOSTNAME.cert.pem
    chmod 444 intermediate/certs/$HOSTNAME.cert.pem
    # verify the cert
    openssl x509 -noout -text -in intermediate/certs/$HOSTNAME.cert.pem
    # verify the chain trust
    openssl verify -CAfile intermediate/certs/ca-chain.cert.pem intermediate/certs/$HOSTNAME.cert.pem
}
# Generate the intermediate CA's CRL under intermediate/crl/.
# (The function name keeps its historical "revokation" spelling because
# create-crl.sh calls it by that name.)
create_certificate_revokation_list () {
    openssl ca -config intermediate/openssl.cnf -gencrl \
        -passin "pass:$DEFAULT_PASSWORD" \
        -out intermediate/crl/intermediate.crl.pem
}
# Mark the certificate named $1 as revoked in the intermediate CA database.
revoke_cert () {
    local cert_name=$1
    openssl ca -config intermediate/openssl.cnf -passin "pass:$DEFAULT_PASSWORD" -revoke "intermediate/certs/$cert_name.cert.pem"
}
================================================
FILE: delegation_tokens/.gitignore
================================================
certs/
================================================
FILE: delegation_tokens/ca.cnf
================================================
[ policy_match ]
countryName = match
stateOrProvinceName = match
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
prompt = no
distinguished_name = dn
default_md = sha256
default_bits = 4096
x509_extensions = v3_ca
[ dn ]
countryName = UK
organizationName = Confluent
localityName = London
commonName = kafka.confluent.local
[ v3_ca ]
subjectKeyIdentifier=hash
basicConstraints = critical,CA:true
authorityKeyIdentifier=keyid:always,issuer:always
keyUsage = critical,keyCertSign,cRLSign
================================================
FILE: delegation_tokens/client.cnf
================================================
[req]
prompt = no
distinguished_name = dn
default_md = sha256
default_bits = 4096
req_extensions = v3_req
[ dn ]
countryName = UK
organizationName = Confluent
localityName = London
commonName=kafka.confluent.local
[ v3_ca ]
subjectKeyIdentifier=hash
basicConstraints = critical,CA:true
authorityKeyIdentifier=keyid:always,issuer:always
keyUsage = critical,keyCertSign,cRLSign
[ v3_req ]
subjectKeyIdentifier = hash
basicConstraints = CA:FALSE
nsComment = "OpenSSL Generated Certificate"
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
subjectAltName = @alt_names
[ alt_names ]
DNS.1=kafka.confluent.local
================================================
FILE: delegation_tokens/docker-compose.yml
================================================
version: '3'
services:
zookeeper:
build: zookeeper/
container_name: zookeeper
hostname: zookeeper
domainname: confluent.local
restart: on-failure
volumes:
- ./certs/:/var/lib/secret
environment:
- KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf
networks:
default:
aliases:
- zookeeper.confluent.local
kafka:
build: kafka/
container_name: kafka
hostname: kafka
domainname: confluent.local
depends_on:
- zookeeper
restart: on-failure
volumes:
- ./certs/:/var/lib/secret
environment:
- KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf
networks:
default:
aliases:
- kafka.confluent.local
ports:
- "9093:9093"
volumes:
secret: {}
networks:
default:
================================================
FILE: delegation_tokens/kafka/Dockerfile
================================================
FROM centos
MAINTAINER d.gasparina@gmail.com
ENV container docker
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install zookeeper and kafka
RUN yum install -y java-1.8.0-openjdk
RUN yum install -y confluent-platform-2.12
# 3. Configure Kafka
COPY server.properties /etc/kafka/server.properties
COPY kafka_server_jaas.conf /etc/kafka/kafka_server_jaas.conf
COPY consumer.properties /etc/kafka/consumer.properties
COPY create_client_properties.sh /etc/kafka/create_client_properties.sh
EXPOSE 9093
CMD kafka-server-start /etc/kafka/server.properties
================================================
FILE: delegation_tokens/kafka/confluent.repo
================================================
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.4/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.4
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
================================================
FILE: delegation_tokens/kafka/consumer.properties
================================================
sasl.mechanism=SCRAM-SHA-256
# Configure SASL_SSL if SSL encryption is enabled, otherwise configure SASL_PLAINTEXT
security.protocol=SASL_SSL
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="kafka" \
password="kafka";
ssl.truststore.location=/var/lib/secret/truststore.jks
ssl.truststore.password=test1234
ssl.keystore.location=/var/lib/secret/client.keystore.jks
ssl.keystore.password=test1234
================================================
FILE: delegation_tokens/kafka/create_client_properties.sh
================================================
#!/bin/bash
set -e
set -u
RESPONSE=$(kafka-delegation-tokens \
--bootstrap-server kafka.confluent.local:9093 \
--create \
--command-config /etc/kafka/consumer.properties \
--max-life-time-period -1 | tail -1)
TOKENID=$(echo $RESPONSE | cut -d " " -f1)
HMAC=$(echo $RESPONSE | cut -d " " -f2)
echo "Received token id: $TOKENID"
echo "Received message authentication code: $HMAC"
echo 'sasl.mechanism=SCRAM-SHA-256
# Configure SASL_SSL if SSL encryption is enabled, otherwise configure SASL_PLAINTEXT
security.protocol=SASL_SSL
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="'$TOKENID'" \
password="'$HMAC'" \
tokenauth="true";
ssl.truststore.location=/var/lib/secret/truststore.jks
ssl.truststore.password=test1234
ssl.keystore.location=/var/lib/secret/client.keystore.jks
ssl.keystore.password=test1234' > /tmp/delegation_token_client.properties
================================================
FILE: delegation_tokens/kafka/kafka_server_jaas.conf
================================================
// JAAS configuration for the kafka broker.
// Username and password are used by the broker to initiate connections to other brokers
// admin is another user allowed to connect to the broker.
// NOTE(review): user_<name> entries are honoured by the PLAIN login module;
// for SCRAM the credentials live in zookeeper (created by the ./up script
// via kafka-configs) - confirm whether user_admin is actually used here.
KafkaServer {
org.apache.kafka.common.security.scram.ScramLoginModule required
username="kafka"
password="kafka"
user_admin="admin";
};
// The client section is used by kafka to connect to zookeeper.
// This must match the zookeeper jaas configuration.
Client {
org.apache.zookeeper.server.auth.DigestLoginModule required
username="kafka"
password="kafka";
};
================================================
FILE: delegation_tokens/kafka/server.properties
================================================
############################# Server Basics #############################
broker.id=0
# Single SASL-over-TLS listener; the hostname must match the CN/SAN of the
# certificate generated from server.cnf.
listeners=SASL_SSL://kafka.confluent.local:9093
advertised.listeners=SASL_SSL://kafka.confluent.local:9093
log.dirs=/var/lib/kafka
# Single-broker demo: internal topics use replication factor 1.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
zookeeper.connect=zookeeper.confluent.local:2181
# TLS Configuration
security.inter.broker.protocol=SASL_SSL
ssl.truststore.location=/var/lib/secret/truststore.jks
ssl.truststore.password=test1234
ssl.keystore.location=/var/lib/secret/server.keystore.jks
ssl.keystore.password=test1234
ssl.client.auth=required
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
# Master key for delegation tokens.
# NOTE(review): "foo" is a demo value - use a strong random secret in production.
delegation.token.master.key=foo
sasl.enabled.mechanisms=SCRAM-SHA-256
sasl.mechanism.inter.broker.protocol=SCRAM-SHA-256
# The broker's own SCRAM identity bypasses ACL checks.
super.users=User:kafka
================================================
FILE: delegation_tokens/server.cnf
================================================
# OpenSSL request configuration for the kafka broker certificate.
# The ./up script signs the CSR with "-extfile server.cnf -extensions v3_req".
[req]
prompt = no
distinguished_name = dn
default_md = sha256
default_bits = 4096
req_extensions = v3_req
[ dn ]
countryName = UK
organizationName = Confluent
localityName = London
commonName=kafka.confluent.local
# NOTE(review): [ v3_ca ] is not referenced by this file or the ./up script
# (signing uses v3_req) - confirm it is intentionally kept.
[ v3_ca ]
subjectKeyIdentifier=hash
basicConstraints = critical,CA:true
authorityKeyIdentifier=keyid:always,issuer:always
keyUsage = critical,keyCertSign,cRLSign
# Leaf certificate extensions: usable for both server and client auth.
[ v3_req ]
subjectKeyIdentifier = hash
basicConstraints = CA:FALSE
nsComment = "OpenSSL Generated Certificate"
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth, clientAuth
subjectAltName = @alt_names
[ alt_names ]
DNS.1=kafka.confluent.local
================================================
FILE: delegation_tokens/up
================================================
#!/bin/sh
set -e
# Creating TLS CA, Certificates and keystore / truststore
rm -rf certs
mkdir -p certs
# Generate CA certificates
openssl req -new -nodes -x509 -days 3650 -newkey rsa:2048 -keyout certs/ca.key -out certs/ca.crt -config ca.cnf
# Bundle cert + key so the file can be used with -CAfile/-chain below.
cat certs/ca.crt certs/ca.key > certs/ca.pem
# Generate kafka server certificates
openssl req -new -newkey rsa:2048 -keyout certs/server.key -out certs/server.csr -config server.cnf -nodes
openssl x509 -req -days 3650 -in certs/server.csr -CA certs/ca.crt -CAkey certs/ca.key -CAcreateserial -out certs/server.crt -extfile server.cnf -extensions v3_req
openssl pkcs12 -export -in certs/server.crt -inkey certs/server.key -chain -CAfile certs/ca.pem -name "kafka.confluent.local" -out certs/server.p12 -password pass:test1234
# Generate client certificates
openssl req -new -newkey rsa:2048 -keyout certs/client.key -out certs/client.csr -config client.cnf -nodes
openssl x509 -req -days 3650 -in certs/client.csr -CA certs/ca.crt -CAkey certs/ca.key -CAcreateserial -out certs/client.crt -extfile client.cnf -extensions v3_req
openssl pkcs12 -export -in certs/client.crt -inkey certs/client.key -chain -CAfile certs/ca.pem -name "kafka.confluent.local" -out certs/client.p12 -password pass:test1234
# Import server certificate to keystore and CA to truststore
keytool -importkeystore -deststorepass test1234 -destkeystore certs/server.keystore.jks \
-srckeystore certs/server.p12 \
-deststoretype PKCS12 \
-srcstoretype PKCS12 \
-noprompt \
-srcstorepass test1234
keytool -importkeystore -deststorepass test1234 -destkeystore certs/client.keystore.jks \
-srckeystore certs/client.p12 \
-deststoretype PKCS12 \
-srcstoretype PKCS12 \
-noprompt \
-srcstorepass test1234
keytool -keystore certs/truststore.jks -alias CARoot -import -file certs/ca.crt -storepass test1234 -noprompt -storetype PKCS12
# Starting docker-compose services
docker-compose up -d --build
# Register SCRAM credentials for the "kafka" user directly in zookeeper.
docker-compose exec kafka kafka-configs \
--zookeeper zookeeper.confluent.local:2181 \
--alter --add-config 'SCRAM-SHA-256=[password=kafka],SCRAM-SHA-512=[password=kafka]' \
--entity-type users --entity-name kafka
# Grant the "kafka" principal every operation on the "test" topic.
docker-compose exec kafka kafka-acls \
--authorizer kafka.security.auth.SimpleAclAuthorizer \
--authorizer-properties zookeeper.connect=zookeeper.confluent.local:2181 \
--add --allow-principal User:kafka --operation All --topic test
# NOTE(review): "docker exec" (not "docker-compose exec") relies on the
# container being named exactly "kafka" - verify container_name is set
# in the docker-compose file.
docker exec kafka /etc/kafka/create_client_properties.sh
echo "Example configuration to access kafka:"
echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka.confluent.local:9093 --topic test --producer.config /tmp/delegation_token_client.properties"
echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka.confluent.local:9093 --topic test --consumer.config /tmp/delegation_token_client.properties --from-beginning"
================================================
FILE: delegation_tokens/zookeeper/Dockerfile
================================================
FROM centos
# MAINTAINER is deprecated since Docker 1.13; use a LABEL instead.
LABEL maintainer="d.gasparina@gmail.com"
ENV container docker
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install zookeeper and kafka (one RUN keeps the image to a single package layer)
RUN yum install -y java-1.8.0-openjdk confluent-platform-2.12
# 3. Configure zookeeper
COPY zookeeper.properties /etc/kafka/zookeeper.properties
COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf
EXPOSE 2181
CMD zookeeper-server-start /etc/kafka/zookeeper.properties
================================================
FILE: delegation_tokens/zookeeper/confluent.repo
================================================
# Confluent 5.4 yum repositories, GPG-checked against Confluent's archive key.
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.4/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.4
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
================================================
FILE: delegation_tokens/zookeeper/zookeeper.properties
================================================
# Standalone zookeeper with SASL authentication required for clients.
dataDir=/var/lib/zookeeper
clientPort=2181
# Per-host concurrent connection limit (0 disables the limit per the
# ZooKeeper admin guide).
maxClientCnxns=0
# Enable the SASL authentication provider and require clients to use it.
authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider
requireClientAuthScheme=sasl
================================================
FILE: delegation_tokens/zookeeper/zookeeper.sasl.jaas.config
================================================
// Server context used by zookeeper to authenticate SASL clients.
// user_kafka="kafka" declares a user named "kafka" with password "kafka";
// it must match the Client section of the broker's kafka_server_jaas.conf.
Server {
org.apache.zookeeper.server.auth.DigestLoginModule required
user_kafka="kafka";
};
================================================
FILE: kafka-connect-mtls/.gitignore
================================================
# Client key material generated by check-ssl-client-auth.sh - never commit.
connect/secrets/client-*.pem
connect/secrets/client.p12
================================================
FILE: kafka-connect-mtls/README.md
================================================
# Kafka Connect REST api ssl client auth
One of the common questions regarding security on the Kafka Connect REST API is how to prevent unwanted access.
This playbook shows one of the methods currently possible (as of November 2019), using the TLS mutual-authentication (mTLS) feature.
## Requirements
To be able to execute this playbook you require:
* Docker (19.03 or later)
* Docker compose (1.24.1 or later)
* curl
## Bootstrap the playbook
The playbook bootstrap can be done by executing the ```./up``` script.
### Prepared TLS certificates and keystores
A set of prepared TLS certificates and keystores are available within the _connect/secrets_ directory.
Most relevant ones are:
* _certificate.p12_: TLS certificate to verify the failure of mTLS (this is a self-signed certificate)
* _rest-client.p12_: TLS certificate to verify the positive verification using mTLS (this cert is signed by the same CA as the server identity)
* _server.keystore_ and _server.truststore_: keystores prepared for the Kafka Connect REST server identity.
All these certs have been created with the ca-builder-scripts.
## Verify the connectivity
To verify the connectivity there is a prepared script ```check-ssl-client-auth.sh```.
This script uses curl to verify a successful and a failed authentication using mTLS.
================================================
FILE: kafka-connect-mtls/check-ssl-client-auth.sh
================================================
#!/usr/bin/env bash
# Verify mTLS client authentication on the Kafka Connect REST interface:
# a client signed by the trusted CA must succeed, an unknown self-signed
# client must be rejected.

# Positive check: use the key/cert pair issued by the CA chain the server
# trusts. $1 is the certificate name (e.g. "connect"); ":confluent" is the
# passphrase protecting the client private key.
verify_ok_ssl_client_auth () {
cp -f "../ca-builder-scripts/ca/intermediate/private/$1.key.pem" "connect/secrets/$1.key.pem"
cp -f ../ca-builder-scripts/ca/intermediate/certs/ca-chain.cert.pem connect/secrets/ca-chain.cert.pem
cp -f "../ca-builder-scripts/ca/intermediate/certs/$1.cert.pem" "connect/secrets/$1.cert.pem"
curl --key "connect/secrets/$1.key.pem" --cacert connect/secrets/ca-chain.cert.pem --cert "connect/secrets/$1.cert.pem:confluent" https://localhost:18083
}
# Negative check: build a throw-away CA plus client certificate that the
# server does not trust; the curl call is expected to fail the handshake.
verify_ko_ssl_client_auth() {
# -p makes the check re-runnable when a previous run left the directory.
mkdir -p connect/certs
openssl req -new -nodes -x509 -days 3650 -newkey rsa:2048 -keyout connect/certs/ca.key -out connect/certs/ca.crt -config connect/config/ca.cnf
cat connect/certs/ca.crt connect/certs/ca.key > connect/certs/ca.pem
openssl req -new -newkey rsa:2048 -keyout connect/certs/client.key -out connect/certs/client.csr -config connect/config/client.cnf -nodes
openssl x509 -req -days 3650 -in connect/certs/client.csr -CA connect/certs/ca.crt -CAkey connect/certs/ca.key -CAcreateserial -out connect/certs/client.crt -extfile connect/config/client.cnf -extensions v3_req
openssl pkcs12 -export -in connect/certs/client.crt -inkey connect/certs/client.key -chain -CAfile connect/certs/ca.pem -name "connect" -out connect/certs/client.p12 -password pass:confluent
cp connect/certs/client.p12 connect/secrets/client.p12
rm -rf connect/certs
# Re-extract CA, certificate and key as separate PEM files for curl.
openssl pkcs12 -in connect/secrets/client.p12 -out connect/secrets/client-ca.pem -cacerts -nokeys -passin pass:confluent -passout pass:confluent
openssl pkcs12 -in connect/secrets/client.p12 -out connect/secrets/client-client.pem -clcerts -nokeys -passin pass:confluent -passout pass:confluent
openssl pkcs12 -in connect/secrets/client.p12 -out connect/secrets/client-key.pem -nocerts -passin pass:confluent -passout pass:confluent
curl --insecure --key connect/secrets/client-key.pem --cacert connect/secrets/client-ca.pem --cert connect/secrets/client-client.pem:confluent https://localhost:18083
}
echo "Check SSL client auth with an unknown certificate"
verify_ko_ssl_client_auth
echo ""
echo ""
echo "Check SSL client auth with a valid client"
verify_ok_ssl_client_auth "connect"
================================================
FILE: kafka-connect-mtls/connect/config/ca.cnf
================================================
# OpenSSL configuration used to create the throw-away CA certificate in
# verify_ko_ssl_client_auth (check-ssl-client-auth.sh).
# NOTE(review): [ policy_match ] is only honoured by a [ ca ] section, which
# this file does not define - confirm it is intentionally unused.
[ policy_match ]
countryName = match
stateOrProvinceName = match
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
prompt = no
distinguished_name = dn
default_md = sha256
default_bits = 4096
# Self-signed request (-x509): apply the CA extensions directly.
x509_extensions = v3_ca
[ dn ]
countryName = DE
organizationName = Confluent
localityName = Berlin
commonName = connect.confluent.local
[ v3_ca ]
subjectKeyIdentifier=hash
basicConstraints = critical,CA:true
authorityKeyIdentifier=keyid:always,issuer:always
keyUsage = critical,keyCertSign,cRLSign
================================================
FILE: kafka-connect-mtls/connect/config/client.cnf
================================================
# OpenSSL request configuration for the throw-away mTLS test client
# certificate (used by verify_ko_ssl_client_auth; signed with -extensions v3_req).
[req]
prompt = no
distinguished_name = dn
default_md = sha256
default_bits = 4096
req_extensions = v3_req
[ dn ]
countryName = UK
organizationName = Confluent
localityName = London
commonName=connect.client
# NOTE(review): [ v3_ca ] is not referenced by this file or its caller -
# confirm it is intentionally kept.
[ v3_ca ]
subjectKeyIdentifier=hash
basicConstraints = critical,CA:true
authorityKeyIdentifier=keyid:always,issuer:always
keyUsage = critical,keyCertSign,cRLSign
# Client-auth-only leaf certificate extensions.
[ v3_req ]
subjectKeyIdentifier = hash
basicConstraints = CA:FALSE
nsComment = "OpenSSL Generated Certificate"
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
subjectAltName = @alt_names
[ alt_names ]
DNS.1=localhost
================================================
FILE: kafka-connect-mtls/connect/secrets/ca-chain.cert.pem
================================================
-----BEGIN CERTIFICATE-----
MIIF4TCCA8mgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwgYExCzAJBgNVBAYTAkRF
MQ8wDQYDVQQIDAZCZXJsaW4xDzANBgNVBAcMBkJlcmxpbjEWMBQGA1UECgwNQ29u
Zmx1ZW50IEx0ZDELMAkGA1UECwwCUFMxCzAJBgNVBAMMAmNhMR4wHAYJKoZIhvcN
AQkBFg9jYUBjb25mbHVlbnQuaW8wHhcNMTkxMTEyMTExMTMxWhcNMjkxMTA5MTEx
MTMxWjB9MQswCQYDVQQGEwJERTEPMA0GA1UECAwGQmVybGluMRYwFAYDVQQKDA1D
b25mbHVlbnQgTHRkMQswCQYDVQQLDAJQUzEYMBYGA1UEAwwPSW50ZXJtZWRpYXRl
LUNBMR4wHAYJKoZIhvcNAQkBFg9jYUBjb25mbHVlbnQuaW8wggIiMA0GCSqGSIb3
DQEBAQUAA4ICDwAwggIKAoICAQDMTnQGumpx9Byuq+z1APGFFp3lYDIY8rbScRkk
dXQ1tukad9aOtQtGWfShOeQ7wndKvdYzcPN+AnGoszjd/gVeRDiXo0rEMGYgyglW
vC1C88LWy/Wg26mgGTkMpeUfgN25lpfOUlzh0bVtPfcw154DXd5HZiHOiJ6CXytx
bf+8M99SkQ0+X6MH2EBXMbOJBGGzZycMON7ONhdGhBrmClMHPMomMnprdL/W2TKa
6SGXpxh3lLGYREqBSUlP6Owt0SMf/V5enbNrtllzmliBZbiUraGLohKDz8c8rcCX
zz3zoBGkEGAJEFbXqoMa7OcVb2InWJTkfN2E4Z2akU6+9u6pvQ+q2e3KxHR42ure
P4ilI6wzJ43UaPyFPzFpRd16akvIEDkdACU1FmlajW69Haye7Ey1HoI2tiJHDPAN
JJbWUl59FS2DeFt7KvlBHdy81Z3D/G9QjSkOGCItabQLF8Jum2d5qMdekb3X8fTi
2SPeDcgPyRat+6gKqsINzM6ChnXA0CFMbSGN6XpxARcxd95HR9zIStGdVKcPtnaN
Q8o3/ehTNC8DVuT2LLlSwdAAElgZ/EHtfW7rrOBDgIMV8946m5r2tmf/K/A1vEUU
4+QD66IX4nleK+OGuUexF1xv2UTzGhmk/VJsvx3AjPQgQLGqT2aRm6I6NEeWK/CY
jB597QIDAQABo2YwZDAdBgNVHQ4EFgQUJscsGzI7qLGpXsGdjRsvE6NXG0EwHwYD
VR0jBBgwFoAU1K1JidhNRLmWIeRKobCfr8QhHQUwEgYDVR0TAQH/BAgwBgEB/wIB
ADAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBAMCpI/ojEZc28fYF
VbM9XYfPY6M8WRU39SSPZO5dknVmP16r6TInD2ZECfW/5XkqXPtvj3wcvXJ4/tLN
iS0DIzXbUlsO9sedDSCcAq251c2MIfROKj3E93X66PAVSn1SWVRgqlyD62G3Nv2E
Z+V6vnE6oEXdrIucM4dUdvoz9Qnr0pZCchMYyFVdydAfvHu7wJ8PqVGgNCSgStuI
aQQUiKLFSnWOr+EJ8rZ8eGpgCWbeQ34ww8SlyjEPbkXAblsv0rH/R17GITIkFa69
SHjOChvpOWcvZ0SLwisDShVBlRcO2ypwfo5avQCYBmj27D2U7htUmn+QwhntOSAB
sAGUGeeoBgCKI7uNBuP3lXeaSXNWqsNXm7wSqsNJAPGVQzgA7kM1yjzdawCuAEzR
JdFp0Y5juUaZ1FL6xnpvzSpBYBBs2o9CwtuBkBAnuG88AUwC7JR4URb0qb7/DbS0
efbFPvUDohMAfYgUPXu1FemIuRicN99JoQLArEutKteGw4tX6W5oGckb6iBeQMe/
C+Aw+RLEOyVwbfeF6lVfn4iZfn8A0SCqYRD+vStgKrb7LxPPl8/vTpgoiDH3qulD
mV1vqeT3ESFvEnEXVd/QozDTn/AZsna3C2cp902GPEH3vV+h3ECRDZoaxuSdSzlg
0d/kGEPjjbgBDD5IJ6mNgsiIBfiX
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIF6jCCA9KgAwIBAgIJAIopqcNHix7tMA0GCSqGSIb3DQEBCwUAMIGBMQswCQYD
VQQGEwJERTEPMA0GA1UECAwGQmVybGluMQ8wDQYDVQQHDAZCZXJsaW4xFjAUBgNV
BAoMDUNvbmZsdWVudCBMdGQxCzAJBgNVBAsMAlBTMQswCQYDVQQDDAJjYTEeMBwG
CSqGSIb3DQEJARYPY2FAY29uZmx1ZW50LmlvMB4XDTE5MTExMjExMTA1OVoXDTM5
MTEwNzExMTA1OVowgYExCzAJBgNVBAYTAkRFMQ8wDQYDVQQIDAZCZXJsaW4xDzAN
BgNVBAcMBkJlcmxpbjEWMBQGA1UECgwNQ29uZmx1ZW50IEx0ZDELMAkGA1UECwwC
UFMxCzAJBgNVBAMMAmNhMR4wHAYJKoZIhvcNAQkBFg9jYUBjb25mbHVlbnQuaW8w
ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDOZyDRn9Zmv8WqhR6a8Sr5
GXa3QO4yNB5F9PSAe4xXPOkDj14Om0cS/oXYfy/uJyVb3zxPMcF4Mz/WSQTATh2Y
IunkYWqsrDZbgMG2ERdmSKvALKTd8mVFpCSzrhAvv8lZcRX0/jegjyMo2fFjRHfb
dtwE341Ywgvf5gm0HH6UzbPIY9Xd/PmHJ1kRb+wJnRxs7T00l0pXZMYafs9tbJPv
8tfrYjIM5PLxLAtlMZ1yLR1Ay8QMm5q4ZtTOQjlGOovWpPHHEhM3RealpYk1dC84
ngDpTI5zpf/Q0o4cn8aOACiy7fmm1dNMBR0Yu1JSLINxU+P0g3fNZu1PQjF13crm
hGkcNAm6k0AMYwspNLthsPdOTgHfnv6/L/VkqGIBYU+gd0G2ZjbgpEHesQM91A42
jBneY1uFhcTjmAmx+MV63BWutFrkl1ErG4raYNIdUtGlsGhJ71AmOtGj3A8YyQAm
LuzjZTQmkyP0saDa0wpsI7+lSZ5K5cJSQBMUf0bOQPspMmHfsO7H//RbmxdCiIHx
g0fCfKX3oyungProDglptDYaNwE8oF0B1vl28cwlOJm7N9DVUsDc1bZwnfdquYD7
q3rBkjZHyqmnsSVAOHvkToaHkkS7x+pFz/mPj/2Yo6h4Ujgrk4GO+vqQszk2Rp4Y
58WbQ4Nq1qyrVIF0FgYjFwIDAQABo2MwYTAdBgNVHQ4EFgQU1K1JidhNRLmWIeRK
obCfr8QhHQUwHwYDVR0jBBgwFoAU1K1JidhNRLmWIeRKobCfr8QhHQUwDwYDVR0T
AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBABmT
rHBFhz55quEG+/h3CiMKXpdld3cf27zgO2liEw/F3Xu04QaSpwsVZEOhuWjmzUA/
lSfrbWeV/jNQz0Gs/rDdMUEDbNN+vsC4+u4xbAe9OYLsRJ5v1/PB7P/JxfhnRQLr
3vQSUm7oEkhVxmTO0mFxpbrkcglAgA0siSWI2Kj0qTNeSxOW9PKOQuPqH9bTHRBt
KoiR5CAealT7EoWUhZaNgLLOJ0fsIlPbXfAeat2BVRZAYQFQOJIOouZES49Yc+qF
r6T7AVysk7pRilQWVovBOqSKzHTOKBKTJvbO23lYAOREc+UM694dPh480i6TcGEI
4iYyC2/GCK4aUCCE0WTTqSErL0fNmjVz68lpuBjUS06+dGy2p6bW8fso6ttqAry+
jbEq6Ir+P9vZbvHOPc3JyaBKGyTRwA4jcrXQ5KD5LJwBd+9/h3elciNsJtd8mHdE
2T3IqqsXdqN7gyaqwNQsk59iSkCr3cJGqvpsdVPXseadv9rcV5hxAjvWa4mLRiTL
Cxhs4CuaXz9kyss9oM1kWD0Dz3sIGwxtOcSREIqc8vclztjnpPMdly2lsK16hyfs
q2P7yFurz6ruIxLR/f8SEalQ/Eqn8kOgwwHNuhaMHsaeS3o3Guwr+KHRm0jGNO/N
ZSm2YS9n3ghEYhUqfzuJCWuaeFELeOBb5wUx01Va
-----END CERTIFICATE-----
================================================
FILE: kafka-connect-mtls/connect/secrets/connect.cert.pem
================================================
-----BEGIN CERTIFICATE-----
MIIGIDCCBAigAwIBAgICEAEwDQYJKoZIhvcNAQELBQAwfTELMAkGA1UEBhMCREUx
DzANBgNVBAgMBkJlcmxpbjEWMBQGA1UECgwNQ29uZmx1ZW50IEx0ZDELMAkGA1UE
CwwCUFMxGDAWBgNVBAMMD0ludGVybWVkaWF0ZS1DQTEeMBwGCSqGSIb3DQEJARYP
Y2FAY29uZmx1ZW50LmlvMB4XDTE5MTExMzExMjMxOVoXDTIwMTEyMjExMjMxOVow
gYwxCzAJBgNVBAYTAkRFMQ8wDQYDVQQIDAZCZXJsaW4xDzANBgNVBAcMBkJlcmxp
bjEWMBQGA1UECgwNQ29uZmx1ZW50IEx0ZDELMAkGA1UECwwCUFMxFjAUBgNVBAMM
DWthZmthLWNvbm5lY3QxHjAcBgkqhkiG9w0BCQEWD2NhQGNvbmZsdWVudC5pbzCC
ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMjwmO0YAZV/k5DgfC48DZsp
djQixKEi6TLAdt2CovemFGe38DhltaIs+BiGl9ZpTC1WTbNnkwqFm3N+7+cg4I3d
v4nYQw4RN68os9vQ+Jp/jIdVSXDP7n17pw8YOhrhQx5XMnudtghCHtp+rwG8a5KF
IKe9zpWeu2mXlF1LxrWybbaBzJ8E7u4Gsr+suhED5dH9ckFCnLk0/9NMym7XSMnK
/158JKhmElCanZmLLGwq+38ko6C/BgPbdaRwlKG+tHWY9Iqrt+tRHgvXclutMHci
ZgiAApS4pqOey5MWisb8yZs5SP14x8wAzyygLeNGr7+CcP+Ubcn71FvwhSodQ7EC
AwEAAaOCAZgwggGUMAkGA1UdEwQCMAAwEQYJYIZIAYb4QgEBBAQDAgbAMDMGCWCG
SAGG+EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRlZCBTZXJ2ZXIgQ2VydGlmaWNhdGUw
HQYDVR0OBBYEFAYy4GQt1eh6QaxmF4OyFBsK8xGWMIGvBgNVHSMEgacwgaSAFCbH
LBsyO6ixqV7BnY0bLxOjVxtBoYGHpIGEMIGBMQswCQYDVQQGEwJERTEPMA0GA1UE
CAwGQmVybGluMQ8wDQYDVQQHDAZCZXJsaW4xFjAUBgNVBAoMDUNvbmZsdWVudCBM
dGQxCzAJBgNVBAsMAlBTMQswCQYDVQQDDAJjYTEeMBwGCSqGSIb3DQEJARYPY2FA
Y29uZmx1ZW50LmlvggIQADAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYB
BQUHAwIGCCsGAQUFBwMBMCkGA1UdHwQiMCAwHqAcoBqGGGh0dHA6Ly9odHRwZDo4
MC9jcmxzLnBlbTAUBgNVHREEDTALgglsb2NhbGhvc3QwDQYJKoZIhvcNAQELBQAD
ggIBAIpE7+Cu5jwv1u+TpQb2qxmR5kt6W90GySiSW0Ky+NQUL9WpbTYuGE6avpO/
83hIf620JGTwgnwVULBaC9i8+wux7stM2vPLOaC54eDYtKgY91wWnmt061UZsie6
FgqpNnev2U8WukxdFWdjvNQ3sWnz/aeTiaeQYmuZjSjm/sgwh92pSGqpYnuGdyc+
e/dXxkV+lRHbMdRMoQi2ONdNQ+UZ1tJVBLjpRRaa4mq6/uemYCZ8T/BLs6Tuob2C
NmC/x6LAntbMUaHD8PGWNlqoX//XGKwZlUvn9h+eVHgjBBbdl+69IJG6UXx7T+sV
i3dY1RhQewfOAAdh6nFHL3tHfpkH+RnsEswq5DJXQtwQXGwtUv6fu9Aq/J+dGlua
YpXDwdTZYCh6i2H8WfbPFAAgZfJEupNuC5EJh/BMoYEcCkLABs7ZHgQs5xVzHL/i
li51D8gu1IqcH3+JQfQ74knDp5tWxvhQe+4m5rwct1D8IvTEOeqclvbPI4tzHmNF
rNbk7lSCicq9axGuyrUB/Y8zDnCNThhnxjQdHm1rhJYi1IXV4BvwSBpzLocjVpRa
T8C+fzAtk/VejGBf4zHjnu8xy7AO5hrF+ho7lAhIWZ7bVe7sZCe9D/+xgkxLP1uA
KKhCtMklziYuNoRMpZ+be9e2OHu7p05Yj4AjhHTyMvizQLm2
-----END CERTIFICATE-----
================================================
FILE: kafka-connect-mtls/connect/secrets/connect.key.pem
================================================
-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: AES-256-CBC,03578ECB28A28BA14408AF4EB2F82DCB
hQkZlH8VnKBOMDtLMQT85fvYzoBYCEyjif/1/6QPid1asfxp7LGAiIEVM/sSecRz
axGneC1B9dp1gEY09XISa2ChZwGY+vUfmpOdpyi+aeSLUeF05jsPS16fbvDfo8t4
JnMk8XsvXs1SmXBtMvcFDnqVxWYp/kBNk2CrM24/aRgN14pykSEu0Uy5NhUGbQ/D
vdhFq1BdWOapQQN0Oi7O5vtttOapiLKPPKaewGGV1LhyG2rJEoxj5JzrpIj2N6mB
yjzGaHkNdvFi2BoHb/UIAaKSX5Kn+B1MW4m7eJ3YuogvBnlmr6pZaa9xxYOI+fV8
eM1JG0U1P477dqLJLRCRTtYZACmXbRy5/WV1TewKjw/ij7QHQ68ISQa2748X1gL6
09jL5Grux/gJSDuJrhuSMyPwYSJNx5585/HLLQseKOFdvqFbHAjfd2/ZptaBpxp4
jbkylbVxvroPZQjRhj1p0v3CkUCWYUg3CSkzNLR5Y21AvqH5ZHCbjerfZvrPp3Wc
nHShzc18wUruHmT8dwdDSb7s5OJRFkEDLNFsCsijtl47yDwaQoazeJ8UXkQ/q+FR
iIfctz0JZrWXbH89nr3i3cjwGOxQmPuMiCypYov0YezuWwyiqQMM2r62nyFTCUQQ
ooqh3OV02suBNn2GXnrXwzdCoCgcL61a2l4+rHu8rsKHKX3VzEk+SP/WsgO65KAg
jCSRV3zVTWTRbXmvFM+tv6ARDKgvhJUYAC4zuP5ZQJHsoLXhsJ/nHjlUVFVub9aT
+BieN64UHih7lfKzK3OZJAuu5hSMY8vA8JuAkBoNNKB+CEwQnakhEQB6u87s9xgO
GBu6med8u0isWI9uSwZ0u2/MaELRmcx6MvjdOFh8TIWU1vdtcf8F8avyP9wxGBMq
PeFxYJ+qCx8tRUHgO6QmE/cZKmQr//mtZgOMjVwVnysmQLh8Shn1WW2FHhqWauAo
FC2PJPw4aribkG8/O/mVx0P5/bcgHL8N7S1DWLUFsMzMYJJ35CNOG94cOFSAWFRr
2mJLRzJFwxuh1S9S/SwqMhdz96I4OsmKAVp6iKDVusR6qBoAAjkbLCAJqF883FHo
Urfgr5lLx/9wG6E+zGgQOC1bKsFTlSEVQ7vVLizG2etDytOvGM70Gz5ecyDY1zFl
1D9hGLhoJtFWE8U4CLPI6pQrKXRftnUV2RbvmsgRBuHBm/HeBLbNBsuDNK3WCzvI
YRzBpyXOblcENvj258yVtfqjRAR2b5hWeRjdyCZjxNq2S7f1Qow0nhPxe2Fq3JLu
nGstpUt1gwKNstoMEVwYI8TzFP4kRzx5H3w2EgjzxWoybJXqANW3XHySBMcim6NR
QMnn30bcjMI8vIe1AaL+AKskNBf4aVj+4IzvC6L+1yrzI5l4KfWbcJJk+q/rTXdQ
mwy4DW5LfenlZoh8zQIGdHKAbdrFwI0gk0pX3Bjy69+1QAy1gNPqe5L9IUMmbsZE
hueSRSsPgI7PDT2hv8XeoWuy+Un6/l4E34F2WvtR802kaYwgeRZIcJrFV8+yALvt
awVcFBkjmWFRjGLFG7/f29+n998g31FqynKU9NmPL49aB8UfQBrtLY07f6snYPA2
-----END RSA PRIVATE KEY-----
================================================
FILE: kafka-connect-mtls/docker-compose.yml
================================================
---
# CP 5.3.1 stack (zookeeper, kafka, schema-registry) plus a Kafka Connect
# worker whose REST interface is served over HTTPS with mandatory TLS
# client authentication (ssl.client.auth=required).
version: '3'
services:
zookeeper:
image: confluentinc/cp-zookeeper:5.3.1
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
kafka:
image: confluentinc/cp-enterprise-kafka:5.3.1
depends_on:
- zookeeper
ports:
- 9092:9092
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092
KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 100
CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: kafka:29092
CONFLUENT_METRICS_REPORTER_ZOOKEEPER_CONNECT: zookeeper:2181
CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1
CONFLUENT_METRICS_ENABLE: 'true'
CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous'
schema-registry:
image: confluentinc/cp-schema-registry:5.3.1
depends_on:
- zookeeper
- kafka
ports:
- 8081:8081
environment:
SCHEMA_REGISTRY_HOST_NAME: schema-registry
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper:2181
kafka-connect-cp:
image: confluentinc/cp-kafka-connect:5.3.1
container_name: kafka-connect
depends_on:
- zookeeper
- kafka
- schema-registry
ports:
- 18083:18083
# Server keystore/truststore are mounted from ./connect/secrets.
volumes:
- ./connect/secrets:/etc/kafka-connect/secrets
environment:
CONNECT_BOOTSTRAP_SERVERS: "kafka:29092"
CONNECT_REST_PORT: 18083
CONNECT_GROUP_ID: kafka-connect-cp
CONNECT_CONFIG_STORAGE_TOPIC: docker-kafka-connect-cp-configs
CONNECT_OFFSET_STORAGE_TOPIC: docker-kafka-connect-cp-offsets
CONNECT_STATUS_STORAGE_TOPIC: docker-kafka-connect-cp-status
CONNECT_KEY_CONVERTER: io.confluent.connect.avro.AvroConverter
CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081'
CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081'
CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
CONNECT_REST_ADVERTISED_HOST_NAME: "kafka-connect-cp"
CONNECT_LOG4J_ROOT_LOGLEVEL: "INFO"
CONNECT_LOG4J_LOGGERS: "org.apache.kafka.connect.runtime.rest=WARN,org.reflections=ERROR"
CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: "1"
CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: "1"
CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: "1"
CONNECT_PLUGIN_PATH: '/usr/share/java'
# HTTPS-only REST listener; clients must present a certificate signed by
# the CA in server.truststore.
CONNECT_LISTENERS: 'https://0.0.0.0:18083'
CONNECT_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: "HTTPS"
CONNECT_LISTENERS_HTTPS_SSL_TRUSTSTORE_LOCATION: /etc/kafka-connect/secrets/server.truststore
CONNECT_LISTENERS_HTTPS_SSL_TRUSTSTORE_PASSWORD: confluent
CONNECT_LISTENERS_HTTPS_SSL_KEYSTORE_LOCATION: /etc/kafka-connect/secrets/server.keystore
CONNECT_LISTENERS_HTTPS_SSL_KEYSTORE_PASSWORD: confluent
CONNECT_LISTENERS_HTTPS_SSL_CLIENT_AUTH: required
================================================
FILE: kafka-connect-mtls/up
================================================
#!/usr/bin/env bash
# Start the playbook services in the background.
docker-compose up -d
echo "to verify the connection use the check-ssl-client-auth.sh script"
================================================
FILE: kerberos/README.md
================================================
# Kerberos configuration demo
This demo sets up a basic Kafka cluster secured with Kerberos authentication, and sets up some basic ACLs to demonstrate authorisation.
The documentation below introduces the relevant components you need to understand to set up Kerberos in a Linux / JVM environment.
## Kerberos authentication process
Before configuring Kafka for Kerberos authentication, it is useful to understand the basics of Kerberos; the authentication process and some key terms.
_A note on what Kerberos is and is not: Kerberos is a *network authentication protocol* which allows a client application to connect to a network service in a way that allows the components to mutually verify each other's identities._
_It is put to good use in and integrated with network directory services, notably Windows Active Directory._
_Here, Kerberos identities are bound to network accounts and access privileges and, in the case of Windows, the SSPI API supports single sign-on and privilege impersonation natively in the OS._
_This is enabled by Kerberos, but Kerberos itself is not bound to such accounts and does not provide any such capability._
With that, let's work through the process for a client application making a connection into Kafka.
Kerberos involves three parties:
- a Kerberos Client, in this case our client application.
- a Kerberized Service, in this case Kafka.
- the Kerberos **Key Distribution Center (KDC)**
An important point to understand in this process is that the Client and Service each shares their own cryptographic key with the KDC.
By using this key to encrypt/decrypt tokens passed over the network, two network systems can verify each other's identities.
The Client and Service trust that they have only shared their secret with the KDC and so any correctly signed token must have originated from the KDC.
This is crucial.
During the Kerberos process the Client requests a token from the KDC _signed with the Service's key_ and presents this when making a connection.
The Service can then trust that the Client has valid credentials with the KDC and can be authenticated.
Other information is shared during the process to enable integrity checking and protection against various spoofing attacks.
For example, each signed token is:
* timestamped to bound the window for which it is valid
* linked to a network IP so that it is valid only from a single host
The first stage is that the Client application must authenticate itself with the KDC by proving that the Client knows the private credentials relating to the Client's Kerberos **Principal**.
The Principal is a unique identity in the form {primary}/{instance}@{REALM} (more on these later).
The KDC authenticates the client using their shared cryptographic key, and this process results in the client receiving a **Ticket Granting Ticket (TGT)**.
This is a cryptographic token that the Client may now use to prove that it has recently authenticated with the KDC.
The TGT is timestamped and includes an expiry time, typically a day.
The TGT is cached by the client to avoid having to re-authenticate unnecessarily.
Next, the Client wants to authenticate itself to the Kerberized Service.
For this to happen, the client must get a cryptographic token encrypted with the Kerberized Service's key - this token is a **Service Ticket** and is requested by the client from the KDC using the TGT and the requested service's principal name.
Including the TGT in this request is sufficient to prove that the client has already authenticated with the KDC allowing the service ticket to be returned.
Here is an important point to note - how does the client know the service principal name?
Very simply, it builds the principal with:
- {primary} = a client-side configured name for the service
- {instance} = the network address used to connect to the service
- {REALM} = the realm of the client and KDC.
In our example, our Client attempts to connect to the `kafka` Service on the host `kafka.kerberos-demo.local` in the realm `TEST.CONFLUENT.IO`.
Therefore, the service must be configured with a Service Principal Name of `kafka/kafka.kerberos-demo.local@TEST.CONFLUENT.IO`.
Now the Client can connect directly to the Kerberized Service, and include the Service Ticket.
As the Service ticket is signed with the Service principal's key, the Service can decrypt the token to authenticate the request.
Based on the above, each connection in the cluster must be established with the following in place:
* On the Kerberos Client:
* A client principal and key to authenticate with the KDC, `{client name}@REALM`
* This is the *User Principal Name*.
* a configured name for the service to connect to, `{service name}`
* the network address for the service, `{network address}`.
* On the server:
* a principal name & key in the form `{service name}/{network address}@REALM`.
* This is the *Service Principal Name*.
As can be seen, the service principal must be constructed correctly to work.
However, the `{client name}` format is not mandated in the same way and is not bound to a network address.
Often the client name is a simple alphanumeric username, let's say 'john'.
However, you may sometimes see a client principal such as 'john/admin'.
In this form, 'admin' is called an _instance_ of the 'john' principal and can be used by 'john' to run services on the system with different credentials and privileges from the main account.
From the Kerberos perspective, the two principals are completely separate, but it can nonetheless be convenient to use this naming convention.
# Technical Components
## KDC
The KDC could be provided by MIT Kerberos, Windows Active Directory, Redhat Identity Manager and many others.
In this demo we use MIT Kerberos.
## Kerberos libraries and tools
All the hosts must include Kerberos libraries and a shared configuration (krb5.conf) in order to use and trust the same KDC.
`kinit` is used to authenticate to the Kerberos server as principal, or if none is given, a system generated default (typically your login name at the default realm), and acquire a ticket granting ticket that can later be used to obtain tickets for other services.
`klist` reads and displays the current tickets in the credential cache (also known as the ticket file).
`kvno` acquires a service ticket for the specified Kerberos principals and prints out the key version numbers of each.
`kadmin` is an admin utility for working with the Kerberos database.
A common task when configuring for Kerberos is to build *keytab* files (short for Key Table).
Keytabs are files containing one or more Kerberos principal/credential pairs.
By having these in a file, services can automatically authenticate with the KDC without prompting the user and it is common to build and distribute keytabs as part of a deployment.
However, _as these files contain secret credentials, it is important to take care to protect against loss of these files_.
See [kerberos cheatsheet](../KerberosCheatsheet.md) for examples of using the Kerberos toolset.
## Simple Authentication and Security Layer (SASL)
SASL is a framework for authentication in network communications which in principle decouples authentication concerns from the application protocol.
Kafka and Zookeeper can use SASL as the authentication layer in communications (Mutual TLS being the notable alternative).
When SASL has been enabled, you must further specify a SASL *mechanism* to use - the process and protocol to use when authenticating a connection.
Applications must build support for each SASL mechanism - Kafka supports SCRAM(-SHA-256 | -SHA-512), PLAIN, OAUTHBEARER and GSSAPI.
*GSSAPI is the SASL mechanism which implements Kerberos*.
## Java Authentication and Authorization Services (JAAS)
JAAS is Java's integrated, pluggable security service, and Kafka uses the JAAS APIs to implement SASL authentication.
SASL authentication is configured using JAAS.
Kerberos is configured using the JAAS *LoginModule* `com.sun.security.auth.module.Krb5LoginModule`.
JAAS may be configured in a couple of places:
* By default it uses a .jaas file, a reference to which is passed in the `-Djava.security.auth.login.config=` JVM flag.
Each jaas file includes multiple named stanzas, representing different login contexts.
* An application can override this configuration and configure JAAS from application config.
Kafka configurations expose this option using properties `sasl.jaas.config`, which can variously be prefixed.
The value is the inline configuration for a single login context and, in Kafka, takes precedence over entries in a .jaas file.
https://docs.oracle.com/javase/8/docs/jre/api/security/jaas/spec/com/sun/security/auth/module/Krb5LoginModule.html
A Kerberos enabled Client or Service can be initiated in two ways:
1. Use `kinit` to cache a TGT locally, and then launch the process with this shared cache.
2. Configure a keytab to be used directly.
Configuration of the former is straight-forward as follows:
```
SomeLoginContext {
com.sun.security.auth.module.Krb5LoginModule required
useTicketCache = true;
};
```
The `useTicketCache = true` setting specifies that the TGT cache should be used.
By comparison, the latter approach has `useTicketCache = false` (the default) and then continues to specify details for using a keytab file:
```
SomeLoginContext {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/var/lib/secret/kafka1.key"
principal="kafka/kafka1.kerberos-demo.local@TEST.CONFLUENT.IO";
};
```
The login context, as identified with `SomeLoginContext` above, can be used by a Client, a Service or both.
For Kafka, the names are defined in the application code as we will describe later.
# Kerberizing Kafka
To fully understand the steps required to Kerberize Kafka, we should understand each Client → Service connection which we wish to configure.
Each of these connections has a prototypical set of configurations required on the Client side and on the Service side.
The following are values you must decide upon at the cluster level:
* `{kafka-kerberos-service-name}` - name for the Kerberized Kafka service.
Typically `kafka` or `cp-kafka`.
* `{zookeeper-kerberos-service-name}` - name for the Kerberized Zookeeper service.
By default this is `zookeeper`.
* `{security-protocol}` - either `SASL_PLAINTEXT` or `SASL_SSL` if using in conjunction with TLS.
## Service Configurations
In a single node Broker/Zookeeper environment there are just two Kerberized services running.
We will configure these first and then the clients.
### Kafka Service
* Broker JAAS:
* Login Context: `KafkaServer`
* Use *keytab* method.
* Ensure that the principal is a correctly formed service principal for each node: `{kafka-kerberos-service-name}/{FQDN}@{realm}`.
* Example: [kafka/kafka.sasl.jaas.config](kafka/kafka.sasl.jaas.config)
* Broker Server Properties:
* `sasl.enabled.mechanisms=GSSAPI` (more SASL mechanisms may be specified in a comma-separated list)
* `sasl.kerberos.service.name={kafka-kerberos-service-name}`
* `{listener_name}.{sasl_mechanism}.sasl.jaas.config` - jaas configuration on a per-listener basis.
* Example: [kafka/server.properties](kafka/server.properties)
### Zookeeper Service
* Zookeeper JAAS:
* Client API - Kerberize access to ZooKeeper data.
* Login Context: `Server`.
* Use *keytab* method.
* Ensure that the principal is a correctly formed service principal for each node: `{zookeeper-kerberos-service-name}/{FQDN}@{realm}`.
* Example: [zookeeper/zookeeper.sasl.jaas.config](zookeeper/zookeeper.sasl.jaas.config)
* Zookeeper Properties:
* authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider
* requireClientAuthScheme=sasl
* Example: [zookeeper/zookeeper.properties](zookeeper/zookeeper.properties)
## Client Configurations
### Kafka Broker → Zookeeper Service
Brokers connect to Zookeeper for cluster operations.
* Broker JAAS:
* Login Context: `Client`
* Use *keytab* method.
* *Ensure that the same principal is configured for use on each broker.*
* Example: [kafka/kafka.sasl.jaas.config](kafka/kafka.sasl.jaas.config).
* Broker JVM flags:
* `-Dzookeeper.sasl.client.username={zookeeper-kerberos-service-name}` (OPTIONAL)
### Client → Kafka Service
Clients connecting in to Kafka may be any of:
* A Kafka producer
* A Kafka consumer
* A Kafka Admin client
Note that many applications are a combination of many of these - notably Streams applications and Kafka Connect.
* Client JAAS:
* Login Context: `KafkaClient`
* Can use *kinit* or *keytab* method.
* Example: [client/client.sasl.jaas.config](client/client.sasl.jaas.config)
* Client Properties:
* `sasl.kerberos.service.name={kafka-kerberos-service-name}`
* `security.protocol={security-protocol}`
* `sasl.jaas.config` - jaas override.
* Examples: [client/producer.properties](client/producer.properties), [client/consumer.properties](client/consumer.properties), [client/command.properties](client/command.properties)
### Client → Zookeeper (Optional)
Historically, clients needed to connect directly to ZooKeeper for service discovery and admin operations.
However, the new Kafka Admin API allows all this functionality via Client → Kafka Broker connection, so this direct connection should not be required.
* JAAS:
* LoginContext: `Client`
* Can use *kinit* or *keytab* method.
* JVM flags:
* `-Dzookeeper.sasl.client.username={zookeeper-kerberos-service-name}` (OPTIONAL)
### Confluent Metrics Reporter → Kafka Service (Optional)
The Confluent metrics reporter runs as a plugin within the Kafka broker, but from a Kerberos point of view is configured as a network client.
The configuration, including inline Jaas, is specified within the broker properties using a keytab:
* `confluent.metrics.reporter.sasl.mechanism=GSSAPI`
* `confluent.metrics.reporter.security.protocol={security-protocol}`
* `confluent.metrics.reporter.sasl.kerberos.service.name={kafka-kerberos-service-name}`
* `confluent.metrics.reporter.sasl.jaas.config={inline jaas configuration}`
* Example: [kafka/server.properties](kafka/server.properties)
# Authentication is not enough!
The steps above are sufficient to support Kerberos authenticated connections within the cluster.
This does not make your cluster secure though!
The demo also applies a minimal level of authorisation to prevent unauthenticated network access to the brokers and Zookeeper.
The following should be reviewed in the broker server properties:
* `allow.everyone.if.no.acl.found=false` - default to no access.
* `authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer` - enable the default authoriser for Kafka.
* `zookeeper.set.acl=true` - when storing ACL data in Zookeeper, apply Zookeeper access controls so that only the Broker → Zookeeper client principal can read and modify the lists.
* Example: [kafka/server.properties](kafka/server.properties)
# Putting it into action
In this demo we configure:
* A simple KDC to generate principals and keytabs.
* A single node Zookeeper with a Kerberized data access API.
* A single node Kafka broker with a Kerberized listener.
* Set up ACLs allowing `kafka-console-producer` and `kafka-console-consumer` usage.
_A basic knowledge of Docker is useful to follow the code, though only basic Docker techniques are used to keep the code readable._
_Each node is built using a `Dockerfile` into which configuration values are hard-coded, and the services are brought up using `docker-compose`._
_Kerberos keytabs and the krb5.conf file are shared amongst all nodes on the cluster using a shared Docker volume._
The demo is run using the [up](up) script, which orchestrates the following process:
* Builds and starts the KDC.
All nodes are joined to the KDC's realm by sharing `krb5.conf` amongst all nodes.
* Generates Kerberos principals and keytabs, sharing these on the shared Docker volume.
* Builds and starts Zookeeper, Kafka broker and Client.
* Uses the `admin` super user to configure ACLs for the `producer` and `consumer` users.
* Prints example usage to connect into Kafka with a Kerberos principal.
This is actually executed via the `client` node.
# Next up
* Extending Kerberos configuration to a full cluster (coming soon)
* Hardening access controls
# References
* https://www.youtube.com/watch?v=KD2Q-2ToloE Video overview of Kerberos authentication process.
================================================
FILE: kerberos/client/Dockerfile
================================================
# Client tools image: Confluent Kafka CLI tools plus Kerberos workstation
# utilities, used interactively via `docker exec`.
FROM centos:centos7
MAINTAINER d.gasparina@gmail.com
ENV container docker
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install confluent kafka tools:
RUN yum install -y java-11-openjdk
RUN yum install -y confluent-kafka-2.12
# 3. Install Kerberos libraries
RUN yum install -y krb5-workstation krb5-libs
# 4. Copy in required settings for client access to Kafka
COPY consumer.properties /etc/kafka/consumer.properties
COPY producer.properties /etc/kafka/producer.properties
COPY command.properties /etc/kafka/command.properties
COPY client.sasl.jaas.config /etc/kafka/client_jaas.conf
# Point the JVM at the client JAAS file copied above. (Bug fix: this
# previously referenced /etc/kafka/kafka_server_jaas.conf, a path that is
# never created in this image.)
ENV KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/client_jaas.conf
# Keep the container alive so users can exec into it and run the CLI tools.
CMD sleep infinity
================================================
FILE: kerberos/client/client.sasl.jaas.config
================================================
/*
 * Credentials to use when connecting to ZooKeeper directly.
 *
 * Whenever possible you should use the Kafka AdminClient API instead of ZooKeeper.
 *
 * useTicketCache=true: authenticate from the TGT cache populated by a prior
 * `kinit`, rather than from a keytab file.
 */
Client {
com.sun.security.auth.module.Krb5LoginModule required
useTicketCache=true;
};
/*
 * Credentials to connect to Kafka.
 * Same kinit/ticket-cache mechanism as the Client context above.
 */
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useTicketCache=true;
};
================================================
FILE: kerberos/client/command.properties
================================================
# Client settings for admin commands (used by kafka-acls via --command-config in ../up).
bootstrap.servers=kafka:9093
# Kerberos (GSSAPI) authentication without TLS encryption.
security.protocol=SASL_PLAINTEXT
# Inline JAAS: serviceName names the Kerberized Kafka service and
# useTicketCache authenticates from a kinit-populated TGT cache.
sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
serviceName=kafka \
useTicketCache=true;
================================================
FILE: kerberos/client/confluent.repo
================================================
# Confluent 5.4 yum repositories (dist + archive), GPG-verified.
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.4/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.4
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
================================================
FILE: kerberos/client/consumer.properties
================================================
# Kafka consumer configuration for the Kerberized cluster.
bootstrap.servers=kafka:9093
# Kerberos (GSSAPI) authentication without TLS encryption.
security.protocol=SASL_PLAINTEXT
# Must match the service part of the broker principal (kafka/...@REALM).
sasl.kerberos.service.name=kafka
# Authenticate from the TGT cache populated by a prior kinit.
sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
useTicketCache=true;
================================================
FILE: kerberos/client/producer.properties
================================================
# Kafka producer configuration for the Kerberized cluster.
bootstrap.servers=kafka:9093
# Kerberos (GSSAPI) authentication without TLS encryption.
security.protocol=SASL_PLAINTEXT
# Must match the service part of the broker principal (kafka/...@REALM).
sasl.kerberos.service.name=kafka
# Authenticate from the TGT cache populated by a prior kinit.
sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
useTicketCache=true;
================================================
FILE: kerberos/docker-compose.yml
================================================
# Single-node Kerberos demo: KDC, zookeeper, kafka broker and a client box.
# All services share the "secret" volume (keytabs) and the same krb5.conf.
version: '3.5'
services:
  kdc:
    hostname: kdc.kerberos-demo.local
    #domainname: kerberos_default
    build: kdc/
    container_name: kdc
    volumes:
      # Shared volume where ../up writes the generated keytabs.
      - secret:/var/lib/secret
      - ./kdc/krb5.conf:/etc/kdc/krb5.conf
  zookeeper:
    build: zookeeper/
    container_name: zookeeper
    hostname: zookeeper.kerberos-demo.local
    #domainname: kerberos_default
    depends_on:
      - kdc
    # Required to wait for the keytab to get generated
    restart: on-failure
    volumes:
      - secret:/var/lib/secret
      - ./kdc/krb5.conf:/etc/krb5.conf
  kafka:
    build: kafka/
    container_name: kafka
    hostname: kafka.kerberos-demo.local
    #domainname: kerberos_default
    depends_on:
      - zookeeper
      - kdc
    # Required to wait for the keytab to get generated
    restart: on-failure
    volumes:
      - secret:/var/lib/secret
      - ./kdc/krb5.conf:/etc/krb5.conf
  client:
    build: client/
    container_name: client
    hostname: client.kerberos-demo.local
    #domainname: kerberos_default
    depends_on:
      - kafka
      - kdc
    # NOTE(review): the original comment here implied a restart policy like the
    # other services ("wait for the keytab"), but none is set — confirm whether
    # `restart: on-failure` was intended.
    volumes:
      - secret:/var/lib/secret
      - ./kdc/krb5.conf:/etc/krb5.conf
volumes:
  secret: {}
networks:
  default:
    name: kerberos-demo.local
================================================
FILE: kerberos/kafka/Dockerfile
================================================
# Kafka broker image: Confluent Platform 5.4 on CentOS 8 with a Kerberized
# SASL_PLAINTEXT listener on port 9093.
FROM centos:centos8
MAINTAINER d.gasparina@gmail.com
ENV container docker
# 0. Fixing Mirror list for Centos (centos8 is EOL; packages moved to vault.centos.org)
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install zookeeper and kafka
RUN yum install -y java-11-openjdk
RUN yum install -y confluent-kafka-2.12
RUN yum install -y confluent-control-center
# 3. Configure Kafka for Kerberos
RUN yum install -y krb5-workstation krb5-libs
COPY server.properties /etc/kafka/server.properties
COPY kafka.sasl.jaas.config /etc/kafka/kafka_server_jaas.conf
EXPOSE 9093
# Load the broker JAAS contexts at JVM startup and name the zookeeper service
# principal (zkservice, matching the principal created by ../up) for the
# broker -> zookeeper SASL client connection.
ENV KAFKA_OPTS="-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf -Dzookeeper.sasl.client.username=zkservice"
CMD kafka-server-start /etc/kafka/server.properties
================================================
FILE: kerberos/kafka/confluent.repo
================================================
# Confluent 5.4 yum repositories (dist + archive), GPG-verified.
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.4/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.4
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
================================================
FILE: kerberos/kafka/kafka.sasl.jaas.config
================================================
/*
 * The service principal used by the broker's Kerberized listener.
 * Keytab-based, so the broker authenticates without an interactive kinit.
 */
KafkaServer {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    storeKey=true
    keyTab="/var/lib/secret/kafka.key"
    principal="kafka/kafka.kerberos-demo.local@TEST.CONFLUENT.IO";
};
/*
 * Zookeeper client principal (the broker -> zookeeper connection).
 * NB: the same zkclient principal must be used on every broker so that the
 * zookeeper ACLs written by one broker are usable by the others (see README).
 */
Client {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    storeKey=true
    useTicketCache=false
    keyTab="/var/lib/secret/zookeeper-client.key"
    principal="zkclient@TEST.CONFLUENT.IO";
};
================================================
FILE: kerberos/kafka/server.properties
================================================
# Basic broker and listener configuration
broker.id=0
# Single Kerberized listener; no PLAINTEXT port is exposed.
listeners=SASL_PLAINTEXT://kafka.kerberos-demo.local:9093
zookeeper.connect=zookeeper.kerberos-demo.local:2181
log.dirs=/var/lib/kafka
# Single-node demo: internal-topic replication factors and min ISR are all 1.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
num.partitions=12
# Kerberos / GSSAPI Authentication mechanism
sasl.enabled.mechanisms=GSSAPI
# Must match the service part of the broker principal (kafka/...@REALM).
sasl.kerberos.service.name=kafka
# Configure replication to require Kerberos:
sasl.mechanism.inter.broker.protocol=GSSAPI
security.inter.broker.protocol=SASL_PLAINTEXT
# Authorization config:
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
# Apply zookeeper ACLs so only the broker's zkclient principal can modify ACL metadata.
zookeeper.set.acl=true
# Default-deny: resources without ACLs are inaccessible to non-super users.
allow.everyone.if.no.acl.found=false
super.users=User:admin;User:kafka
# Demonstrate setting up the Confluent Metrics Reporter with required *client* credentials
metric.reporters=io.confluent.metrics.reporter.ConfluentMetricsReporter
confluent.metrics.reporter.bootstrap.servers=kafka:9093
confluent.metrics.reporter.sasl.mechanism=GSSAPI
confluent.metrics.reporter.security.protocol=SASL_PLAINTEXT
confluent.metrics.reporter.sasl.kerberos.service.name=kafka
# Inline JAAS for the reporter's client connection, using the admin keytab.
confluent.metrics.reporter.sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
useKeyTab=true \
storeKey=true \
keyTab="/var/lib/secret/kafka-admin.key" \
principal="admin/for-kafka@TEST.CONFLUENT.IO";
confluent.metrics.reporter.topic.replicas=1
confluent.support.metrics.enable=false
confluent.support.customer.id=anonymous
================================================
FILE: kerberos/kdc/Dockerfile
================================================
# MIT Kerberos KDC for the demo realm TEST.CONFLUENT.IO.
FROM centos:centos8
MAINTAINER d.gasparina@gmail.com
ENV container docker
# 0. Fixing Mirror list for Centos (centos8 is EOL; packages moved to vault.centos.org)
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Installing Kerberos server, admin and client
RUN yum install -y krb5-server krb5-libs
RUN yum install -y krb5-workstation krb5-libs
# 2. Configuring Kerberos and KDC
COPY krb5.conf /etc/krb5.conf
RUN mkdir /var/log/kerberos
RUN mkdir /etc/kdc
RUN mkdir -p /var/kerberos/krb5kdc/
RUN ln -s /etc/krb5.conf /etc/kdc/krb5.conf
# Standard Kerberos port.
EXPOSE 88
# Create the realm database at build time; -s writes a stash file so the
# daemon can start without prompting for the master password ("confluent").
RUN kdb5_util -P confluent -r TEST.CONFLUENT.IO create -s
# Run the KDC in the foreground (-n) as the container's main process.
CMD /usr/sbin/krb5kdc -n
================================================
FILE: kerberos/kdc/krb5.conf
================================================
# Shared Kerberos configuration for all nodes in the demo realm.
[libdefaults]
default_realm = TEST.CONFLUENT.IO
forwardable = true
rdns = false
# Locate the KDC and map hosts to realms from this file, not from DNS.
dns_lookup_kdc = no
dns_lookup_realm = no
[realms]
TEST.CONFLUENT.IO = {
kdc = kdc
admin_server = kadmin
}
[domain_realm]
.test.confluent.io = TEST.CONFLUENT.IO
test.confluent.io = TEST.CONFLUENT.IO
kerberos-demo.local = TEST.CONFLUENT.IO
.kerberos-demo.local = TEST.CONFLUENT.IO
[logging]
kdc = FILE:/var/log/kerberos/krb5kdc.log
admin_server = FILE:/var/log/kerberos/kadmin.log
default = FILE:/var/log/kerberos/krb5lib.log
================================================
FILE: kerberos/up
================================================
#!/bin/sh
set -e
# Starting kerberos,
# Avoiding starting up all services at the beginning to generate the keytab first
docker-compose build
docker-compose up -d kdc

### Create the required identities:
# Kafka service principal:
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka/kafka.kerberos-demo.local@TEST.CONFLUENT.IO" > /dev/null

# Zookeeper service principal:
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey zkservice/zookeeper.kerberos-demo.local@TEST.CONFLUENT.IO" > /dev/null

# Create a principal with which to connect to Zookeeper from brokers - NB use the same credential on all brokers!
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey zkclient@TEST.CONFLUENT.IO" > /dev/null

# Create client principals to connect in to the cluster:
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka_producer@TEST.CONFLUENT.IO" > /dev/null
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka_producer/instance_demo@TEST.CONFLUENT.IO" > /dev/null
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka_consumer@TEST.CONFLUENT.IO" > /dev/null

# Create an admin principal for the cluster, which we'll use to setup ACLs.
# Look after this - it's also declared a super user in broker config.
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey admin/for-kafka@TEST.CONFLUENT.IO" > /dev/null

# Remove any stale keytabs before regenerating them.
# (Fixed redirect order: `> /dev/null 2>&1` silences both streams; the previous
#  `2>&1 > /dev/null` still sent stderr to the terminal.)
docker exec -ti kdc rm -f /var/lib/secret/kafka.key > /dev/null 2>&1
docker exec -ti kdc rm -f /var/lib/secret/zookeeper.key > /dev/null 2>&1
docker exec -ti kdc rm -f /var/lib/secret/zookeeper-client.key > /dev/null 2>&1
docker exec -ti kdc rm -f /var/lib/secret/kafka-client.key > /dev/null 2>&1
docker exec -ti kdc rm -f /var/lib/secret/kafka-admin.key > /dev/null 2>&1

# Create keytabs to use for Kafka (-norandkey keeps existing keys valid).
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka.key -norandkey kafka/kafka.kerberos-demo.local@TEST.CONFLUENT.IO " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/zookeeper.key -norandkey zkservice/zookeeper.kerberos-demo.local@TEST.CONFLUENT.IO " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/zookeeper-client.key -norandkey zkclient@TEST.CONFLUENT.IO " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka-client.key -norandkey kafka_producer@TEST.CONFLUENT.IO " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka-client.key -norandkey kafka_producer/instance_demo@TEST.CONFLUENT.IO " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka-client.key -norandkey kafka_consumer@TEST.CONFLUENT.IO " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka-admin.key -norandkey admin/for-kafka@TEST.CONFLUENT.IO " > /dev/null

# Starting zookeeper and kafka now that the keytab has been created with the required credentials and services
docker-compose up -d

# Adding ACLs for consumer and producer user:
docker exec client bash -c "kinit -k -t /var/lib/secret/kafka-admin.key admin/for-kafka && kafka-acls --bootstrap-server kafka:9093 --command-config /etc/kafka/command.properties --add --allow-principal User:kafka_producer --producer --topic=*"
docker exec client bash -c "kinit -k -t /var/lib/secret/kafka-admin.key admin/for-kafka && kafka-acls --bootstrap-server kafka:9093 --command-config /etc/kafka/command.properties --add --allow-principal User:kafka_consumer --consumer --topic=* --group=*"

# Output example usage:
echo "Example configuration to access kafka:"
echo "-> docker-compose exec client bash -c 'kinit -k -t /var/lib/secret/kafka-client.key kafka_producer && kafka-console-producer --broker-list kafka:9093 --topic test --producer.config /etc/kafka/producer.properties'"
echo "-> docker-compose exec client bash -c 'kinit -k -t /var/lib/secret/kafka-client.key kafka_consumer && kafka-console-consumer --bootstrap-server kafka:9093 --topic test --consumer.config /etc/kafka/consumer.properties --from-beginning'"
================================================
FILE: kerberos/zookeeper/Dockerfile
================================================
# Zookeeper image: Confluent Platform 5.4 on CentOS 8 with a Kerberized client API.
FROM centos:centos8
MAINTAINER d.gasparina@gmail.com
ENV container docker
# 0. Fixing Mirror list for Centos (centos8 is EOL; packages moved to vault.centos.org)
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install zookeeper and kafka
RUN yum install -y java-11-openjdk
RUN yum install -y confluent-kafka-2.12
# 3. Configure zookeeper for Kerberos
RUN yum install -y krb5-workstation krb5-libs
COPY zookeeper.properties /etc/kafka/zookeeper.properties
COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf
EXPOSE 2181
# Load the zookeeper JAAS contexts (Server + Client) at JVM startup.
ENV KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf
CMD zookeeper-server-start /etc/kafka/zookeeper.properties
================================================
FILE: kerberos/zookeeper/confluent.repo
================================================
# Confluent 5.4 yum repositories (dist + archive), GPG-verified.
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.4/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.4
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
================================================
FILE: kerberos/zookeeper/zookeeper.properties
================================================
# Zookeeper configuration with SASL (Kerberos) authentication enabled.
dataDir=/var/lib/zookeeper
clientPort=2181
maxClientCnxns=0
# Enable the SASL authentication provider.
authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider
# Reject connections whose SASL authentication fails.
zookeeper.allowSaslFailedClients=false
# Require clients to authenticate using SASL.
requireClientAuthScheme=sasl
================================================
FILE: kerberos/zookeeper/zookeeper.sasl.jaas.config
================================================
/*
 * Server context: the zookeeper service principal, authenticated from a
 * keytab so the service can start without an interactive kinit.
 */
Server {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    keyTab="/var/lib/secret/zookeeper.key"
    storeKey=true
    useTicketCache=false
    principal="zkservice/zookeeper.kerberos-demo.local@TEST.CONFLUENT.IO";
};
/*
 * Client context: authenticate outbound connections from a kinit TGT cache.
 */
Client {
    com.sun.security.auth.module.Krb5LoginModule required
    useTicketCache=true;
};
================================================
FILE: kerberos-multi-node/README.md
================================================
# Kerberos multi-node deployment example
This example shows how-to deploy multiple kafka nodes in an example kerberos enabled environment.
The only thing that's different from your normal environment is that this example uses a different principal for each zookeeper client.
The Jira ticket https://issues.apache.org/jira/browse/KAFKA-7710 contains more information.
TLDR; we have to set two configs in the zookeeper.properties to make this work
```
kerberos.removeHostFromPrincipal = true
kerberos.removeRealmFromPrincipal = false
```
The first removes the hostname from the principal name.
So that anyone authenticated with the principal 'kafka/*@REALM' is allowed by ZK ACLs.
================================================
FILE: kerberos-multi-node/docker-compose.yml
================================================
# Multi-node Kerberos demo: KDC + zookeeper + two Kerberized Kafka brokers.
# All services share the "secret" volume (keytabs) and the same krb5.conf,
# mounted at /tmp/krb5.conf and selected via KRB5_CONFIG / java.security.krb5.conf.
version: '3.8'
services:
  kdc:
    hostname: kdc
    domainname: kerberos-multi-node_default
    build: kdc/
    container_name: kdc
    volumes:
      - secret:/var/lib/secret
  zookeeper:
    build: zookeeper/
    container_name: zookeeper
    hostname: zookeeper
    domainname: kerberos-multi-node_default
    depends_on:
      - kdc
    # Required to wait for the keytab to get generated
    restart: on-failure
    environment:
      # Kerberos debug tracing is enabled for this service.
      - KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf -Dsun.security.krb5.debug=true -Djava.security.krb5.conf=/tmp/krb5.conf
      - KRB5_CONFIG=/tmp/krb5.conf
      #- KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf
    volumes:
      - secret:/var/lib/secret
      - ./kdc/krb5.conf:/tmp/krb5.conf
  kafka:
    build: kafka/
    container_name: kafka
    hostname: kafka
    domainname: kerberos-multi-node_default
    environment:
      # - KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf -Dsun.security.krb5.debug=true -Dzookeeper.sasl.client.username=zkservice
      - KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf -Dzookeeper.sasl.client.username=zkservice -Djava.security.krb5.conf=/tmp/krb5.conf
      - KRB5_CONFIG=/tmp/krb5.conf
    depends_on:
      - zookeeper
      - kdc
    # Required to wait for the keytab to get generated
    restart: on-failure
    volumes:
      - secret:/var/lib/secret
      - ./kdc/krb5.conf:/tmp/krb5.conf
  kafka1:
    build: kafka1/
    container_name: kafka1
    hostname: kafka1
    domainname: kerberos-multi-node_default
    environment:
      # - KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf -Dsun.security.krb5.debug=true -Dzookeeper.sasl.client.username=zkservice
      - KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf -Dzookeeper.sasl.client.username=zkservice -Djava.security.krb5.conf=/tmp/krb5.conf
      - KRB5_CONFIG=/tmp/krb5.conf
    depends_on:
      - zookeeper
      - kdc
    # Required to wait for the keytab to get generated
    restart: on-failure
    volumes:
      - secret:/var/lib/secret
      - ./kdc/krb5.conf:/tmp/krb5.conf
volumes:
  secret: {}
================================================
FILE: kerberos-multi-node/down
================================================
#!/bin/bash
# Stop (and with -d also remove) the docker-compose services for this demo.
remove_containers=no

# Refuse to run outside the directory containing the compose file.
[ -f "${PWD}/docker-compose.yml" ] || {
    echo "No docker-compose found. Exiting."
    exit 2
}

print_usage() {
    echo "Usage: $0 [-h] [-d]"
    echo "-d destroy images. They will be rebuilt next time"
    exit 2
}

# Should use getopts here but, why?
case "${1}" in
    -h)
        print_usage
        ;;
    -d)
        echo "Stopping and destroying containers"
        remove_containers=yes
        ;;
esac

if ! docker-compose stop; then
    echo "Stopping the docker-compose failed. Exiting for manual cleanup"
    echo "I suggest 'docker-compose ps'"
    exit 2
fi

if [[ "${remove_containers}" == "yes" ]]; then
    docker-compose rm --force
fi
================================================
FILE: kerberos-multi-node/kafka/Dockerfile
================================================
# Kafka broker image (multi-node demo): Confluent Platform 6.0 on CentOS 8.
FROM centos:centos8
MAINTAINER d.gasparina@gmail.com
ENV container docker
# 0. Fixing Mirror list for Centos (centos8 is EOL; packages moved to vault.centos.org)
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/6.0/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install zookeeper and kafka
RUN yum install -y java-11-openjdk
RUN yum install -y confluent-platform-2.12
RUN yum install -y confluent-control-center
# 3. Configure Kafka for Kerberos
RUN yum install -y krb5-workstation krb5-libs
COPY server.properties /etc/kafka/server.properties
COPY kafka.sasl.jaas.config /etc/kafka/kafka_server_jaas.conf
COPY consumer.properties /etc/kafka/consumer.properties
EXPOSE 9093
# KAFKA_OPTS (JAAS config, zookeeper service name, krb5.conf path) is injected
# via the docker-compose environment rather than baked into the image.
CMD kafka-server-start /etc/kafka/server.properties
================================================
FILE: kerberos-multi-node/kafka/confluent.repo
================================================
# Confluent 6.0 yum repositories (dist + archive), GPG-verified.
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/6.0/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/6.0/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/6.0
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/6.0/archive.key
enabled=1
================================================
FILE: kerberos-multi-node/kafka/consumer.properties
================================================
# Kafka consumer configuration for the Kerberized multi-node cluster.
bootstrap.servers=kafka:9093
# Kerberos (GSSAPI) authentication without TLS encryption.
security.protocol=SASL_PLAINTEXT
# Must match the service part of the broker principal (kafka/...@REALM).
sasl.kerberos.service.name=kafka
# Authenticate from the TGT cache populated by a prior kinit.
sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
useTicketCache=true;
================================================
FILE: kerberos-multi-node/kafka/kafka.sasl.jaas.config
================================================
/* Broker service principal for the Kerberized listener (keytab-based). */
KafkaServer {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    storeKey=true
    keyTab="/var/lib/secret/kafka.key"
    principal="kafka/kafka.kerberos-multi-node_default@TEST.CONFLUENT.IO";
};
/* Client credentials used by Kafka tools run on this node. */
KafkaClient {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    storeKey=true
    keyTab="/var/lib/secret/kafka.key"
    principal="admin@TEST.CONFLUENT.IO";
};
/*
 * Zookeeper client principal. See ../README.md and KAFKA-7710 for the
 * kerberos.removeHostFromPrincipal discussion that makes per-broker
 * principals work with zookeeper ACLs.
 */
Client {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    storeKey=true
    useTicketCache=false
    keyTab="/var/lib/secret/kafka.key"
    principal="kafka@TEST.CONFLUENT.IO";
};
================================================
FILE: kerberos-multi-node/kafka/server.properties
================================================
# Basic broker and listener configuration
broker.id=0
listeners=SASL_PLAINTEXT://kafka:9093
advertised.listeners=SASL_PLAINTEXT://kafka:9093
# Inter-broker traffic also uses the Kerberized listener.
# (Fix: this key was previously defined twice with the same value; the
# duplicate further down has been removed.)
security.inter.broker.protocol=SASL_PLAINTEXT
log.dirs=/var/lib/kafka
# Demo-sized replication settings for internal topics.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
zookeeper.connect=zookeeper.kerberos-multi-node_default:2181
# Protect zookeeper metadata with ACLs tied to the broker's client principal.
zookeeper.set.acl=true
# Kerberos / GSSAPI Authentication mechanism
sasl.enabled.mechanisms=GSSAPI
sasl.mechanism.inter.broker.protocol=GSSAPI
# Must match the service part of the broker principal (kafka/...@REALM).
sasl.kerberos.service.name=kafka
# Default-deny authorization.
allow.everyone.if.no.acl.found=false
super.users=User:admin;User:kafka
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
# metric reporter configuration with Kerberos
metric.reporters=io.confluent.metrics.reporter.ConfluentMetricsReporter
confluent.metrics.reporter.bootstrap.servers=kafka:9093
confluent.metrics.reporter.sasl.mechanism=GSSAPI
confluent.metrics.reporter.security.protocol=SASL_PLAINTEXT
confluent.metrics.reporter.sasl.kerberos.service.name=kafka
# Inline JAAS for the reporter's client connection, using the broker keytab.
confluent.metrics.reporter.sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
useKeyTab=true \
storeKey=true \
keyTab="/var/lib/secret/kafka.key" \
principal="kafka@TEST.CONFLUENT.IO";
confluent.metrics.reporter.topic.replicas=1
================================================
FILE: kerberos-multi-node/kafka1/Dockerfile
================================================
FROM centos:centos8
MAINTAINER d.gasparina@gmail.com
ENV container docker
# 0. Fixing Mirror list for Centos
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/6.0/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install zookeeper and kafka
RUN yum install -y java-11-openjdk
RUN yum install -y confluent-platform-2.12
RUN yum install -y confluent-control-center
# 3. Configure Kafka for Kerberos
RUN yum install -y krb5-workstation krb5-libs
COPY server.properties /etc/kafka/server.properties
COPY kafka.sasl.jaas.config /etc/kafka/kafka_server_jaas.conf
COPY consumer.properties /etc/kafka/consumer.properties
EXPOSE 9093
CMD kafka-server-start /etc/kafka/server.properties
================================================
FILE: kerberos-multi-node/kafka1/confluent.repo
================================================
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/6.0/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/6.0/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/6.0
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/6.0/archive.key
enabled=1
================================================
FILE: kerberos-multi-node/kafka1/consumer.properties
================================================
bootstrap.servers=kafka:9093
security.protocol=SASL_PLAINTEXT
sasl.kerberos.service.name=kafka
sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
useTicketCache=true;
================================================
FILE: kerberos-multi-node/kafka1/kafka.sasl.jaas.config
================================================
KafkaServer {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/var/lib/secret/kafka.key"
principal="kafka/kafka1.kerberos-multi-node_default@TEST.CONFLUENT.IO";
};
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/var/lib/secret/kafka.key"
principal="admin@TEST.CONFLUENT.IO";
};
Client {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
useTicketCache=false
keyTab="/var/lib/secret/kafka.key"
principal="kafka@TEST.CONFLUENT.IO";
};
================================================
FILE: kerberos-multi-node/kafka1/server.properties
================================================
broker.id=1
listeners=SASL_PLAINTEXT://kafka1:9093
advertised.listeners=SASL_PLAINTEXT://kafka1:9093
security.inter.broker.protocol=SASL_PLAINTEXT
log.dirs=/var/lib/kafka
# Demo-sized cluster: internal topics kept at replication factor 1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
zookeeper.connect=zookeeper.kerberos-multi-node_default:2181
# Apply ZooKeeper ACLs so only the broker's ZK principal can modify metadata
zookeeper.set.acl=true
# Kerberos / GSSAPI Authentication mechanism
sasl.enabled.mechanisms=GSSAPI
sasl.mechanism.inter.broker.protocol=GSSAPI
# (duplicate security.inter.broker.protocol entry removed; it is already set above)
sasl.kerberos.service.name=kafka
# Authorization: deny by default; admin and kafka principals bypass ACL checks
allow.everyone.if.no.acl.found=false
super.users=User:admin;User:kafka
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
# metric reporter configuration with Kerberos (runs as an in-broker network client)
metric.reporters=io.confluent.metrics.reporter.ConfluentMetricsReporter
# NOTE(review): bootstraps against broker 0 ("kafka"), not this node ("kafka1") --
# works as long as broker 0 is up, but verify this is intentional for the demo.
confluent.metrics.reporter.bootstrap.servers=kafka:9093
confluent.metrics.reporter.sasl.mechanism=GSSAPI
confluent.metrics.reporter.security.protocol=SASL_PLAINTEXT
confluent.metrics.reporter.sasl.kerberos.service.name=kafka
confluent.metrics.reporter.sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
useKeyTab=true \
storeKey=true \
keyTab="/var/lib/secret/kafka.key" \
principal="kafka@TEST.CONFLUENT.IO";
confluent.metrics.reporter.topic.replicas=1
================================================
FILE: kerberos-multi-node/kdc/Dockerfile
================================================
FROM centos:centos8
MAINTAINER d.gasparina@gmail.com
ENV container docker
# 0. Fixing Mirror list for Centos
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Installing Kerberos server, admin and client
RUN yum install -y krb5-server krb5-libs
RUN yum install -y krb5-workstation krb5-libs
# 2. Configuring Kerberos and KDC
COPY krb5.conf /etc/krb5.conf
RUN mkdir /var/log/kerberos
RUN mkdir /etc/kdc
RUN mkdir -p /var/kerberos/krb5kdc/
RUN ln -s /etc/krb5.conf /etc/kdc/krb5.conf
EXPOSE 88
RUN kdb5_util -P confluent -r TEST.CONFLUENT.IO create -s
CMD /usr/sbin/krb5kdc -n
================================================
FILE: kerberos-multi-node/kdc/krb5.conf
================================================
[libdefaults]
default_realm = TEST.CONFLUENT.IO
ticket_lifetime = 24h
renew_lifetime = 7d
forwardable = true
rdns = false
dns_lookup_kdc = no
dns_lookup_realm = no
[realms]
TEST.CONFLUENT.IO = {
kdc = kdc
admin_server = kadmin
}
[domain_realm]
.test.confluent.io = TEST.CONFLUENT.IO
test.confluent.io = TEST.CONFLUENT.IO
kerberos_default = TEST.CONFLUENT.IO
.kerberos_default = TEST.CONFLUENT.IO
[logging]
kdc = FILE:/var/log/kerberos/krb5kdc.log
admin_server = FILE:/var/log/kerberos/kadmin.log
default = FILE:/var/log/kerberos/krb5lib.log
================================================
FILE: kerberos-multi-node/up
================================================
#!/bin/sh
# Starting kerberos,
# Avoiding starting up all services at the beginning to generate the keytab first
docker-compose build
docker-compose up -d kdc
# Create the required client credentials (-randkey: no password, keytab-only access)
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka_producer@TEST.CONFLUENT.IO" > /dev/null
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka_consumer@TEST.CONFLUENT.IO" > /dev/null
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka@TEST.CONFLUENT.IO" > /dev/null
# Create server keys
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey zkservice/zookeeper.kerberos-multi-node_default@TEST.CONFLUENT.IO" > /dev/null
# princ for kafka0
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka/kafka.kerberos-multi-node_default@TEST.CONFLUENT.IO" > /dev/null
# princ for kafka1
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka/kafka1.kerberos-multi-node_default@TEST.CONFLUENT.IO" > /dev/null
# Create the keytab to use for Kafka
# FIX: redirections must read "> /dev/null 2>&1" (stdout to null first, then stderr
# to stdout); the previous order "2>&1 > /dev/null" left stderr on the terminal.
docker exec -ti kdc rm -f /var/lib/secret/kafka.key > /dev/null 2>&1
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka.key -glob zkservice/*" > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka.key -glob kafka*" > /dev/null
# Starting zookeeper and kafka now that the keytab has been created with the required credentials and services
docker-compose up -d
# Adding ACLs for consumer and producer user (authenticate as the "kafka" super user first)
docker-compose exec kafka bash -c "kinit -k -t /var/lib/secret/kafka.key kafka && kafka-acls --authorizer-properties zookeeper.connect=zookeeper:2181 --add --allow-principal User:kafka_producer --producer --topic=*"
docker-compose exec kafka bash -c "kinit -k -t /var/lib/secret/kafka.key kafka && kafka-acls --authorizer-properties zookeeper.connect=zookeeper:2181 --add --allow-principal User:kafka_consumer --consumer --topic=* --group=*"
echo "Example configuration to access kafka:"
echo "-> docker-compose exec kafka bash -c 'kinit -k -t /var/lib/secret/kafka.key kafka_producer && kafka-console-producer --broker-list kafka:9093 --topic test --producer.config /etc/kafka/consumer.properties'"
echo "-> docker-compose exec kafka bash -c 'kinit -k -t /var/lib/secret/kafka.key kafka_consumer && kafka-console-consumer --bootstrap-server kafka:9093 --topic test --consumer.config /etc/kafka/consumer.properties --from-beginning'"
================================================
FILE: kerberos-multi-node/zookeeper/Dockerfile
================================================
FROM centos:centos8
MAINTAINER d.gasparina@gmail.com
ENV container docker
# 0. Fixing Mirror list for Centos
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/6.0/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install zookeeper and kafka
RUN yum install -y java-11-openjdk
RUN yum install -y confluent-platform-2.12
# 3. Configure zookeeper for Kerberos
RUN yum install -y krb5-workstation krb5-libs
COPY zookeeper.properties /etc/kafka/zookeeper.properties
COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf
EXPOSE 2181
CMD zookeeper-server-start /etc/kafka/zookeeper.properties
================================================
FILE: kerberos-multi-node/zookeeper/confluent.repo
================================================
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/6.0/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/6.0/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/6.0
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/6.0/archive.key
enabled=1
================================================
FILE: kerberos-multi-node/zookeeper/zookeeper.properties
================================================
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# the directory where the snapshot is stored.
dataDir=/var/lib/zookeeper
# the port at which the clients will connect
clientPort=2181
# disable the per-ip limit on the number of connections since this is a non-production config
maxClientCnxns=0
authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
zookeeper.allowSaslFailedClients=false
requireClientAuthScheme=sasl
kerberos.removeHostFromPrincipal = true
kerberos.removeRealmFromPrincipal = false
================================================
FILE: kerberos-multi-node/zookeeper/zookeeper.sasl.jaas.config
================================================
Server {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
useTicketCache=false
keyTab="/var/lib/secret/kafka.key"
principal="zkservice/zookeeper.kerberos-multi-node_default@TEST.CONFLUENT.IO";
};
Client {
com.sun.security.auth.module.Krb5LoginModule required
useTicketCache=true;
};
================================================
FILE: kerberos-multi-sasl/README.md
================================================
# Kerberos configuration demo
This demo sets up a basic Kafka cluster secured with Kerberos authentication, and sets up some basic ACLs to demonstrate authorisation.
The documentation below introduces the relevant components you need to understand to set up Kerberos in a Linux / JVM environment.
## Kerberos authentication process
Before configuring Kafka for Kerberos authentication, it is useful to understand the basics of Kerberos; the authentication process and some key terms.
_A note on what Kerberos is and is not: Kerberos is a *network authentication protocol* which allows a client application to connect to a network service in a way that allows the components to mutually verify each other's identities._
_It is put to good use in and integrated with network directory services, notably Windows Active Directory._
_Here, Kerberos identities are bound to network accounts and access privileges and, in the case of Windows, the SSPI API supports single sign-on and privilege impersonation natively in the OS._
_This is enabled by Kerberos, but Kerberos itself is not bound to such accounts and does not provide any such capability._
With that, let's work through the process for a client application making a connection into Kafka.
Kerberos involves three parties:
- a Kerberos Client, in this case our client application.
- a Kerberized Service, in this case Kafka.
- the Kerberos **Key Distribution Center (KDC)**
An important point to understand in this process is that the Client and Service each shares their own cryptographic key with the KDC.
By using this key to encrypt/decrypt tokens passed over the network, two network systems can verify each other's identities.
The Client and Service trust that they have only shared their secret with the KDC and so any correctly signed token must have originated from the KDC.
This is crucial.
During the Kerberos process the Client requests a token from the KDC _signed with the Service's key_ and presents this when making a connection.
The Service can then trust that the Client has valid credentials with the KDC and can be authenticated.
Other information is shared during the process to enable integrity checking and protection against various spoofing attacks.
For example, each signed token is:
* timestamped to bound the window for which it is valid
* linked to a network IP so that it is valid only from a single host
The first stage is that the Client application must authenticate itself with the KDC by proving that the Client knows the private credentials relating to the Client's Kerberos **Principal**.
The Principal is a unique identity in the form {primary}/{instance}@{REALM} (more on these later).
The KDC authenticates the client using their shared cryptographic key and results in the client receiving a **Ticket Granting Ticket (TGT)**.
This is a cryptographic token that the Client may now use to prove that it has recently authenticated with the KDC.
The TGT is timestamped and includes an expiry time, typically a day.
The TGT is cached by the client to avoid having to re-authenticate unnecessarily.
Next, the Client wants to authenticate itself to the Kerberized Service.
For this to happen, the client must get a cryptographic token encrypted with the Kerberized Service's key - this token is a **Service Ticket** and is requested by the client from the KDC using the TGT and the requested service's principal name.
Including the TGT in this request is sufficient to prove that the client has already authenticated with the KDC allowing the service ticket to be returned.
Here is an important point to note - how does the client know the service principal name?
Very simply, it builds the principal with:
- {primary} = a client-side configured name for the service
- {instance} = the network address used to connect to the service
- {REALM} = the realm of the client and KDC.
In our example, our Client attempts to connect to the `kafka` Service on the host `kafka.kerberos-demo.local` in the realm `TEST.CONFLUENT.IO`.
Therefore, the service must be configured with a Service Principal Name of `kafka/kafka.kerberos-demo.local@TEST.CONFLUENT.IO`.
Now the Client can connect directly to the Kerberized Service, and include the Service Ticket.
As the Service ticket is signed with the Service principal's key, the Service can decrypt the token to authenticate the request.
Based on the above, each connection in the cluster must be established with the following in place:
* On the Kerberos Client:
* A client principal and key to authenticate with the KDC, `{client name}@REALM`
* This is the *User Principal Name*.
* a configured name for the service to connect to, `{service name}`
* the network address for the service, `{network address}`.
* On the server:
* a principal name & key in the form `{service name}/{network address}@REALM`.
* This is the *Service Principal Name*.
As can be seen, the service principal must be constructed correctly to work.
However, the `{client name}` format is not mandated in the same way and is not bound to a network address.
Often the client name is a simple alphanumeric username, let's say 'john'.
However, you may sometimes see a client principal such as 'john/admin'.
In this form, 'admin' is called an _instance_ of the 'john' principal and can be used by 'john' to run services on the system with different credentials and privileges from the main account.
From the Kerberos perspective, the two principals are completely separate, but it can nonetheless be convenient to use this naming convention.
# Technical Components
## KDC
The KDC could be provided by MIT Kerberos, Windows Active Directory, Redhat Identity Manager and many others.
In this demo we use MIT kerberos.
## Kerberos libraries and tools
All the hosts must include Kerberos libraries and a shared configuration (krb5.conf) in order to use and trust the same KDC.
`kinit` is used to authenticate to the Kerberos server as principal, or if none is given, a system generated default (typically your login name at the default realm), and acquire a ticket granting ticket that can later be used to obtain tickets for other services.
`klist` reads and displays the current tickets in the credential cache (also known as the ticket file).
`kvno` acquires a service ticket for the specified Kerberos principals and prints out the key version numbers of each.
`kadmin` is an admin utility for working with the Kerberos database.
A common task when configuring for Kerberos is to build *keytab* files (short for Key Table).
Keytabs are files containing one or more Kerberos principal/credential pairs.
By having these in a file, services can automatically authenticate with the KDC without prompting the user and it is common to build and distribute keytabs as part of a deployment.
However, _as these files contain secret credentials, it is important to take care to protect against loss of these files_.
See [kerberos cheatsheet](../KerberosCheatsheet.md) for examples of using the Kerberos toolset.
## Simple Authentication and Security Layer (SASL)
SASL is a framework for authentication in network communications which in principle decouples authentication concerns from the application protocol.
Kafka and Zookeeper can use SASL as the authentication layer in communications (Mutual TLS being the notable alternative).
When SASL has been enabled, you must further specify a SASL *mechanism* to use - the process and protocol to use when authenticating a connection.
Applications must build support for each SASL mechanism - Kafka supports SCRAM(-SHA-256 | -SHA-512), PLAIN, OAUTHBEARER and GSSAPI.
*GSSAPI is the SASL mechanism which implements Kerberos*.
## Java Authentication and Authorization Services (JAAS)
JAAS is Java's integrated, pluggable security service, and Kafka uses the JAAS APIs to implement SASL authentication.
SASL authentication is configured using JAAS.
Kerberos is configured using the JAAS *LoginModule* `com.sun.security.auth.module.Krb5LoginModule`.
JAAS may be configured in a couple of places:
* By default it uses a .jaas file, a reference to which is passed in the `-Djava.security.auth.login.config=` JVM flag.
Each jaas file includes multiple named stanzas, representing different login contexts.
* An application can override this configuration and configure JAAS from application config.
Kafka configurations expose this option using properties `sasl.jaas.config`, which can variously be prefixed.
The value is the inline configuration for a single login context and, in Kafka, takes precedence over entries in a .jaas files.
https://docs.oracle.com/javase/8/docs/jre/api/security/jaas/spec/com/sun/security/auth/module/Krb5LoginModule.html
A Kerberos enabled Client or Service can be initiated in two ways:
1. Use `kinit` to cache a TGT locally, and then launch the process with this shared cache.
2. Configure a keytab to be used directly.
Configuration of the former is straight-forward as follows:
```
SomeLoginContext {
com.sun.security.auth.module.Krb5LoginModule required
useTicketCache = true;
};
```
The `useTicketCache = true` setting specifies that the TGT cache should be used.
By comparison, the latter approach has `useTicketCache = false` (the default) and then continues to specify details for using a keytab file:
```
SomeLoginContext {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/var/lib/secret/kafka1.key"
principal="kafka/kafka1.kerberos-demo.local@TEST.CONFLUENT.IO";
};
```
The login context, as identified with `SomeLoginContext` above, can be used by a Client, a Service or both.
For Kafka, the names are defined in the application code as we will describe later.
# Kerberizing Kafka
To fully understand the steps required to Kerberize Kafka, we should understand each Client → Service connection which we wish to configure.
Each of these connections has a prototypical set of configurations required on the Client side and on the Service side.
The following are values you must decide upon at the cluster level:
* `{kafka-kerberos-service-name}` - name for the Kerberized Kafka service.
Typically `kafka` or `cp-kafka`.
* `{zookeeper-kerberos-service-name}` - name for the Kerberized Zookeeper service.
By default this is `zookeeper`.
* `{security-protocol}` - either `SASL_PLAINTEXT` or `SASL_SSL` if using in conjunction with TLS.
## Service Configurations
In a single node Broker/Zookeeper environment there are just two Kerberized services running.
We will configure these first and then the clients.
### Kafka Service
* Broker JAAS:
* Login Context: `KafkaServer`
* Use *keytab* method.
* Ensure that the principal is a correctly formed service principal for each node: `{kafka-kerberos-service-name}/{FQDN}@{realm}`.
* Example: [kafka/kafka.sasl.jaas.config](kafka/kafka.sasl.jaas.config)
* Broker Server Properties:
* `sasl.enabled.mechanisms=GSSAPI` (more SASL mechanisms may be specified in a comma-separated list)
* `sasl.kerberos.service.name={kafka-kerberos-service-name}`
* `{listener_name}.{sasl_mechanism}.sasl.jaas.config` - jaas configuration on a per-listener basis.
* Example: [kafka/server.properties](kafka/server.properties)
### Zookeeper Service
* Zookeeper JAAS:
* Client API - Kerberize access to ZooKeeper data.
* Login Context: `Server`.
* Use *keytab* method.
* Ensure that the principal is a correctly formed service principal for each node: `{zookeeper-kerberos-service-name}/{FQDN}@{realm}`.
* Example: [zookeeper/zookeeper.sasl.jaas.config](zookeeper/zookeeper.sasl.jaas.config)
* Zookeeper Properties:
* authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider
* requireClientAuthScheme=sasl
* Example: [zookeeper/zookeeper.properties](zookeeper/zookeeper.properties)
## Client Configurations
### Kafka Broker → Zookeeper Service
Brokers connect to Zookeeper for cluster operations.
* Broker JAAS:
* Login Context: `Client`
* Use *keytab* method.
* *Ensure that the same principal is configured for use on each broker.*
* Example: [kafka/kafka.sasl.jaas.config](kafka/kafka.sasl.jaas.config).
* Broker JVM flags:
* `-Dzookeeper.sasl.client.username={zookeeper-kerberos-service-name}` (OPTIONAL)
### Client → Kafka Service
Clients connecting in to Kafka may be any of:
* A Kafka producer
* A Kafka consumer
* A Kafka Admin client
Note that many applications are a combination of many of these - notably Streams applications and Kafka Connect.
* Client JAAS:
* Login Context: `KafkaClient`
* Can use *kinit* or *keytab* method.
* Example: [client/client.sasl.jaas.config](client/client.sasl.jaas.config)
* Client Properties:
* `sasl.kerberos.service.name={kafka-kerberos-service-name}`
* `security.protocol={security-protocol}`
* `sasl.jaas.config` - jaas override.
* Examples: [client/producer.properties](client/producer.properties), [client/consumer.properties](client/consumer.properties), [client/command.properties](client/command.properties)
### Client → Zookeeper (Optional)
Historically, clients needed to connect directly to ZooKeeper for service discovery and admin operations.
However, the new Kafka Admin API allows all this functionality via Client → Kafka Broker connection, so this direct connection should not be required.
* JAAS:
* LoginContext: `Client`
* Can use *kinit* or *keytab* method.
* JVM flags:
* `-Dzookeeper.sasl.client.username={zookeeper-kerberos-service-name}` (OPTIONAL)
### Confluent Metrics Reporter → Kafka Service (Optional)
The Confluent metrics reporter runs as a plugin within the Kafka broker, but from a Kerberos point of view is configured as a network client.
The configuration, including inline Jaas, is specified within the broker properties using a keytab:
* `confluent.metrics.reporter.sasl.mechanism=GSSAPI`
* `confluent.metrics.reporter.security.protocol={security-protocol}`
* `confluent.metrics.reporter.sasl.kerberos.service.name={kafka-kerberos-service-name}`
* `confluent.metrics.reporter.sasl.jaas.config={inline jaas configuration}`
* Example: [kafka/server.properties](kafka/server.properties)
# Authentication is not enough!
The steps above are sufficient to support Kerberos authenticated connections within the cluster.
This does not make your cluster secure though!
The demo also applies a minimal level of authorisation to prevent unauthenticated network access to the brokers and Zookeeper.
The following should be reviewed in the broker server properties:
* `allow.everyone.if.no.acl.found=false` - default to no access.
* `authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer` - enable the default authoriser for Kafka.
* `zookeeper.set.acl=true` - when storing ACL data in Zookeeper, apply Zookeeper access controls so that only the Broker → Zookeeper client principal can read and modify the lists.
* Example: [kafka/server.properties](kafka/server.properties)
# Putting it into action
In this demo we configure:
* A simple KDC to generate principals and keytabs.
* A single node Zookeeper with a Kerberized data access API.
* A single node Kafka broker with a Kerberized listener.
* Set up ACLs allowing `kafka-console-producer` and `kafka-console-consumer` usage.
_A basic knowledge of Docker is useful to follow the code, though only basic Docker techniques are used to keep the code readable._
_Each node is built using a `Dockerfile` into which configuration values are hard-coded, and the services are brought up using `docker-compose`._
_Kerberos keytabs and the krb5.conf file are shared amongst all nodes on the cluster using a shared Docker volume._
The demo is run using the [up](up) script, which orchestrates the following process:
* Builds and starts the KDC.
All nodes are joined to the KDC's realm by sharing `krb5.conf` amongst all nodes.
* Generates Kerberos principals and keytabs, sharing these on the shared Docker volume.
* Builds and starts Zookeeper, Kafka broker and Client.
* Uses the `admin` super user to configure ACLs for the `producer` and `consumer` users.
* Prints example usage to connect into Kafka with a Kerberos principal.
This is actually executed via the `client` node.
# Next up
* Extending Kerberos configuration to a full cluster (coming soon)
* Hardening access controls
# References
* https://www.youtube.com/watch?v=KD2Q-2ToloE Video overview of Kerberos authentication process.
================================================
FILE: kerberos-multi-sasl/client/Dockerfile
================================================
FROM centos:centos7
MAINTAINER d.gasparina@gmail.com
ENV container docker
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install confluent kafka tools:
RUN yum install -y java-11-openjdk
RUN yum install -y confluent-platform-2.12
# 3. Install Kerberos libraries
RUN yum install -y krb5-workstation krb5-libs
# 4. Copy in required settings for client access to Kafka
COPY consumer.properties /etc/kafka/consumer.properties
COPY producer.properties /etc/kafka/producer.properties
COPY command.properties /etc/kafka/command.properties
COPY scram.properties /etc/kafka/scram.properties
COPY client.sasl.jaas.config /etc/kafka/client_jaas.conf
# FIX: point the JVM login config at the JAAS file actually copied above
# (client_jaas.conf); the previous value, /etc/kafka/kafka_server_jaas.conf,
# is never created in this image, so JAAS configuration was silently missing.
ENV KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/client_jaas.conf
CMD sleep infinity
================================================
FILE: kerberos-multi-sasl/client/client.sasl.jaas.config
================================================
/*
* Credentials to use when connecting to ZooKeeper directly.
*
* Whenever possible you should use the Kafka AdminClient API instead of ZooKeeper.
*/
Client {
com.sun.security.auth.module.Krb5LoginModule required
useTicketCache=true;
};
/*
* Credentials to connect to Kafka.
*/
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useTicketCache=true;
};
================================================
FILE: kerberos-multi-sasl/client/command.properties
================================================
bootstrap.servers=kafka:9093
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
serviceName=kafka \
useTicketCache=true;
================================================
FILE: kerberos-multi-sasl/client/confluent.repo
================================================
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.4/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.4
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
================================================
FILE: kerberos-multi-sasl/client/consumer.properties
================================================
bootstrap.servers=kafka:9093
security.protocol=SASL_PLAINTEXT
sasl.kerberos.service.name=kafka
sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
useTicketCache=true;
================================================
FILE: kerberos-multi-sasl/client/producer.properties
================================================
bootstrap.servers=kafka:9093
security.protocol=SASL_PLAINTEXT
sasl.kerberos.service.name=kafka
sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
useTicketCache=true;
================================================
FILE: kerberos-multi-sasl/client/scram.properties
================================================
sasl.mechanism=SCRAM-SHA-512
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="kafka" \
password="kafka";
================================================
FILE: kerberos-multi-sasl/docker-compose.yml
================================================
version: '3.5'
services:
  # Kerberos KDC; the ./up script creates principals here and exports their
  # keytabs into the shared "secret" volume for the other services.
  kdc:
    hostname: kdc.kerberos-demo.local
    #domainname: kerberos_default
    build: kdc/
    container_name: kdc
    volumes:
      - secret:/var/lib/secret
      - ./kdc/krb5.conf:/etc/kdc/krb5.conf
  zookeeper:
    build: zookeeper/
    container_name: zookeeper
    hostname: zookeeper.kerberos-demo.local
    #domainname: kerberos_default
    depends_on:
      - kdc
    # Required to wait for the keytab to get generated
    restart: on-failure
    volumes:
      - secret:/var/lib/secret
      - ./kdc/krb5.conf:/etc/krb5.conf
  kafka:
    build: kafka/
    container_name: kafka
    hostname: kafka.kerberos-demo.local
    #domainname: kerberos_default
    depends_on:
      - zookeeper
      - kdc
    # Required to wait for the keytab to get generated
    restart: on-failure
    volumes:
      - secret:/var/lib/secret
      - ./kdc/krb5.conf:/etc/krb5.conf
  client:
    build: client/
    container_name: client
    hostname: client.kerberos-demo.local
    #domainname: kerberos_default
    depends_on:
      - kafka
      - kdc
    # Required to wait for the keytab to get generated
    # (restart policy added: the comment above was present but the policy the
    # sibling services rely on was missing)
    restart: on-failure
    volumes:
      - secret:/var/lib/secret
      - ./kdc/krb5.conf:/etc/krb5.conf
volumes:
  secret: {}
networks:
  default:
    name: kerberos-demo.local
================================================
FILE: kerberos-multi-sasl/kafka/Dockerfile
================================================
# Kafka broker image (CentOS 8 + Confluent Platform 5.4) with Kerberos client
# tooling, used by the kerberos-multi-sasl docker-compose demo.
FROM centos:centos8
MAINTAINER d.gasparina@gmail.com
ENV container docker
# 0. Fixing Mirror list for Centos
# CentOS 8 is EOL: mirrorlist.centos.org no longer resolves, so point yum at
# the vault.centos.org archive instead.
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install zookeeper and kafka
RUN yum install -y java-11-openjdk
RUN yum install -y confluent-platform-2.12
RUN yum install -y confluent-control-center
# 3. Configure Kafka for Kerberos
RUN yum install -y krb5-workstation krb5-libs
COPY server.properties /etc/kafka/server.properties
# JAAS file holds the broker's zookeeper-client credentials (the broker's own
# listener credentials live in server.properties).
COPY kafka.sasl.jaas.config /etc/kafka/kafka_server_jaas.conf
EXPOSE 9093
ENV KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf
CMD kafka-server-start /etc/kafka/server.properties
================================================
FILE: kerberos-multi-sasl/kafka/confluent.repo
================================================
# Yum repository definitions for Confluent Platform 5.4 packages.
[Confluent.dist]
name=Confluent repository (dist)
# The "/7" suffix selects the RHEL/CentOS 7-compatible dist packages.
baseurl=https://packages.confluent.io/rpm/5.4/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.4
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
================================================
FILE: kerberos-multi-sasl/kafka/kafka.sasl.jaas.config
================================================
/*
 * The service principal
 */
/*
 * NOTE(review): the KafkaServer section is left commented out — the broker's
 * GSSAPI and SCRAM credentials are supplied per listener in server.properties
 * (listener.name.sasl_plaintext.*.sasl.jaas.config), which takes precedence
 * over this file. Confirm it is kept only for reference.
 */
/*
KafkaServer {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/var/lib/secret/kafka.key"
principal="kafka/kafka.kerberos-demo.local@TEST.CONFLUENT.IO";
};
*/
/*
 * Zookeeper client principal
 */
/*
 * Used by the broker when connecting to zookeeper; authenticates with the
 * zkclient keytab exported by the ./up script (no ticket cache involved).
 */
Client {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
useTicketCache=false
keyTab="/var/lib/secret/zookeeper-client.key"
principal="zkclient@TEST.CONFLUENT.IO";
};
================================================
FILE: kerberos-multi-sasl/kafka/server.properties
================================================
# Basic broker and listener configuration
broker.id=0
# Single SASL_PLAINTEXT listener: SASL authentication, no TLS encryption.
listeners=SASL_PLAINTEXT://kafka.kerberos-demo.local:9093
zookeeper.connect=zookeeper.kerberos-demo.local:2181
log.dirs=/var/lib/kafka
# Single-broker demo: replication factors and min ISR forced to 1.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
num.partitions=12
# Kerberos / GSSAPI Authentication mechanism
# Two mechanisms are accepted on the same listener: SCRAM-SHA-512 and GSSAPI.
sasl.enabled.mechanisms=SCRAM-SHA-512,GSSAPI
sasl.kerberos.service.name=kafka
# Listener-scoped JAAS for GSSAPI: broker authenticates with its own keytab.
listener.name.sasl_plaintext.gssapi.sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
useKeyTab=true \
storeKey=true \
keyTab="/var/lib/secret/kafka.key" \
principal="kafka/kafka.kerberos-demo.local@TEST.CONFLUENT.IO";
# Listener-scoped JAAS for SCRAM (the "kafka" SCRAM user is created by ./up).
listener.name.sasl_plaintext.scram-sha-512.sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="kafka" \
password="kafka";
# Configure replication to require Kerberos:
sasl.mechanism.inter.broker.protocol=GSSAPI
security.inter.broker.protocol=SASL_PLAINTEXT
# Authorization config:
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
zookeeper.set.acl=true
allow.everyone.if.no.acl.found=false
# "admin" covers the admin/for-kafka principal; "kafka" covers the broker itself.
super.users=User:admin;User:kafka
# Demonstrate setting up the Confluent Metrics Reporter with required *client* credentials
metric.reporters=io.confluent.metrics.reporter.ConfluentMetricsReporter
confluent.metrics.reporter.bootstrap.servers=kafka:9093
confluent.metrics.reporter.sasl.mechanism=GSSAPI
confluent.metrics.reporter.security.protocol=SASL_PLAINTEXT
confluent.metrics.reporter.sasl.kerberos.service.name=kafka
# The reporter connects as a client, so it needs its own (admin) credentials.
confluent.metrics.reporter.sasl.jaas.config=com.sun.security.auth.module.Krb5LoginModule required \
useKeyTab=true \
storeKey=true \
keyTab="/var/lib/secret/kafka-admin.key" \
principal="admin/for-kafka@TEST.CONFLUENT.IO";
confluent.metrics.reporter.topic.replicas=1
confluent.support.metrics.enable=false
confluent.support.customer.id=anonymous
================================================
FILE: kerberos-multi-sasl/kdc/Dockerfile
================================================
# Kerberos KDC image for the demo realm TEST.CONFLUENT.IO.
FROM centos:centos8
MAINTAINER d.gasparina@gmail.com
ENV container docker
# 0. Fixing Mirror list for Centos
# CentOS 8 is EOL: use the vault.centos.org archive instead of the dead mirrors.
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Installing Kerberos server, admin and client
RUN yum install -y krb5-server krb5-libs
RUN yum install -y krb5-workstation krb5-libs
# 2. Configuring Kerberos and KDC
COPY krb5.conf /etc/krb5.conf
RUN mkdir /var/log/kerberos
RUN mkdir /etc/kdc
RUN mkdir -p /var/kerberos/krb5kdc/
RUN ln -s /etc/krb5.conf /etc/kdc/krb5.conf
EXPOSE 88
# KDC database is created at image build time (master password "confluent" —
# demo only, not a secret worth protecting).
RUN kdb5_util -P confluent -r TEST.CONFLUENT.IO create -s
# -n keeps krb5kdc in the foreground so it stays PID 1 of the container.
CMD /usr/sbin/krb5kdc -n
================================================
FILE: kerberos-multi-sasl/kdc/krb5.conf
================================================
# Kerberos configuration shared by the KDC and all client containers.
[libdefaults]
default_realm = TEST.CONFLUENT.IO
ticket_lifetime = 24h
renew_lifetime = 7d
forwardable = true
# Hostnames are docker-compose service names, so skip reverse-DNS
# canonicalization and DNS-based realm/KDC discovery.
rdns = false
dns_lookup_kdc = no
dns_lookup_realm = no
[realms]
TEST.CONFLUENT.IO = {
kdc = kdc
# NOTE(review): no "kadmin" service exists in docker-compose.yml; admin
# operations in ./up run via kadmin.local inside the kdc container — confirm
# this entry is an intentional placeholder.
admin_server = kadmin
}
[domain_realm]
.test.confluent.io = TEST.CONFLUENT.IO
test.confluent.io = TEST.CONFLUENT.IO
kerberos-demo.local = TEST.CONFLUENT.IO
.kerberos-demo.local = TEST.CONFLUENT.IO
[logging]
kdc = FILE:/var/log/kerberos/krb5kdc.log
admin_server = FILE:/var/log/kerberos/kadmin.log
default = FILE:/var/log/kerberos/krb5lib.log
================================================
FILE: kerberos-multi-sasl/up
================================================
#!/bin/sh
# Orchestrates the kerberos-multi-sasl demo:
#   1. build images and start the KDC alone,
#   2. create Kerberos principals and export keytabs into the shared volume,
#   3. start the remaining services,
#   4. create the SCRAM user and the producer/consumer ACLs.
set -e
# Starting kerberos,
# Avoiding starting up all services at the beginning to generate the keytab first
docker-compose build
docker-compose up -d kdc
### Create the required identities:
# Kafka service principal:
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka/kafka.kerberos-demo.local@TEST.CONFLUENT.IO" > /dev/null
# Zookeeper service principal:
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey zookeeper/zookeeper.kerberos-demo.local@TEST.CONFLUENT.IO" > /dev/null
# Create a principal with which to connect to Zookeeper from brokers - NB use the same credential on all brokers!
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey zkclient@TEST.CONFLUENT.IO" > /dev/null
# Create client principals to connect in to the cluster:
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka_producer@TEST.CONFLUENT.IO" > /dev/null
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka_producer/instance_demo@TEST.CONFLUENT.IO" > /dev/null
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey kafka_consumer@TEST.CONFLUENT.IO" > /dev/null
# Create an admin principal for the cluster, which we'll use to setup ACLs.
# Look after this - its also declared a super user in broker config.
docker exec -ti kdc kadmin.local -w password -q "add_principal -randkey admin/for-kafka@TEST.CONFLUENT.IO" > /dev/null
# Create keytabs to use for Kafka
# (stale keytabs are removed first; redirection order fixed so stderr is also
# silenced — "2>&1 > /dev/null" left stderr pointing at the terminal)
docker exec -ti kdc rm -f /var/lib/secret/kafka.key > /dev/null 2>&1
docker exec -ti kdc rm -f /var/lib/secret/zookeeper.key > /dev/null 2>&1
docker exec -ti kdc rm -f /var/lib/secret/zookeeper-client.key > /dev/null 2>&1
docker exec -ti kdc rm -f /var/lib/secret/kafka-client.key > /dev/null 2>&1
docker exec -ti kdc rm -f /var/lib/secret/kafka-admin.key > /dev/null 2>&1
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka.key -norandkey kafka/kafka.kerberos-demo.local@TEST.CONFLUENT.IO " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/zookeeper.key -norandkey zookeeper/zookeeper.kerberos-demo.local@TEST.CONFLUENT.IO " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/zookeeper-client.key -norandkey zkclient@TEST.CONFLUENT.IO " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka-client.key -norandkey kafka_producer@TEST.CONFLUENT.IO " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka-client.key -norandkey kafka_producer/instance_demo@TEST.CONFLUENT.IO " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka-client.key -norandkey kafka_consumer@TEST.CONFLUENT.IO " > /dev/null
docker exec -ti kdc kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka-admin.key -norandkey admin/for-kafka@TEST.CONFLUENT.IO " > /dev/null
# Starting zookeeper and kafka now that the keytab has been created with the required credentials and services
docker-compose up -d
# Adding SCRAM user
docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=kafka],SCRAM-SHA-512=[password=kafka]' --entity-type users --entity-name kafka
# Adding ACLs for consumer and producer user:
docker exec client bash -c "kinit -k -t /var/lib/secret/kafka-admin.key admin/for-kafka && kafka-acls --bootstrap-server kafka:9093 --command-config /etc/kafka/command.properties --add --allow-principal User:kafka_producer --producer --topic=*"
docker exec client bash -c "kinit -k -t /var/lib/secret/kafka-admin.key admin/for-kafka && kafka-acls --bootstrap-server kafka:9093 --command-config /etc/kafka/command.properties --add --allow-principal User:kafka_consumer --consumer --topic=* --group=*"
# Output example usage:
echo "Example configuration to access kafka:"
echo "-> docker-compose exec client bash -c 'kinit -k -t /var/lib/secret/kafka-client.key kafka_producer && kafka-console-producer --broker-list kafka:9093 --topic test --producer.config /etc/kafka/producer.properties'"
echo "-> docker-compose exec client bash -c 'kafka-console-producer --broker-list kafka:9093 --topic test --producer.config /etc/kafka/scram.properties'"
echo "-> docker-compose exec client bash -c 'kinit -k -t /var/lib/secret/kafka-client.key kafka_consumer && kafka-console-consumer --bootstrap-server kafka:9093 --topic test --consumer.config /etc/kafka/consumer.properties --from-beginning'"
================================================
FILE: kerberos-multi-sasl/zookeeper/Dockerfile
================================================
# Zookeeper image (CentOS 8 + Confluent Platform 5.4) with Kerberos client
# tooling, used by the kerberos-multi-sasl docker-compose demo.
FROM centos:centos8
MAINTAINER d.gasparina@gmail.com
ENV container docker
# 0. Fixing Mirror list for Centos
# CentOS 8 is EOL: use the vault.centos.org archive instead of the dead mirrors.
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install zookeeper and kafka
RUN yum install -y java-11-openjdk
RUN yum install -y confluent-platform-2.12
# 3. Configure zookeeper for Kerberos
RUN yum install -y krb5-workstation krb5-libs
COPY zookeeper.properties /etc/kafka/zookeeper.properties
COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf
EXPOSE 2181
ENV KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf
CMD zookeeper-server-start /etc/kafka/zookeeper.properties
================================================
FILE: kerberos-multi-sasl/zookeeper/confluent.repo
================================================
# Yum repository definitions for Confluent Platform 5.4 packages.
[Confluent.dist]
name=Confluent repository (dist)
# The "/7" suffix selects the RHEL/CentOS 7-compatible dist packages.
baseurl=https://packages.confluent.io/rpm/5.4/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.4
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
================================================
FILE: kerberos-multi-sasl/zookeeper/zookeeper.properties
================================================
# Standalone zookeeper for the Kerberos demo.
dataDir=/var/lib/zookeeper
clientPort=2181
# 0 disables the per-host connection limit.
maxClientCnxns=0
# Enable SASL (Kerberos) authentication and require it from every client;
# clients that fail SASL are not allowed to fall back to unauthenticated access.
authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider
zookeeper.allowSaslFailedClients=false
requireClientAuthScheme=sasl
================================================
FILE: kerberos-multi-sasl/zookeeper/zookeeper.sasl.jaas.config
================================================
/*
 * Server section: zookeeper authenticates incoming SASL clients using the
 * service keytab exported into the shared volume by the ./up script.
 */
Server {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
keyTab="/var/lib/secret/zookeeper.key"
storeKey=true
useTicketCache=false
principal="zookeeper/zookeeper.kerberos-demo.local@TEST.CONFLUENT.IO";
};
/*
 * Client section: outbound SASL connections (if any) use a kinit-populated
 * ticket cache rather than a keytab.
 */
Client {
com.sun.security.auth.module.Krb5LoginModule required
useTicketCache=true;
};
================================================
FILE: kraft/none/docker-compose.yml
================================================
---
# KRaft demo: 3 dedicated controllers + 3 brokers, no security (PLAINTEXT).
version: '3'
services:
  kafka-controller-1:
    build: ./image/kafka-images/kafka/
    hostname: kafka-controller-1
    container_name: kafka-controller-1
    environment:
      KAFKA_LISTENERS: CONTROLLER://:9092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_PROCESS_ROLES: controller
      KAFKA_CONTROLLER_QUORUM_VOTERS: 1@kafka-controller-1:9092,2@kafka-controller-2:9092,3@kafka-controller-3:9092
      KAFKA_NODE_ID: 1
      KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
    # NOTE(review): the controller listener binds 9092 only; nothing listens on
    # the published ports 9093-9095 of the controllers — confirm they are needed.
    ports:
      - 9093:9093
  kafka-controller-2:
    build: ./image/kafka-images/kafka/
    hostname: kafka-controller-2
    container_name: kafka-controller-2
    environment:
      KAFKA_LISTENERS: CONTROLLER://:9092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_PROCESS_ROLES: controller
      KAFKA_NODE_ID: 2
      KAFKA_CONTROLLER_QUORUM_VOTERS: 1@kafka-controller-1:9092,2@kafka-controller-2:9092,3@kafka-controller-3:9092
      KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
    ports:
      - 9094:9094
  kafka-controller-3:
    build: ./image/kafka-images/kafka/
    hostname: kafka-controller-3
    container_name: kafka-controller-3
    environment:
      KAFKA_LISTENERS: CONTROLLER://:9092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_PROCESS_ROLES: controller
      KAFKA_NODE_ID: 3
      KAFKA_CONTROLLER_QUORUM_VOTERS: 1@kafka-controller-1:9092,2@kafka-controller-2:9092,3@kafka-controller-3:9092
      KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
    ports:
      - 9095:9095
  kafka-1:
    build: ./image/kafka-images/kafka/
    hostname: kafka-1
    container_name: kafka-1
    environment:
      KAFKA_BROKER_ID: 11
      KAFKA_LISTENERS: INTERNAL://:9092,OUTSIDE://:9091
      KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka-1:9092,OUTSIDE://localhost:9091
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,OUTSIDE:PLAINTEXT,CONTROLLER:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_JMX_PORT: 9999
      KAFKA_JMX_HOSTNAME: kafka-1
      KAFKA_PROCESS_ROLES: broker
      KAFKA_CONTROLLER_QUORUM_VOTERS: 1@kafka-controller-1:9092,2@kafka-controller-2:9092,3@kafka-controller-3:9092
      KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
    ports:
      - 9091:9091
  kafka-2:
    build: ./image/kafka-images/kafka/
    hostname: kafka-2
    container_name: kafka-2
    environment:
      KAFKA_BROKER_ID: 12
      KAFKA_LISTENERS: INTERNAL://:9092,OUTSIDE://:9090
      # Fixed: previously advertised INTERNAL://kafka-1:9092 (copy-paste error),
      # which routed this broker's inter-broker traffic to kafka-1.
      KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka-2:9092,OUTSIDE://localhost:9090
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,OUTSIDE:PLAINTEXT,CONTROLLER:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_JMX_PORT: 9999
      KAFKA_JMX_HOSTNAME: kafka-2
      KAFKA_PROCESS_ROLES: broker
      KAFKA_CONTROLLER_QUORUM_VOTERS: 1@kafka-controller-1:9092,2@kafka-controller-2:9092,3@kafka-controller-3:9092
      KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
    ports:
      - 9090:9090
  kafka-3:
    build: ./image/kafka-images/kafka/
    hostname: kafka-3
    container_name: kafka-3
    environment:
      KAFKA_BROKER_ID: 13
      KAFKA_LISTENERS: INTERNAL://:9092,OUTSIDE://:9089
      # Fixed: previously advertised INTERNAL://kafka-1:9092 (copy-paste error).
      KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka-3:9092,OUTSIDE://localhost:9089
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,OUTSIDE:PLAINTEXT,CONTROLLER:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_JMX_PORT: 9999
      KAFKA_JMX_HOSTNAME: kafka-3
      KAFKA_PROCESS_ROLES: broker
      KAFKA_CONTROLLER_QUORUM_VOTERS: 1@kafka-controller-1:9092,2@kafka-controller-2:9092,3@kafka-controller-3:9092
      KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
    ports:
      - 9089:9089
================================================
FILE: kraft/none/image/kafka-images/kafka/Dockerfile
================================================
#
# Copyright 2019 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Community Kafka image: installs confluent-kafka from the Confluent yum repo
# on top of cp-base-new and launches via /etc/confluent/docker/run.
ARG DOCKER_UPSTREAM_REGISTRY
ARG DOCKER_UPSTREAM_TAG=7.2.1
FROM ${DOCKER_UPSTREAM_REGISTRY}confluentinc/cp-base-new:${DOCKER_UPSTREAM_TAG}
ARG PROJECT_VERSION
ARG ARTIFACT_ID
ARG GIT_COMMIT
LABEL maintainer="partner-support@confluent.io"
LABEL vendor="Confluent"
LABEL version=$GIT_COMMIT
LABEL release=$PROJECT_VERSION
LABEL name=$ARTIFACT_ID
LABEL summary="Confluent platform Kafka."
LABEL io.confluent.docker=true
LABEL io.confluent.docker.git.id=$GIT_COMMIT
ARG BUILD_NUMBER=-1
LABEL io.confluent.docker.build.number=$BUILD_NUMBER
LABEL io.confluent.docker.git.repo="confluentinc/kafka-images"
ARG CONFLUENT_VERSION=7.2.1
ARG CONFLUENT_PACKAGES_REPO=https://packages.confluent.io/rpm/7.2
ARG CONFLUENT_PLATFORM_LABEL=7.2.1
ENV COMPONENT=kafka
# primary
EXPOSE 9092
USER root
# Single layer: write the Confluent repo file, install the kafka package, then
# remove the repo file and caches, and grant the non-root appuser write access.
RUN echo "===> Installing ${COMPONENT}..." \
&& echo "===> Adding confluent repository...${CONFLUENT_PACKAGES_REPO}" \
&& rpm --import ${CONFLUENT_PACKAGES_REPO}/archive.key \
&& printf "[Confluent.dist] \n\
name=Confluent repository (dist) \n\
baseurl=${CONFLUENT_PACKAGES_REPO}/\$releasever \n\
gpgcheck=1 \n\
gpgkey=${CONFLUENT_PACKAGES_REPO}/archive.key \n\
enabled=1 \n\
\n\
[Confluent] \n\
name=Confluent repository \n\
baseurl=${CONFLUENT_PACKAGES_REPO}/ \n\
gpgcheck=1 \n\
gpgkey=${CONFLUENT_PACKAGES_REPO}/archive.key \n\
enabled=1 " > /etc/yum.repos.d/confluent.repo \
&& yum install -y confluent-kafka-${CONFLUENT_VERSION} \
&& echo "===> clean up ..." \
&& yum clean all \
&& rm -rf /tmp/* /etc/yum.repos.d/confluent.repo \
&& echo "===> Setting up ${COMPONENT} dirs" \
&& mkdir -p /var/lib/${COMPONENT}/data /etc/${COMPONENT}/secrets \
&& chown appuser:root -R /etc/kafka /var/log/kafka /var/log/confluent /var/lib/kafka /var/lib/zookeeper /etc/${COMPONENT}/secrets /var/lib/${COMPONENT} /etc/${COMPONENT} \
&& chmod -R ug+w /etc/kafka /var/log/kafka /var/log/confluent /var/lib/kafka /var/lib/zookeeper /var/lib/${COMPONENT} /etc/${COMPONENT}/secrets /etc/${COMPONENT}
VOLUME ["/var/lib/${COMPONENT}/data", "/etc/${COMPONENT}/secrets"]
COPY --chown=appuser:appuser include/etc/confluent/docker /etc/confluent/docker
USER appuser
CMD ["/etc/confluent/docker/run"]
================================================
FILE: kraft/none/image/kafka-images/kafka/Dockerfile.ubi8
================================================
#
# Copyright 2019 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# UBI8 variant of the community Kafka image; content is identical to the
# sibling Dockerfile (base image selection happens via DOCKER_UPSTREAM_* args).
ARG DOCKER_UPSTREAM_REGISTRY
ARG DOCKER_UPSTREAM_TAG=7.2.1
FROM ${DOCKER_UPSTREAM_REGISTRY}confluentinc/cp-base-new:${DOCKER_UPSTREAM_TAG}
ARG PROJECT_VERSION
ARG ARTIFACT_ID
ARG GIT_COMMIT
LABEL maintainer="partner-support@confluent.io"
LABEL vendor="Confluent"
LABEL version=$GIT_COMMIT
LABEL release=$PROJECT_VERSION
LABEL name=$ARTIFACT_ID
LABEL summary="Confluent platform Kafka."
LABEL io.confluent.docker=true
LABEL io.confluent.docker.git.id=$GIT_COMMIT
ARG BUILD_NUMBER=-1
LABEL io.confluent.docker.build.number=$BUILD_NUMBER
LABEL io.confluent.docker.git.repo="confluentinc/kafka-images"
ARG CONFLUENT_VERSION=7.2.1
ARG CONFLUENT_PACKAGES_REPO=https://packages.confluent.io/rpm/7.2
ARG CONFLUENT_PLATFORM_LABEL=7.2.1
ENV COMPONENT=kafka
# primary
EXPOSE 9092
USER root
# Single layer: write the Confluent repo file, install the kafka package, then
# remove the repo file and caches, and grant the non-root appuser write access.
RUN echo "===> Installing ${COMPONENT}..." \
&& echo "===> Adding confluent repository...${CONFLUENT_PACKAGES_REPO}" \
&& rpm --import ${CONFLUENT_PACKAGES_REPO}/archive.key \
&& printf "[Confluent.dist] \n\
name=Confluent repository (dist) \n\
baseurl=${CONFLUENT_PACKAGES_REPO}/\$releasever \n\
gpgcheck=1 \n\
gpgkey=${CONFLUENT_PACKAGES_REPO}/archive.key \n\
enabled=1 \n\
\n\
[Confluent] \n\
name=Confluent repository \n\
baseurl=${CONFLUENT_PACKAGES_REPO}/ \n\
gpgcheck=1 \n\
gpgkey=${CONFLUENT_PACKAGES_REPO}/archive.key \n\
enabled=1 " > /etc/yum.repos.d/confluent.repo \
&& yum install -y confluent-kafka-${CONFLUENT_VERSION} \
&& echo "===> clean up ..." \
&& yum clean all \
&& rm -rf /tmp/* /etc/yum.repos.d/confluent.repo \
&& echo "===> Setting up ${COMPONENT} dirs" \
&& mkdir -p /var/lib/${COMPONENT}/data /etc/${COMPONENT}/secrets \
&& chown appuser:root -R /etc/kafka /var/log/kafka /var/log/confluent /var/lib/kafka /var/lib/zookeeper /etc/${COMPONENT}/secrets /var/lib/${COMPONENT} /etc/${COMPONENT} \
&& chmod -R ug+w /etc/kafka /var/log/kafka /var/log/confluent /var/lib/kafka /var/lib/zookeeper /var/lib/${COMPONENT} /etc/${COMPONENT}/secrets /etc/${COMPONENT}
VOLUME ["/var/lib/${COMPONENT}/data", "/etc/${COMPONENT}/secrets"]
COPY --chown=appuser:appuser include/etc/confluent/docker /etc/confluent/docker
USER appuser
CMD ["/etc/confluent/docker/run"]
================================================
FILE: kraft/none/image/kafka-images/kafka/README.md
================================================
# Confluent Community Docker Image for Apache Kafka
Docker image for deploying and running the Community Version of Kafka packaged with the Confluent Community download. Please see the [cp-server](https://hub.docker.com/r/confluentinc/cp-server) image for additional commercial features that are only part of [Confluent Server](https://docs.confluent.io/platform/current/installation/available_packages.html#confluent-server).
## Using the image
* [Notes on using the image](https://docs.confluent.io/platform/current/installation/docker/installation.html)
* [Configuration Reference](https://docs.confluent.io/platform/current/installation/docker/config-reference.html#confluent-ak-configuration)
## Resources
* [Docker Quick Start for Apache Kafka using Confluent Platform](https://docs.confluent.io/platform/current/quickstart/ce-docker-quickstart.html#ce-docker-quickstart)
* [Learn Kafka](https://developer.confluent.io/learn-kafka)
* [Confluent Developer](https://developer.confluent.io): blogs, tutorials, videos, and podcasts for learning all about Apache Kafka and Confluent Platform
* [confluentinc/cp-demo](https://github.com/confluentinc/cp-demo): GitHub demo that you can run locally. The demo uses this Docker image to showcase Confluent Server in a secured, end-to-end event streaming platform. It has an accompanying playbook that shows users how to use Confluent Control Center to manage and monitor Kafka connect, Schema Registry, REST Proxy, KSQL, and Kafka Streams.
* [confluentinc/examples](https://github.com/confluentinc/examples): additional curated examples in GitHub that you can run locally.
## Contribute
Start by reading our guidelines on contributing to this project, available in the source repository linked below.
* [Source Code](https://github.com/confluentinc/kafka-images)
* [Issue Tracker](https://github.com/confluentinc/kafka-images/issues)
## License
This Docker image is licensed under the Apache 2 license. For more information on the licenses for each of the individual Confluent Platform components packaged in this image, please refer to the respective [Confluent Platform documentation](https://docs.confluent.io/platform/current/installation/docker/image-reference.html).
================================================
FILE: kraft/none/image/kafka-images/kafka/include/etc/confluent/docker/configure
================================================
#!/usr/bin/env bash
#
# Copyright 2016 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
. /etc/confluent/docker/bash-config

# /etc/kafka must be writable so configuration can be rendered into it.
dub path /etc/kafka/ writable

# Default the log dirs when the caller did not provide any.
if [[ -z "${KAFKA_LOG_DIRS-}" ]]
then
  export KAFKA_LOG_DIRS
  KAFKA_LOG_DIRS="/var/lib/kafka/data"
fi

# advertised.host, advertised.port, host and port are deprecated.
# Exit if any of these properties is set, checking them in a fixed order so
# the first offender (by this order) produces the message.
deprecated_env_vars=(KAFKA_ADVERTISED_PORT KAFKA_ADVERTISED_HOST KAFKA_HOST KAFKA_PORT)
deprecated_prop_names=(advertised.port advertised.host host port)
for idx in "${!deprecated_env_vars[@]}"
do
  var_name="${deprecated_env_vars[$idx]}"
  if [[ -n "${!var_name-}" ]]
  then
    echo "${deprecated_prop_names[$idx]} is deprecated. Please use KAFKA_ADVERTISED_LISTENERS instead."
    exit 1
  fi
done

# Render the component's properties and log4j configuration from templates.
dub template "/etc/confluent/docker/${COMPONENT}.properties.template" "/etc/${COMPONENT}/${COMPONENT}.properties"
dub template "/etc/confluent/docker/log4j.properties.template" "/etc/${COMPONENT}/log4j.properties"
dub template "/etc/confluent/docker/tools-log4j.properties.template" "/etc/${COMPONENT}/tools-log4j.properties"
================================================
FILE: kraft/none/image/kafka-images/kafka/include/etc/confluent/docker/ensure
================================================
#!/usr/bin/env bash
#
# Copyright 2020 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
. /etc/confluent/docker/bash-config

# Fall back to the image default when no data dir was provided, then make the
# (possibly inherited) value visible to child processes.
if [[ -z "${KAFKA_DATA_DIRS-}" ]]
then
  KAFKA_DATA_DIRS="/var/lib/kafka/data"
fi
export KAFKA_DATA_DIRS

echo "===> Check if $KAFKA_DATA_DIRS is writable ..."
dub path "$KAFKA_DATA_DIRS" writable
================================================
FILE: kraft/none/image/kafka-images/kafka/include/etc/confluent/docker/kafka.properties.template
================================================
{# Rendered by dub: every KAFKA_* environment variable becomes a broker
   property (KAFKA_FOO_BAR -> foo.bar), except the launcher-only variables
   listed here. A missing comma after 'KAFKA_HEAP_OPTS' previously caused
   Jinja adjacent-string concatenation ('KAFKA_HEAP_OPTSKAFKA_LOG4J_OPTS'),
   so neither variable was actually excluded. #}
{% set excluded_props = ['KAFKA_VERSION',
'KAFKA_HEAP_OPTS',
'KAFKA_LOG4J_OPTS',
'KAFKA_OPTS',
'KAFKA_JMX_OPTS',
'KAFKA_JVM_PERFORMANCE_OPTS',
'KAFKA_GC_LOG_OPTS',
'KAFKA_LOG4J_ROOT_LOGLEVEL',
'KAFKA_LOG4J_LOGGERS',
'KAFKA_TOOLS_LOG4J_LOGLEVEL',
'KAFKA_ZOOKEEPER_CLIENT_CNXN_SOCKET']
-%}
{# properties that don't fit the standard format #}
{% set other_props = {
'KAFKA_ZOOKEEPER_CLIENT_CNXN_SOCKET' : 'zookeeper.clientCnxnSocket'
} -%}
{% set kafka_props = env_to_props('KAFKA_', '', exclude=excluded_props) -%}
{% for name, value in kafka_props.items() -%}
{{name}}={{value}}
{% endfor -%}
{% for k, property in other_props.items() -%}
{% if env.get(k) != None -%}
{{property}}={{env[k]}}
{% endif -%}
{% endfor -%}
{% set confluent_support_props = env_to_props('CONFLUENT_SUPPORT_', 'confluent.support.') -%}
{% for name, value in confluent_support_props.items() -%}
{{name}}={{value}}
{% endfor -%}
================================================
FILE: kraft/none/image/kafka-images/kafka/include/etc/confluent/docker/launch
================================================
#!/usr/bin/env bash
#
# Copyright 2016 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Override this section from the script to include the com.sun.management.jmxremote.rmi.port property.
if [ -z "$KAFKA_JMX_OPTS" ]; then
  export KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false "
fi
# The JMX client needs to be able to connect to java.rmi.server.hostname.
# The default for bridged n/w is the bridged IP so you will only be able to connect from another docker container.
# For host n/w, this is the IP that the hostname on the host resolves to.
# If you have more that one n/w configured, hostname -i gives you all the IPs,
# the default is to pick the first IP (or network).
export KAFKA_JMX_HOSTNAME=${KAFKA_JMX_HOSTNAME:-$(hostname -i | cut -d" " -f1)}
if [ "$KAFKA_JMX_PORT" ]; then
  # This ensures that the "if" section for JMX_PORT in kafka launch script does not trigger.
  export JMX_PORT=$KAFKA_JMX_PORT
  export KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Djava.rmi.server.hostname=$KAFKA_JMX_HOSTNAME -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT -Dcom.sun.management.jmxremote.port=$JMX_PORT"
fi
echo "===> Launching ${COMPONENT} ... "
cat /etc/"${COMPONENT}"/"${COMPONENT}".properties
# Format the KRaft storage directory with a fixed demo cluster id.
# --ignore-formatted makes the format step a no-op when the log dir was already
# formatted, so a container restart does not die here.
kafka-storage format --ignore-formatted -c "/etc/${COMPONENT}/${COMPONENT}.properties" -t Uo9ADuclRIG1r2kcxn4Alw
exec "${COMPONENT}"-server-start /etc/"${COMPONENT}"/"${COMPONENT}".properties
================================================
FILE: kraft/none/image/kafka-images/kafka/include/etc/confluent/docker/log4j.properties.template
================================================
{# Root logger level comes from KAFKA_LOG4J_ROOT_LOGLEVEL (default INFO); all output goes to stdout. #}
log4j.rootLogger={{ env["KAFKA_LOG4J_ROOT_LOGLEVEL"] | default('INFO') }}, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
{# Default per-logger levels for Kafka's internal loggers. #}
{% set loggers = {
'kafka': 'INFO',
'kafka.network.RequestChannel$': 'WARN',
'kafka.producer.async.DefaultEventHandler': 'DEBUG',
'kafka.request.logger': 'WARN',
'kafka.controller': 'TRACE',
'kafka.log.LogCleaner': 'INFO',
'state.change.logger': 'TRACE',
'kafka.authorizer.logger': 'WARN'
} -%}
{# KAFKA_LOG4J_LOGGERS ("logger=level,..." format) overrides/extends the defaults above. #}
{% if env['KAFKA_LOG4J_LOGGERS'] %}
{% set loggers = parse_log4j_loggers(env['KAFKA_LOG4J_LOGGERS'], loggers) %}
{% endif %}
{% for logger,loglevel in loggers.items() %}
log4j.logger.{{logger}}={{loglevel}}
{% endfor %}
================================================
FILE: kraft/none/image/kafka-images/kafka/include/etc/confluent/docker/run
================================================
#!/usr/bin/env bash
#
# Copyright 2016 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Container entry point: exports any KEY=VALUE command-line arguments into the
# environment, then runs the configure -> ensure -> launch pipeline.

# Shared shell settings for the Confluent docker scripts (contents not shown here).
. /etc/confluent/docker/bash-config

# Set environment values if they exist as arguments
if [ $# -ne 0 ]; then
echo "===> Overriding env params with args ..."
for var in "$@"
do
export "$var"
done
fi

# Print the uid/gid the container runs as (useful when debugging volume permissions).
echo "===> User"
id

echo "===> Configuring ..."
/etc/confluent/docker/configure

echo "===> Running preflight checks ... "
/etc/confluent/docker/ensure

echo "===> Launching ... "
# exec replaces this shell so the launched process becomes PID 1 and receives signals directly.
exec /etc/confluent/docker/launch
================================================
FILE: kraft/none/image/kafka-images/kafka/include/etc/confluent/docker/tools-log4j.properties.template
================================================
{# Log4j config for CLI tools: root level from KAFKA_TOOLS_LOG4J_LOGLEVEL (default WARN), writing to stderr so tool output on stdout stays clean. #}
log4j.rootLogger={{ env["KAFKA_TOOLS_LOG4J_LOGLEVEL"] | default('WARN') }}, stderr
log4j.appender.stderr=org.apache.log4j.ConsoleAppender
log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.stderr.Target=System.err
================================================
FILE: kraft/none/image/kafka-images/kafka/pom.xml
================================================
4.0.0
io.confluent.kafka-images
kafka-images-parent
7.4.0-0
io.confluent.kafka-images
cp-kafka
Kafka Docker Image
false
true
junit
junit
test
org.apache.maven.plugins
maven-jar-plugin
2.6
none
================================================
FILE: kraft/none/image/kafka-images/kafka/requirements.txt
================================================
git+https://github.com/confluentinc/confluent-docker-utils@v0.0.32
================================================
FILE: kraft/none/image/kafka-images/kafka/setup.py
================================================
import setuptools


def _read_dependency_links():
    """Read requirements.txt and return its non-empty lines.

    Uses a context manager so the file handle is closed (the previous
    inline ``open(...).read()`` leaked the handle), and drops the empty
    string that ``split("\n")`` produced from the trailing newline.
    """
    with open("requirements.txt") as requirements:
        return [line.strip() for line in requirements if line.strip()]


# Package metadata for the Kafka docker image test suite.
# NOTE(review): dependency_links is deprecated in modern pip/setuptools;
# kept here to preserve the existing install behavior.
setuptools.setup(
    name='kafka-tests',
    version='0.0.1',
    author="Confluent, Inc.",
    author_email="core-kafka-eng@confluent.io",
    description='Kafka docker image tests',
    url="https://github.com/confluentinc/kafka-images",
    dependency_links=_read_dependency_links(),
    packages=['test'],
    include_package_data=True,
    python_requires='>=2.7',
    setup_requires=['setuptools-git'],
)
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/cluster-bridged-plain.yml
================================================
---
version: '2'
networks:
zk:
driver: bridge
services:
zookeeper-1:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_SERVER_ID: 1
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_INIT_LIMIT: 5
ZOOKEEPER_SYNC_LIMIT: 2
ZOOKEEPER_SERVERS: zookeeper-1:2888:3888;zookeeper-2:32888:33888;zookeeper-3:42888:43888
networks:
- default
- zk
ports:
- 22181:2181
- 22888:2888
- 23888:3888
labels:
- io.confluent.docker.testing=true
zookeeper-2:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_SERVER_ID: 2
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_INIT_LIMIT: 5
ZOOKEEPER_SYNC_LIMIT: 2
ZOOKEEPER_SERVERS: zookeeper-1:2888:3888;zookeeper-2:32888:33888;zookeeper-3:42888:43888
networks:
- default
- zk
ports:
- 32181:2181
- 32888:2888
- 33888:3888
labels:
- io.confluent.docker.testing=true
zookeeper-3:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_SERVER_ID: 3
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_INIT_LIMIT: 5
ZOOKEEPER_SYNC_LIMIT: 2
ZOOKEEPER_SERVERS: zookeeper-1:2888:3888;zookeeper-2:32888:33888;zookeeper-3:42888:43888
networks:
- default
- zk
ports:
- 42181:2181
- 42888:2888
- 43888:3888
labels:
- io.confluent.docker.testing=true
kafka-1:
image: confluentinc/cp-kafka:latest
networks:
- default
- zk
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-1:9092
labels:
- io.confluent.docker.testing=true
kafka-2:
image: confluentinc/cp-kafka:latest
networks:
- default
- zk
environment:
KAFKA_BROKER_ID: 2
KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-2:9092
labels:
- io.confluent.docker.testing=true
kafka-3:
image: confluentinc/cp-kafka:latest
networks:
- default
- zk
environment:
KAFKA_BROKER_ID: 3
KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-3:9092
labels:
- io.confluent.docker.testing=true
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/cluster-bridged-sasl.yml
================================================
---
version: '2'
networks:
zk:
driver: bridge
services:
zookeeper-1:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_SERVER_ID: 1
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_INIT_LIMIT: 5
ZOOKEEPER_SYNC_LIMIT: 2
ZOOKEEPER_SERVERS: zookeeper-1:2888:3888;zookeeper-2:32888:33888;zookeeper-3:42888:43888
networks:
- default
- zk
ports:
- 22181:2181
- 22888:2888
- 23888:3888
labels:
- io.confluent.docker.testing=true
zookeeper-2:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_SERVER_ID: 2
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_INIT_LIMIT: 5
ZOOKEEPER_SYNC_LIMIT: 2
ZOOKEEPER_SERVERS: zookeeper-1:2888:3888;zookeeper-2:32888:33888;zookeeper-3:42888:43888
networks:
- default
- zk
ports:
- 32181:2181
- 32888:2888
- 33888:3888
labels:
- io.confluent.docker.testing=true
zookeeper-3:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_SERVER_ID: 3
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_INIT_LIMIT: 5
ZOOKEEPER_SYNC_LIMIT: 2
ZOOKEEPER_SERVERS: zookeeper-1:2888:3888;zookeeper-2:32888:33888;zookeeper-3:42888:43888
networks:
- default
- zk
ports:
- 42181:2181
- 42888:2888
- 43888:3888
labels:
- io.confluent.docker.testing=true
kerberos:
image: confluentinc/cp-kerberos
networks:
- default
- zk
environment:
BOOTSTRAP: 0
volumes:
- /tmp/kafka-cluster-bridge-test/secrets:/tmp/keytab
- /dev/urandom:/dev/random
labels:
- io.confluent.docker.testing=true
kafka-sasl-ssl-1:
image: confluentinc/cp-kafka:latest
hostname: kafka-sasl-ssl-1
networks:
- default
- zk
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181/saslssl
KAFKA_ADVERTISED_LISTENERS: SASL_SSL://kafka-sasl-ssl-1:9094
KAFKA_SSL_KEYSTORE_FILENAME: kafka.broker1.keystore.jks
KAFKA_SSL_KEYSTORE_CREDENTIALS: broker1_keystore_creds
KAFKA_SSL_KEY_CREDENTIALS: broker1_sslkey_creds
KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.broker1.truststore.jks
KAFKA_SSL_TRUSTSTORE_CREDENTIALS: broker1_truststore_creds
KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SASL_SSL
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: GSSAPI
KAFKA_SASL_ENABLED_MECHANISMS: GSSAPI
KAFKA_SASL_KERBEROS_SERVICE_NAME: kafka
KAFKA_LOG4J_ROOT_LOGLEVEL: DEBUG
ZOOKEEPER_SASL_ENABLED: 'FALSE'
KAFKA_OPTS: -Djava.security.auth.login.config=/etc/kafka/secrets/bridged_broker1_jaas.conf
-Djava.security.krb5.conf=/etc/kafka/secrets/bridged_krb.conf -Dsun.net.spi.nameservice.provider.1=sun
-Dsun.security.krb5.debug=true
volumes:
- /tmp/kafka-cluster-bridge-test/secrets:/etc/kafka/secrets
labels:
- io.confluent.docker.testing=true
kafka-sasl-ssl-2:
image: confluentinc/cp-kafka:latest
hostname: kafka-sasl-ssl-2
networks:
- default
- zk
environment:
KAFKA_BROKER_ID: 2
KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181/saslssl
KAFKA_ADVERTISED_LISTENERS: SASL_SSL://kafka-sasl-ssl-2:9094
KAFKA_SSL_KEYSTORE_FILENAME: kafka.broker2.keystore.jks
KAFKA_SSL_KEYSTORE_CREDENTIALS: broker2_keystore_creds
KAFKA_SSL_KEY_CREDENTIALS: broker2_sslkey_creds
KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.broker2.truststore.jks
KAFKA_SSL_TRUSTSTORE_CREDENTIALS: broker2_truststore_creds
KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SASL_SSL
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: GSSAPI
KAFKA_SASL_ENABLED_MECHANISMS: GSSAPI
KAFKA_SASL_KERBEROS_SERVICE_NAME: kafka
KAFKA_LOG4J_ROOT_LOGLEVEL: DEBUG
ZOOKEEPER_SASL_ENABLED: 'FALSE'
KAFKA_OPTS: -Djava.security.auth.login.config=/etc/kafka/secrets/bridged_broker2_jaas.conf
-Djava.security.krb5.conf=/etc/kafka/secrets/bridged_krb.conf -Dsun.net.spi.nameservice.provider.1=sun
-Dsun.security.krb5.debug=true
volumes:
- /tmp/kafka-cluster-bridge-test/secrets:/etc/kafka/secrets
labels:
- io.confluent.docker.testing=true
kafka-sasl-ssl-3:
image: confluentinc/cp-kafka:latest
hostname: kafka-sasl-ssl-3
networks:
- default
- zk
environment:
KAFKA_BROKER_ID: 3
KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181/saslssl
KAFKA_ADVERTISED_LISTENERS: SASL_SSL://kafka-sasl-ssl-3:9094
KAFKA_SSL_KEYSTORE_FILENAME: kafka.broker3.keystore.jks
KAFKA_SSL_KEYSTORE_CREDENTIALS: broker3_keystore_creds
KAFKA_SSL_KEY_CREDENTIALS: broker3_sslkey_creds
KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.broker3.truststore.jks
KAFKA_SSL_TRUSTSTORE_CREDENTIALS: broker3_truststore_creds
KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SASL_SSL
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: GSSAPI
KAFKA_SASL_ENABLED_MECHANISMS: GSSAPI
KAFKA_SASL_KERBEROS_SERVICE_NAME: kafka
KAFKA_LOG4J_ROOT_LOGLEVEL: DEBUG
ZOOKEEPER_SASL_ENABLED: 'FALSE'
KAFKA_OPTS: -Djava.security.auth.login.config=/etc/kafka/secrets/bridged_broker3_jaas.conf
-Djava.security.krb5.conf=/etc/kafka/secrets/bridged_krb.conf -Dsun.net.spi.nameservice.provider.1=sun
-Dsun.security.krb5.debug=true
volumes:
- /tmp/kafka-cluster-bridge-test/secrets:/etc/kafka/secrets
labels:
- io.confluent.docker.testing=true
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/cluster-bridged-ssl.yml
================================================
---
version: '2'
networks:
zk:
driver: bridge
services:
zookeeper-1:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_SERVER_ID: 1
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_INIT_LIMIT: 5
ZOOKEEPER_SYNC_LIMIT: 2
ZOOKEEPER_SERVERS: zookeeper-1:2888:3888;zookeeper-2:32888:33888;zookeeper-3:42888:43888
networks:
- default
- zk
ports:
- 22181:2181
- 22888:2888
- 23888:3888
labels:
- io.confluent.docker.testing=true
zookeeper-2:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_SERVER_ID: 2
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_INIT_LIMIT: 5
ZOOKEEPER_SYNC_LIMIT: 2
ZOOKEEPER_SERVERS: zookeeper-1:2888:3888;zookeeper-2:32888:33888;zookeeper-3:42888:43888
networks:
- default
- zk
ports:
- 32181:2181
- 32888:2888
- 33888:3888
labels:
- io.confluent.docker.testing=true
zookeeper-3:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_SERVER_ID: 3
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_INIT_LIMIT: 5
ZOOKEEPER_SYNC_LIMIT: 2
ZOOKEEPER_SERVERS: zookeeper-1:2888:3888;zookeeper-2:32888:33888;zookeeper-3:42888:43888
networks:
- default
- zk
ports:
- 42181:2181
- 42888:2888
- 43888:3888
labels:
- io.confluent.docker.testing=true
kafka-ssl-1:
image: confluentinc/cp-kafka:latest
networks:
- default
- zk
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181/ssl
KAFKA_ADVERTISED_LISTENERS: SSL://kafka-ssl-1:9093
KAFKA_SSL_KEYSTORE_FILENAME: kafka.broker1.keystore.jks
KAFKA_SSL_KEYSTORE_CREDENTIALS: broker1_keystore_creds
KAFKA_SSL_KEY_CREDENTIALS: broker1_sslkey_creds
KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.broker1.truststore.jks
KAFKA_SSL_TRUSTSTORE_CREDENTIALS: broker1_truststore_creds
KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SSL
volumes:
- /tmp/kafka-cluster-bridge-test/secrets:/etc/kafka/secrets
labels:
- io.confluent.docker.testing=true
kafka-ssl-2:
image: confluentinc/cp-kafka:latest
networks:
- default
- zk
environment:
KAFKA_BROKER_ID: 2
KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181/ssl
KAFKA_ADVERTISED_LISTENERS: SSL://kafka-ssl-2:9093
KAFKA_SSL_KEYSTORE_FILENAME: kafka.broker2.keystore.jks
KAFKA_SSL_KEYSTORE_CREDENTIALS: broker2_keystore_creds
KAFKA_SSL_KEY_CREDENTIALS: broker2_sslkey_creds
KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.broker2.truststore.jks
KAFKA_SSL_TRUSTSTORE_CREDENTIALS: broker2_truststore_creds
KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SSL
volumes:
- /tmp/kafka-cluster-bridge-test/secrets:/etc/kafka/secrets
labels:
- io.confluent.docker.testing=true
kafka-ssl-3:
image: confluentinc/cp-kafka:latest
networks:
- default
- zk
environment:
KAFKA_BROKER_ID: 3
KAFKA_ZOOKEEPER_CONNECT: zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181/ssl
KAFKA_ADVERTISED_LISTENERS: SSL://kafka-ssl-3:9093
KAFKA_SSL_KEYSTORE_FILENAME: kafka.broker3.keystore.jks
KAFKA_SSL_KEYSTORE_CREDENTIALS: broker3_keystore_creds
KAFKA_SSL_KEY_CREDENTIALS: broker3_sslkey_creds
KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.broker3.truststore.jks
KAFKA_SSL_TRUSTSTORE_CREDENTIALS: broker3_truststore_creds
KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SSL
volumes:
- /tmp/kafka-cluster-bridge-test/secrets:/etc/kafka/secrets
labels:
- io.confluent.docker.testing=true
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/cluster-host-plain.yml
================================================
---
version: '2'
services:
zookeeper-1:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_SERVER_ID: 1
ZOOKEEPER_CLIENT_PORT: 22181
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_INIT_LIMIT: 5
ZOOKEEPER_SYNC_LIMIT: 2
ZOOKEEPER_SERVERS: localhost:22888:23888;localhost:32888:33888;localhost:42888:43888
network_mode: host
labels:
- io.confluent.docker.testing=true
zookeeper-2:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_SERVER_ID: 2
ZOOKEEPER_CLIENT_PORT: 32181
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_INIT_LIMIT: 5
ZOOKEEPER_SYNC_LIMIT: 2
ZOOKEEPER_SERVERS: localhost:22888:23888;localhost:32888:33888;localhost:42888:43888
network_mode: host
labels:
- io.confluent.docker.testing=true
zookeeper-3:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_SERVER_ID: 3
ZOOKEEPER_CLIENT_PORT: 42181
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_INIT_LIMIT: 5
ZOOKEEPER_SYNC_LIMIT: 2
ZOOKEEPER_SERVERS: localhost:22888:23888;localhost:32888:33888;localhost:42888:43888
network_mode: host
labels:
- io.confluent.docker.testing=true
kafka-1:
image: confluentinc/cp-kafka:latest
network_mode: host
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:19092
labels:
- io.confluent.docker.testing=true
kafka-2:
image: confluentinc/cp-kafka:latest
network_mode: host
environment:
KAFKA_BROKER_ID: 2
KAFKA_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:29092
labels:
- io.confluent.docker.testing=true
kafka-3:
image: confluentinc/cp-kafka:latest
network_mode: host
environment:
KAFKA_BROKER_ID: 3
KAFKA_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:39092
labels:
- io.confluent.docker.testing=true
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/cluster-host-sasl.yml
================================================
---
version: '2'
services:
zookeeper-sasl-1:
image: confluentinc/cp-zookeeper:latest
# This is required because Zookeeper can fail if kerberos is still initializing.
restart: on-failure:3
environment:
ZOOKEEPER_SERVER_ID: 1
ZOOKEEPER_CLIENT_PORT: 22181
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_INIT_LIMIT: 5
ZOOKEEPER_SYNC_LIMIT: 2
ZOOKEEPER_SERVERS: sasl.kafka.com:22888:23888;sasl.kafka.com:32888:33888;sasl.kafka.com:42888:43888
KAFKA_OPTS: -Djava.security.auth.login.config=/etc/kafka/secrets/host_zookeeper_1_jaas.conf
-Djava.security.krb5.conf=/etc/kafka/secrets/host_krb.conf
-Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
-Dsun.security.krb5.debug=true
volumes:
- /tmp/kafka-cluster-host-test/secrets:/etc/kafka/secrets
network_mode: host
labels:
- io.confluent.docker.testing=true
zookeeper-sasl-2:
image: confluentinc/cp-zookeeper:latest
# This is required because Zookeeper can fail if kerberos is still initializing.
restart: on-failure:3
environment:
ZOOKEEPER_SERVER_ID: 2
ZOOKEEPER_CLIENT_PORT: 32181
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_INIT_LIMIT: 5
ZOOKEEPER_SYNC_LIMIT: 2
ZOOKEEPER_SERVERS: sasl.kafka.com:22888:23888;sasl.kafka.com:32888:33888;sasl.kafka.com:42888:43888
KAFKA_OPTS: -Djava.security.auth.login.config=/etc/kafka/secrets/host_zookeeper_2_jaas.conf
-Djava.security.krb5.conf=/etc/kafka/secrets/host_krb.conf
-Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
-Dsun.security.krb5.debug=true
volumes:
- /tmp/kafka-cluster-host-test/secrets:/etc/kafka/secrets
network_mode: host
labels:
- io.confluent.docker.testing=true
zookeeper-sasl-3:
image: confluentinc/cp-zookeeper:latest
# This is required because Zookeeper can fail if kerberos is still initializing.
restart: on-failure:3
environment:
ZOOKEEPER_SERVER_ID: 3
ZOOKEEPER_CLIENT_PORT: 42181
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_INIT_LIMIT: 5
ZOOKEEPER_SYNC_LIMIT: 2
ZOOKEEPER_SERVERS: sasl.kafka.com:22888:23888;sasl.kafka.com:32888:33888;sasl.kafka.com:42888:43888
KAFKA_OPTS: -Djava.security.auth.login.config=/etc/kafka/secrets/host_zookeeper_3_jaas.conf
-Djava.security.krb5.conf=/etc/kafka/secrets/host_krb.conf
-Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
-Dsun.security.krb5.debug=true
volumes:
- /tmp/kafka-cluster-host-test/secrets:/etc/kafka/secrets
network_mode: host
labels:
- io.confluent.docker.testing=true
kerberos:
image: confluentinc/cp-kerberos
network_mode: host
environment:
BOOTSTRAP: 0
volumes:
- /tmp/kafka-cluster-host-test/secrets:/tmp/keytab
- /dev/urandom:/dev/random
labels:
- io.confluent.docker.testing=true
kafka-sasl-ssl-1:
image: confluentinc/cp-kafka:latest
network_mode: host
# This is required because Kafka can fail if kerberos is still initializing.
restart: on-failure:3
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: sasl.kafka.com:22181,sasl.kafka.com:32181,sasl.kafka.com:42181/saslssl
KAFKA_ADVERTISED_LISTENERS: SASL_SSL://sasl.kafka.com:19094
KAFKA_SSL_KEYSTORE_FILENAME: kafka.broker1.keystore.jks
KAFKA_SSL_KEYSTORE_CREDENTIALS: broker1_keystore_creds
KAFKA_SSL_KEY_CREDENTIALS: broker1_sslkey_creds
KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.broker1.truststore.jks
KAFKA_SSL_TRUSTSTORE_CREDENTIALS: broker1_truststore_creds
KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SASL_SSL
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: GSSAPI
KAFKA_SASL_ENABLED_MECHANISMS: GSSAPI
KAFKA_SASL_KERBEROS_SERVICE_NAME: kafka
KAFKA_LOG4J_ROOT_LOGLEVEL: DEBUG
KAFKA_OPTS: -Djava.security.auth.login.config=/etc/kafka/secrets/host_broker1_jaas.conf
-Djava.security.krb5.conf=/etc/kafka/secrets/host_krb.conf
-Dsun.security.krb5.debug=true
volumes:
- /tmp/kafka-cluster-host-test/secrets:/etc/kafka/secrets
labels:
- io.confluent.docker.testing=true
kafka-sasl-ssl-2:
image: confluentinc/cp-kafka:latest
network_mode: host
restart: on-failure:3
environment:
KAFKA_BROKER_ID: 2
KAFKA_ZOOKEEPER_CONNECT: sasl.kafka.com:22181,sasl.kafka.com:32181,sasl.kafka.com:42181/saslssl
KAFKA_ADVERTISED_LISTENERS: SASL_SSL://sasl.kafka.com:29094
KAFKA_SSL_KEYSTORE_FILENAME: kafka.broker2.keystore.jks
KAFKA_SSL_KEYSTORE_CREDENTIALS: broker2_keystore_creds
KAFKA_SSL_KEY_CREDENTIALS: broker2_sslkey_creds
KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.broker2.truststore.jks
KAFKA_SSL_TRUSTSTORE_CREDENTIALS: broker2_truststore_creds
KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SASL_SSL
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: GSSAPI
KAFKA_SASL_ENABLED_MECHANISMS: GSSAPI
KAFKA_SASL_KERBEROS_SERVICE_NAME: kafka
KAFKA_LOG4J_ROOT_LOGLEVEL: DEBUG
KAFKA_OPTS: -Djava.security.auth.login.config=/etc/kafka/secrets/host_broker2_jaas.conf
-Djava.security.krb5.conf=/etc/kafka/secrets/host_krb.conf
-Dsun.security.krb5.debug=true
volumes:
- /tmp/kafka-cluster-host-test/secrets:/etc/kafka/secrets
labels:
- io.confluent.docker.testing=true
kafka-sasl-ssl-3:
image: confluentinc/cp-kafka:latest
network_mode: host
restart: on-failure:3
environment:
KAFKA_BROKER_ID: 3
KAFKA_ZOOKEEPER_CONNECT: sasl.kafka.com:22181,sasl.kafka.com:32181,sasl.kafka.com:42181/saslssl
KAFKA_ADVERTISED_LISTENERS: SASL_SSL://sasl.kafka.com:39094
KAFKA_SSL_KEYSTORE_FILENAME: kafka.broker3.keystore.jks
KAFKA_SSL_KEYSTORE_CREDENTIALS: broker3_keystore_creds
KAFKA_SSL_KEY_CREDENTIALS: broker3_sslkey_creds
KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.broker3.truststore.jks
KAFKA_SSL_TRUSTSTORE_CREDENTIALS: broker3_truststore_creds
KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SASL_SSL
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: GSSAPI
KAFKA_SASL_ENABLED_MECHANISMS: GSSAPI
KAFKA_SASL_KERBEROS_SERVICE_NAME: kafka
KAFKA_LOG4J_ROOT_LOGLEVEL: DEBUG
KAFKA_OPTS: -Djava.security.auth.login.config=/etc/kafka/secrets/host_broker3_jaas.conf
-Djava.security.krb5.conf=/etc/kafka/secrets/host_krb.conf
-Dsun.security.krb5.debug=true
volumes:
- /tmp/kafka-cluster-host-test/secrets:/etc/kafka/secrets
labels:
- io.confluent.docker.testing=true
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/cluster-host-ssl.yml
================================================
---
version: '2'
services:
zookeeper-1:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_SERVER_ID: 1
ZOOKEEPER_CLIENT_PORT: 22181
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_INIT_LIMIT: 5
ZOOKEEPER_SYNC_LIMIT: 2
ZOOKEEPER_SERVERS: localhost:22888:23888;localhost:32888:33888;localhost:42888:43888
network_mode: host
labels:
- io.confluent.docker.testing=true
zookeeper-2:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_SERVER_ID: 2
ZOOKEEPER_CLIENT_PORT: 32181
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_INIT_LIMIT: 5
ZOOKEEPER_SYNC_LIMIT: 2
ZOOKEEPER_SERVERS: localhost:22888:23888;localhost:32888:33888;localhost:42888:43888
network_mode: host
labels:
- io.confluent.docker.testing=true
zookeeper-3:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_SERVER_ID: 3
ZOOKEEPER_CLIENT_PORT: 42181
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_INIT_LIMIT: 5
ZOOKEEPER_SYNC_LIMIT: 2
ZOOKEEPER_SERVERS: localhost:22888:23888;localhost:32888:33888;localhost:42888:43888
network_mode: host
labels:
- io.confluent.docker.testing=true
kafka-ssl-1:
image: confluentinc/cp-kafka:latest
network_mode: host
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181/ssl
KAFKA_ADVERTISED_LISTENERS: SSL://localhost:19093
KAFKA_SSL_KEYSTORE_FILENAME: kafka.broker1.keystore.jks
KAFKA_SSL_KEYSTORE_CREDENTIALS: broker1_keystore_creds
KAFKA_SSL_KEY_CREDENTIALS: broker1_sslkey_creds
KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.broker1.truststore.jks
KAFKA_SSL_TRUSTSTORE_CREDENTIALS: broker1_truststore_creds
KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SSL
volumes:
- /tmp/kafka-cluster-host-test/secrets:/etc/kafka/secrets
labels:
- io.confluent.docker.testing=true
kafka-ssl-2:
image: confluentinc/cp-kafka:latest
network_mode: host
environment:
KAFKA_BROKER_ID: 2
KAFKA_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181/ssl
KAFKA_ADVERTISED_LISTENERS: SSL://localhost:29093
KAFKA_SSL_KEYSTORE_FILENAME: kafka.broker2.keystore.jks
KAFKA_SSL_KEYSTORE_CREDENTIALS: broker2_keystore_creds
KAFKA_SSL_KEY_CREDENTIALS: broker2_sslkey_creds
KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.broker2.truststore.jks
KAFKA_SSL_TRUSTSTORE_CREDENTIALS: broker2_truststore_creds
KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SSL
volumes:
- /tmp/kafka-cluster-host-test/secrets:/etc/kafka/secrets
labels:
- io.confluent.docker.testing=true
kafka-ssl-3:
image: confluentinc/cp-kafka:latest
network_mode: host
environment:
KAFKA_BROKER_ID: 3
KAFKA_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181/ssl
KAFKA_ADVERTISED_LISTENERS: SSL://localhost:39093
KAFKA_SSL_KEYSTORE_FILENAME: kafka.broker3.keystore.jks
KAFKA_SSL_KEYSTORE_CREDENTIALS: broker3_keystore_creds
KAFKA_SSL_KEY_CREDENTIALS: broker3_sslkey_creds
KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.broker3.truststore.jks
KAFKA_SSL_TRUSTSTORE_CREDENTIALS: broker3_truststore_creds
KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SSL
volumes:
- /tmp/kafka-cluster-host-test/secrets:/etc/kafka/secrets
labels:
- io.confluent.docker.testing=true
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged.consumer.ssl.config
================================================
group.id=ssl-bridged
ssl.truststore.location=/etc/kafka/secrets/kafka.consumer.truststore.jks
ssl.truststore.password=confluent
ssl.keystore.location=/etc/kafka/secrets/kafka.consumer.keystore.jks
ssl.keystore.password=confluent
ssl.key.password=confluent
security.protocol=SSL
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged.consumer.ssl.sasl.config
================================================
group.id=ssl-sasl-bridged
ssl.truststore.location=/etc/kafka/secrets/kafka.consumer.truststore.jks
ssl.truststore.password=confluent
ssl.keystore.location=/etc/kafka/secrets/kafka.consumer.keystore.jks
ssl.keystore.password=confluent
ssl.key.password=confluent
security.protocol=SASL_SSL
sasl.mechanism=GSSAPI
sasl.kerberos.service.name=kafka
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged.producer.ssl.config
================================================
ssl.truststore.location=/etc/kafka/secrets/kafka.producer.truststore.jks
ssl.truststore.password=confluent
ssl.keystore.location=/etc/kafka/secrets/kafka.producer.keystore.jks
ssl.keystore.password=confluent
ssl.key.password=confluent
security.protocol=SSL
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged.producer.ssl.sasl.config
================================================
ssl.truststore.location=/etc/kafka/secrets/kafka.producer.truststore.jks
ssl.truststore.password=confluent
ssl.keystore.location=/etc/kafka/secrets/kafka.producer.keystore.jks
ssl.keystore.password=confluent
ssl.key.password=confluent
security.protocol=SASL_SSL
sasl.mechanism=GSSAPI
sasl.kerberos.service.name=kafka
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged_broker1_jaas.conf
================================================
KafkaServer {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/bridged_broker1.keytab"
principal="kafka/kafka-sasl-ssl-1@TEST.CONFLUENT.IO";
};
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/bridged_broker1.keytab"
principal="kafka/kafka-sasl-ssl-1@TEST.CONFLUENT.IO";
};
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged_broker2_jaas.conf
================================================
KafkaServer {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/bridged_broker2.keytab"
principal="kafka/kafka-sasl-ssl-2@TEST.CONFLUENT.IO";
};
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/bridged_broker2.keytab"
principal="kafka/kafka-sasl-ssl-2@TEST.CONFLUENT.IO";
};
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged_broker3_jaas.conf
================================================
KafkaServer {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/bridged_broker3.keytab"
principal="kafka/kafka-sasl-ssl-3@TEST.CONFLUENT.IO";
};
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/bridged_broker3.keytab"
principal="kafka/kafka-sasl-ssl-3@TEST.CONFLUENT.IO";
};
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged_consumer_jaas.conf
================================================
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/bridged_consumer.keytab"
principal="bridged_consumer/kafka-sasl-ssl-consumer@TEST.CONFLUENT.IO";
};
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged_krb.conf
================================================
[logging]
default = FILE:/var/log/kerberos/krb5libs.log
kdc = FILE:/var/log/kerberos/krb5kdc.log
admin_server = FILE:/var/log/kerberos/kadmind.log
[libdefaults]
default_realm = TEST.CONFLUENT.IO
dns_lookup_realm = false
dns_lookup_kdc = false
ticket_lifetime = 24h
renew_lifetime = 7d
forwardable = true
# WARNING: We use weaker key types to simplify testing as stronger key types
# require the enhanced security JCE policy file to be installed. You should
# NOT run with this configuration in production or any real environment. You
# have been warned.
default_tkt_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1
default_tgs_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1
permitted_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1
[realms]
TEST.CONFLUENT.IO = {
kdc = kerberos
admin_server = kerberos
}
[domain_realm]
.test.confluent.io = TEST.CONFLUENT.IO
test.confluent.io = TEST.CONFLUENT.IO
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/bridged_producer_jaas.conf
================================================
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/bridged_producer.keytab"
principal="bridged_producer/kafka-sasl-ssl-producer@TEST.CONFLUENT.IO";
};
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker1-ca1-signed.crt
================================================
-----BEGIN CERTIFICATE-----
MIIC0jCCAjsCCQC4Ge6Xmxv2ajANBgkqhkiG9w0BAQUFADBjMR4wHAYDVQQDExVj
YTEudGVzdC5jb25mbHVlbnQuaW8xDTALBgNVBAsTBFRFU1QxEjAQBgNVBAoTCUNP
TkZMVUVOVDERMA8GA1UEBxMIUGFsb0FsdG8xCzAJBgNVBAYTAlVTMB4XDTE2MDcw
OTE4MTQyOVoXDTQzMTEyNDE4MTQyOVowdDELMAkGA1UEBhMCVVMxCzAJBgNVBAgT
AkNhMREwDwYDVQQHEwhQYWxvQWx0bzESMBAGA1UEChMJQ09ORkxVRU5UMQ0wCwYD
VQQLEwRURVNUMSIwIAYDVQQDExlicm9rZXIxLnRlc3QuY29uZmx1ZW50LmlvMIIB
IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoxhAcghALuWHLZtKFjSKoA6z
EUE5djS4iudQ7hHLV68JReMWJR4fO2OfsZHKNo8uIzF836SgH0ZYepmt0PtRLspi
kKmSwlJQjBmB9/JSOnUuWX53DxWaIKZUaB/OwdxPgo3qpLXciGwOffip68loo7XG
bhYStZfDCiDw2w+N7Px93a9xA0ZNgWHFsal8qoMLg3V0xW3BkS+jcjsPud2muXOq
E1a93P/40ZkgfoGkFHvh+HJITXhVtRDIoJJMJO6UFr7jfqnhvC07nDVCJjCIOl7C
ebid3e2gplBwsyBeL9ulc6EfMa/URaAFvGMkIy0Qkcr9hlPHPcNvnglVvISocQID
AQABMA0GCSqGSIb3DQEBBQUAA4GBADXwLG913lSI05RqyT0Ph/mtA4NyfPUkOnuJ
JxJzHWzp+G68QjmWMGycN6fqN1MosjZtu9/p4Z5Rjx2ywJCOO4wcOteLuIvJkfHm
4gB+NvzajUZ1YIg/LD09TPSYXmPX5juR/zxMaChcZigGTpADnpoLfsyydUNSLw3L
boKeqh+v
-----END CERTIFICATE-----
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker1_keystore_creds
================================================
confluent
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker1_sslkey_creds
================================================
confluent
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker1_truststore_creds
================================================
confluent
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker2-ca1-signed.crt
================================================
-----BEGIN CERTIFICATE-----
MIIC0jCCAjsCCQC4Ge6Xmxv2azANBgkqhkiG9w0BAQUFADBjMR4wHAYDVQQDExVj
YTEudGVzdC5jb25mbHVlbnQuaW8xDTALBgNVBAsTBFRFU1QxEjAQBgNVBAoTCUNP
TkZMVUVOVDERMA8GA1UEBxMIUGFsb0FsdG8xCzAJBgNVBAYTAlVTMB4XDTE2MDcw
OTE4MTQzM1oXDTQzMTEyNDE4MTQzM1owdDELMAkGA1UEBhMCVVMxCzAJBgNVBAgT
AkNhMREwDwYDVQQHEwhQYWxvQWx0bzESMBAGA1UEChMJQ09ORkxVRU5UMQ0wCwYD
VQQLEwRURVNUMSIwIAYDVQQDExlicm9rZXIyLnRlc3QuY29uZmx1ZW50LmlvMIIB
IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAud6NF7S4nl4USma/Dvxq0Ftx
1OiuGZVKw5qo3KL+v87GP2Uyoqqu7YsB/dFpfl8j49My3Bo4Jy5g60oGgHEYh0lj
0vb0NqNKUSu4/7jNHuN4hGnplPMTtshlrU0VaDCKM6M1IHJYsGzcpsEoVmjA5NaT
zq9WKKXzdbM/5n0NHvvnESyR9Ug1RKuEPAGKxqc0AwMHiqWCLb8jHtbjkzf7PuEy
MIAzeb/BcJygqBYvKHqQKtumWQy0YdT5vDl4M7Aywv2p188s5vBEgzCmjCQRjPph
1lLBWka1OGoebmPt5DqWNvtHzXA1Bit2aOA3BxnXN50Jq0MtEq46NGCQ4Up8XQID
AQABMA0GCSqGSIb3DQEBBQUAA4GBAIVf4TckPp7iGjMIx1wJjPv9RSPynF/hljgS
7indUs9vjijLMuMR4E66a/JQiKqZxGUefBvtYX0oGKfRZDO5DgwHx50Kv9Yx6Ux6
VqTq+CFpWJLSsdlNHHbjVYCVjCEFZAUX/Y4ULVCNihYNaLIB+NWNj5jEuemqAnUl
eyM5UwLQ
-----END CERTIFICATE-----
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker2_keystore_creds
================================================
confluent
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker2_sslkey_creds
================================================
confluent
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker2_truststore_creds
================================================
confluent
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker3-ca1-signed.crt
================================================
-----BEGIN CERTIFICATE-----
MIIC0jCCAjsCCQC4Ge6Xmxv2bDANBgkqhkiG9w0BAQUFADBjMR4wHAYDVQQDExVj
YTEudGVzdC5jb25mbHVlbnQuaW8xDTALBgNVBAsTBFRFU1QxEjAQBgNVBAoTCUNP
TkZMVUVOVDERMA8GA1UEBxMIUGFsb0FsdG8xCzAJBgNVBAYTAlVTMB4XDTE2MDcw
OTE4MTQzNloXDTQzMTEyNDE4MTQzNlowdDELMAkGA1UEBhMCVVMxCzAJBgNVBAgT
AkNhMREwDwYDVQQHEwhQYWxvQWx0bzESMBAGA1UEChMJQ09ORkxVRU5UMQ0wCwYD
VQQLEwRURVNUMSIwIAYDVQQDExlicm9rZXIzLnRlc3QuY29uZmx1ZW50LmlvMIIB
IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAhFw0H4/NksLIyFu+8YnrKr9J
D1iupm5mpavXFiB3UrCUYjBPc+bkXRExFtAnhwB1q7R/E+zJtzlmLl5FojWFSaiv
t2bjugPHs6bZ1D3p0UqouoQf7AgQQNSB+wZoZCp3yiKxI+s1U3NKT6MRbAN5eaUx
1JNo1fV2zVIIvsKFe1Zldt5uSML9OBtcixaJCsPkyGenV1kajkoiHC8UHgzu5obr
9QSOdkRWTgq5LX+gyWhAC4hF+ApA5QGQYT8m5paj5c2YHhpZcTFepRnZSh3fq7Xq
hXPqzQyX6v9Kxii9QaVwY2zwgON09OJ5KF9UK4FPQfZmd4dJEVV4CybhHTP3kQID
AQABMA0GCSqGSIb3DQEBBQUAA4GBANstHkSvQjumHlwQSAaQ4pA6YION0GcY+Lzl
vUIE2DFRwzsV87wFa2sc46XOSpjhUxaYEqtyzHYCaPaZ/n2t07857AqNXJjeJZhW
L/l17cAFdPToP63cpMBQF9deQyhHQTMEMhPKBYg9ym9B3wh2emGSxriD1nhU0cbK
KlEkqnUP
-----END CERTIFICATE-----
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker3_keystore_creds
================================================
confluent
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker3_sslkey_creds
================================================
confluent
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/broker3_truststore_creds
================================================
confluent
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/client-plain.config
================================================
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/config_krb.conf
================================================
[logging]
default = FILE:/var/log/kerberos/krb5libs.log
kdc = FILE:/var/log/kerberos/krb5kdc.log
admin_server = FILE:/var/log/kerberos/kadmind.log
[libdefaults]
default_realm = TEST.CONFLUENT.IO
dns_lookup_realm = false
dns_lookup_kdc = false
ticket_lifetime = 24h
renew_lifetime = 7d
forwardable = true
[realms]
TEST.CONFLUENT.IO = {
kdc = kerberos
admin_server = confluent
}
[domain_realm]
.test.confluent.io = TEST.CONFLUENT.IO
test.confluent.io = TEST.CONFLUENT.IO
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/config_server1_jaas.conf
================================================
KafkaServer {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/broker1.keytab"
principal="kafka/sasl-ssl-config@TEST.CONFLUENT.IO";
};
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/broker1.keytab"
principal="kafka/sasl-ssl-config@TEST.CONFLUENT.IO";
};
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/consumer-ca1-signed.crt
================================================
-----BEGIN CERTIFICATE-----
MIIC0zCCAjwCCQC4Ge6Xmxv2bjANBgkqhkiG9w0BAQUFADBjMR4wHAYDVQQDExVj
YTEudGVzdC5jb25mbHVlbnQuaW8xDTALBgNVBAsTBFRFU1QxEjAQBgNVBAoTCUNP
TkZMVUVOVDERMA8GA1UEBxMIUGFsb0FsdG8xCzAJBgNVBAYTAlVTMB4XDTE2MDcw
OTE4MTQ0MloXDTQzMTEyNDE4MTQ0MlowdTELMAkGA1UEBhMCVVMxCzAJBgNVBAgT
AkNhMREwDwYDVQQHEwhQYWxvQWx0bzESMBAGA1UEChMJQ09ORkxVRU5UMQ0wCwYD
VQQLEwRURVNUMSMwIQYDVQQDExpjb25zdW1lci50ZXN0LmNvbmZsdWVudC5pbzCC
ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJMlGunTtQd2dTY+EPTXMLvO
+QSznU/JdLM0liqBGInJ2J1yC76avKjBhXqbJRA+cYYq7EvBFuaeFAeRPfTXLJYv
67cmVN2QSAmd5jGiyOkP2w3q9WYyhczIQLE87NcC0/E3UE9SY25sUsZneJifhJLC
JpEaQS+JSP8yWMwyGm67ccTIHanvGoha0s2aP97BhTqxAarBzSjW/IDO4r5yCwPJ
dAbiI00rnt7zgeLwjzJBrvpzYbp4IhAQy8dnPeZ3PbL4qz+tyJECONn7lYIFFHMX
SVw9GZe4KGnCyhnY/t4aKXRSTk8mBEjIybhfXV/mEE94z+KOAwUWV+uXWLQRUDUC
AwEAATANBgkqhkiG9w0BAQUFAAOBgQBi9i8ynwSosbyP5FvX1wAHs3QYrD/izFxj
d8W8STg7/TJbb8jJwl5ievUASiH0rvHrorEHs3Vyijc5W6nLoAL+KLjUYlNyd2b+
78YYxgUFMFAzvrXJ1oFOWTkDb66LGWCj75z6hKPX2U33PY5+A7YSNMnjYlqQBSgN
sorDym1cfQ==
-----END CERTIFICATE-----
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/consumer_keystore_creds
================================================
confluent
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/consumer_sslkey_creds
================================================
confluent
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/consumer_truststore_creds
================================================
confluent
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/create-certs.sh
================================================
#!/bin/bash
# Generates throwaway ("snakeoil") test PKI material: a self-signed CA, a
# client key/cert for kafkacat, and per-host JKS keystores/truststores for
# the test brokers and clients. TEST FIXTURES ONLY -- passwords are hard-coded.
set -o nounset \
    -o errexit \
    -o verbose \
    -o xtrace
# Generate CA key
openssl req -new -x509 -keyout snakeoil-ca-1.key -out snakeoil-ca-1.crt -days 365 -subj '/CN=ca1.test.confluent.io/OU=TEST/O=CONFLUENT/L=PaloAlto/S=Ca/C=US' -passin pass:confluent -passout pass:confluent
# openssl req -new -x509 -keyout snakeoil-ca-2.key -out snakeoil-ca-2.crt -days 365 -subj '/CN=ca2.test.confluent.io/OU=TEST/O=CONFLUENT/L=PaloAlto/S=Ca/C=US' -passin pass:confluent -passout pass:confluent
# Kafkacat: PEM key + CSR, then sign the CSR with the CA above.
openssl genrsa -des3 -passout "pass:confluent" -out kafkacat.client.key 1024
openssl req -passin "pass:confluent" -passout "pass:confluent" -key kafkacat.client.key -new -out kafkacat.client.req -subj '/CN=kafkacat.test.confluent.io/OU=TEST/O=CONFLUENT/L=PaloAlto/S=Ca/C=US'
openssl x509 -req -CA snakeoil-ca-1.crt -CAkey snakeoil-ca-1.key -in kafkacat.client.req -out kafkacat-ca1-signed.pem -days 9999 -CAcreateserial -passin "pass:confluent"
for i in broker1 broker2 broker3 producer consumer
do
	# Quoted expansions ("$i") guard against word splitting (shellcheck SC2086);
	# behavior is unchanged for this fixed, space-free word list.
	echo "$i"
	# Create keystores
	keytool -genkey -noprompt \
		-alias "$i" \
		-dname "CN=$i.test.confluent.io, OU=TEST, O=CONFLUENT, L=PaloAlto, S=Ca, C=US" \
		-keystore "kafka.$i.keystore.jks" \
		-keyalg RSA \
		-storepass confluent \
		-keypass confluent
	# Create CSR, sign the key and import back into keystore.
	# NB: the CA root must be imported before the signed cert so keytool can
	# build the chain of trust.
	keytool -keystore "kafka.$i.keystore.jks" -alias "$i" -certreq -file "$i.csr" -storepass confluent -keypass confluent
	openssl x509 -req -CA snakeoil-ca-1.crt -CAkey snakeoil-ca-1.key -in "$i.csr" -out "$i-ca1-signed.crt" -days 9999 -CAcreateserial -passin pass:confluent
	keytool -keystore "kafka.$i.keystore.jks" -alias CARoot -import -file snakeoil-ca-1.crt -storepass confluent -keypass confluent
	keytool -keystore "kafka.$i.keystore.jks" -alias "$i" -import -file "$i-ca1-signed.crt" -storepass confluent -keypass confluent
	# Create truststore and import the CA cert.
	keytool -keystore "kafka.$i.truststore.jks" -alias CARoot -import -file snakeoil-ca-1.crt -storepass confluent -keypass confluent
	# Plain-text credential files consumed by the Docker image's secret loader.
	echo "confluent" > "${i}_sslkey_creds"
	echo "confluent" > "${i}_keystore_creds"
	echo "confluent" > "${i}_truststore_creds"
done
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host.consumer.ssl.config
================================================
group.id=ssl-host
ssl.truststore.location=/etc/kafka/secrets/kafka.consumer.truststore.jks
ssl.truststore.password=confluent
ssl.keystore.location=/etc/kafka/secrets/kafka.consumer.keystore.jks
ssl.keystore.password=confluent
ssl.key.password=confluent
security.protocol=SSL
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host.consumer.ssl.sasl.config
================================================
group.id=ssl-sasl-host
ssl.truststore.location=/etc/kafka/secrets/kafka.consumer.truststore.jks
ssl.truststore.password=confluent
ssl.keystore.location=/etc/kafka/secrets/kafka.consumer.keystore.jks
ssl.keystore.password=confluent
ssl.key.password=confluent
security.protocol=SASL_SSL
sasl.mechanism=GSSAPI
sasl.kerberos.service.name=kafka
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host.producer.ssl.config
================================================
ssl.truststore.location=/etc/kafka/secrets/kafka.producer.truststore.jks
ssl.truststore.password=confluent
ssl.keystore.location=/etc/kafka/secrets/kafka.producer.keystore.jks
ssl.keystore.password=confluent
ssl.key.password=confluent
security.protocol=SSL
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host.producer.ssl.sasl.config
================================================
ssl.truststore.location=/etc/kafka/secrets/kafka.producer.truststore.jks
ssl.truststore.password=confluent
ssl.keystore.location=/etc/kafka/secrets/kafka.producer.keystore.jks
ssl.keystore.password=confluent
ssl.key.password=confluent
security.protocol=SASL_SSL
sasl.mechanism=GSSAPI
sasl.kerberos.service.name=kafka
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host_broker1_jaas.conf
================================================
KafkaServer {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/host_broker1.keytab"
principal="kafka/sasl.kafka.com@TEST.CONFLUENT.IO";
};
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/host_broker1.keytab"
principal="kafka/sasl.kafka.com@TEST.CONFLUENT.IO";
};
Client {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/zkclient-host-1.keytab"
principal="zkclient/sasl.kafka.com@TEST.CONFLUENT.IO";
};
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host_broker2_jaas.conf
================================================
KafkaServer {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/host_broker2.keytab"
principal="kafka/sasl.kafka.com@TEST.CONFLUENT.IO";
};
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/host_broker2.keytab"
principal="kafka/sasl.kafka.com@TEST.CONFLUENT.IO";
};
Client {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/zkclient-host-2.keytab"
principal="zkclient/sasl.kafka.com@TEST.CONFLUENT.IO";
};
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host_broker3_jaas.conf
================================================
KafkaServer {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/host_broker3.keytab"
principal="kafka/sasl.kafka.com@TEST.CONFLUENT.IO";
};
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/host_broker3.keytab"
principal="kafka/sasl.kafka.com@TEST.CONFLUENT.IO";
};
Client {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/zkclient-host-3.keytab"
principal="zkclient/sasl.kafka.com@TEST.CONFLUENT.IO";
};
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host_consumer_jaas.conf
================================================
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/host_consumer.keytab"
principal="host_consumer/sasl.kafka.com@TEST.CONFLUENT.IO";
};
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host_krb.conf
================================================
[logging]
default = FILE:/var/log/kerberos/krb5libs.log
kdc = FILE:/var/log/kerberos/krb5kdc.log
admin_server = FILE:/var/log/kerberos/kadmind.log
[libdefaults]
default_realm = TEST.CONFLUENT.IO
dns_lookup_realm = false
dns_lookup_kdc = false
ticket_lifetime = 24h
renew_lifetime = 7d
forwardable = true
[realms]
TEST.CONFLUENT.IO = {
kdc = localhost
admin_server = localhost
}
[domain_realm]
.TEST.CONFLUENT.IO = TEST.CONFLUENT.IO
TEST.CONFLUENT.IO = TEST.CONFLUENT.IO
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host_producer_jaas.conf
================================================
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/host_producer.keytab"
principal="host_producer/sasl.kafka.com@TEST.CONFLUENT.IO";
};
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host_zookeeper_1_jaas.conf
================================================
Server {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/zookeeper-host-1.keytab"
principal="zookeeper/sasl.kafka.com@TEST.CONFLUENT.IO";
};
Client {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/zkclient-host-1.keytab"
principal="zkclient/sasl.kafka.com@TEST.CONFLUENT.IO";
};
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host_zookeeper_2_jaas.conf
================================================
Server {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/zookeeper-host-1.keytab"
principal="zookeeper/sasl.kafka.com@TEST.CONFLUENT.IO";
};
Client {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/zkclient-host-1.keytab"
principal="zkclient/sasl.kafka.com@TEST.CONFLUENT.IO";
};
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/host_zookeeper_3_jaas.conf
================================================
Server {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/zookeeper-host-1.keytab"
principal="zookeeper/sasl.kafka.com@TEST.CONFLUENT.IO";
};
Client {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka/secrets/zkclient-host-1.keytab"
principal="zkclient/sasl.kafka.com@TEST.CONFLUENT.IO";
};
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafkacat-ca1-signed.pem
================================================
-----BEGIN CERTIFICATE-----
MIICQjCCAasCCQC4Ge6Xmxv2aTANBgkqhkiG9w0BAQUFADBjMR4wHAYDVQQDExVj
YTEudGVzdC5jb25mbHVlbnQuaW8xDTALBgNVBAsTBFRFU1QxEjAQBgNVBAoTCUNP
TkZMVUVOVDERMA8GA1UEBxMIUGFsb0FsdG8xCzAJBgNVBAYTAlVTMB4XDTE2MDcw
OTE4MTQyOFoXDTQzMTEyNDE4MTQyOFowaDEjMCEGA1UEAxMaa2Fma2FjYXQudGVz
dC5jb25mbHVlbnQuaW8xDTALBgNVBAsTBFRFU1QxEjAQBgNVBAoTCUNPTkZMVUVO
VDERMA8GA1UEBxMIUGFsb0FsdG8xCzAJBgNVBAYTAlVTMIGfMA0GCSqGSIb3DQEB
AQUAA4GNADCBiQKBgQDGtDOf/EYZY08D82ehsAITjLprXDMGnfuiXxdsiZyqCIxc
JPM6gKtxzU8DnkWTY5xEnWxjIwDjQGXwBCnXaNBq7kgBL3P13rtnX34ZQar49NX3
6RR8IUTM5HxDzxOkmg3aZ2dkKWZU5B1VRTZzWA7mxQEZMPjV8DrhHUa3XdWH7wID
AQABMA0GCSqGSIb3DQEBBQUAA4GBAJHd35NpxhDY43LtmHMqGdObBaiUBuB7jai4
QRzdq7J+bafQ28sIjXo03lV7YRMd9r0gPBXhWymHH838xJh7TnbpHHyJ/CBjVnqG
Mc+cDTMudNWXOrayYeN1WkF/ufP+gJfRl084Lg8BKFaKntRIW/kG/1CniJYRs/JD
amRxX3iB
-----END CERTIFICATE-----
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/kafkacat.client.key
================================================
-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: DES-EDE3-CBC,F8EEC4C2219A7603
Jh4EwEWHP6DwOImmfwjgUODepVQ9RoBD5PZLngTuCMGI8xrWbINGi1ejd3H+61Tx
bwSirRCwKHqr0JVqvaA8VxZEeIleQ+ZqRASEUhnvGU2gTJ7sEF6KD+4i5n4pKBOQ
0/F0rIHLB/slongr1JlTP7F22XC3DXkHsmsjtTvkqAPXh4tgM5SGwlE8sYKqL4Py
moSQBN2+yGsjP96Mg+mskzvSTucKrCJ9cXnTbCQfimgxk9kjNzcrVZN51r5fiQ+Z
0x0muyQh/G9gvDKP4xq6PAUopUNlzPutVm0rcY7SIE3Q0WK65AfMvbLYPulnAJua
JJVYGjF0y/Zze4p7hfJUqzfto5+bwfD/AgwUOzCIpPcARKb7LsYoZNJqqkcuXc00
XQ8evsA/DF9DWXmWNpE6LOqAe5k38c+NYMC7gIYltoFwPr4tq71jkpJfZ+QetzOf
TuxuYjcXf8FHx12JgOybeglFjK85aZ2nDgI+F5yUx7jvp/Qkpo4tJ/uXhr2D4m9U
Q+bcas1PJDv+aYRa9pV09hkHTKb0wlnk3r1Fs0lNjyTKhBqhwrhDwAuaw2uj2tgA
ulX3zHp1vNzTecrDdsUNn99xDLls40Uqh3Wlsg56ck8i18DLRre4fyg6Rk33n369
hobAb1fujIvBpdtzGbeqvo6YGfqb77JVLrr/f3wt62A0ocoYpm7YbD0RFYKC14Tz
ogLii/59KtIaKIeCc/eHs+WiXoyoLqVjMk4/+TXUrKBsuOwxVeOk1FUlJuJpmo5o
3b7ehgVXprn0xO8lC9xjMIWhbRUl6scE6LyYiUyX12XBAif2pMhV9w==
-----END RSA PRIVATE KEY-----
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/krb_server.conf
================================================
[logging]
default = FILE:/var/log/kerberos/krb5libs.log
kdc = FILE:/var/log/kerberos/krb5kdc.log
admin_server = FILE:/var/log/kerberos/kadmind.log
[libdefaults]
default_realm = TEST.CONFLUENT.IO
dns_lookup_realm = false
dns_lookup_kdc = false
ticket_lifetime = 24h
renew_lifetime = 7d
forwardable = true
udp_preference_limit = 1000000
# WARNING: We use weaker key types to simplify testing as stronger key types
# require the enhanced security JCE policy file to be installed. You should
# NOT run with this configuration in production or any real environment. You
# have been warned.
default_tkt_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1
default_tgs_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1
permitted_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1
[realms]
TEST.CONFLUENT.IO = {
kdc = kerberos
admin_server = kerberos
}
[domain_realm]
.TEST.CONFLUENT.IO = TEST.CONFLUENT.IO
TEST.CONFLUENT.IO = TEST.CONFLUENT.IO
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/producer-ca1-signed.crt
================================================
-----BEGIN CERTIFICATE-----
MIIC0zCCAjwCCQC4Ge6Xmxv2bTANBgkqhkiG9w0BAQUFADBjMR4wHAYDVQQDExVj
YTEudGVzdC5jb25mbHVlbnQuaW8xDTALBgNVBAsTBFRFU1QxEjAQBgNVBAoTCUNP
TkZMVUVOVDERMA8GA1UEBxMIUGFsb0FsdG8xCzAJBgNVBAYTAlVTMB4XDTE2MDcw
OTE4MTQzOVoXDTQzMTEyNDE4MTQzOVowdTELMAkGA1UEBhMCVVMxCzAJBgNVBAgT
AkNhMREwDwYDVQQHEwhQYWxvQWx0bzESMBAGA1UEChMJQ09ORkxVRU5UMQ0wCwYD
VQQLEwRURVNUMSMwIQYDVQQDExpwcm9kdWNlci50ZXN0LmNvbmZsdWVudC5pbzCC
ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5M+oKxIzpuCLM3/O/RFTjn
mkdKAvygCkKDrzLvQHaVUQhWtObUOyzxObk+mVj9SS8K1HpGwX88USdQqHuGBsrZ
5OHUU9yS5TJ3J+wNR8Wf/ki84Z/tM8NCKv9MxCnSoy6s9Wk4Lk8S1lvAp9sPQhaX
Y9d55z+j3LNKsa8YJqX6XHJc3XtyFMshY2LsFS/s3YPGYl54tVVvGQJ7qxf7cVhI
/ISH1LVfkhA4XO6KSGf/mBg2XTkcGDO5kHVAnmIfGeZ45B17HAy4UI2LMM0Q7xKm
mgFDMWpdV47JeM2bfP3wNMSREfkepjyZk5PTvevYZJpQJ7/U841RPIkCgE6CCNkC
AwEAATANBgkqhkiG9w0BAQUFAAOBgQAMVY7TqXsMXnoVb1aWwmNruOKfAlubS/sQ
4tfxyY1SMfhBYCRR+ZxlGrXY0GmKfzRUjjaH+8rwHn6WRpI3Qk7IHIU5LO+3jrKh
3DNraRokKBFz35TBmkEJY7Xc5KOzRA3g5739TvDXwKPNtOsI41GpbOl3HSBjnEGG
01c4XSxmeQ==
-----END CERTIFICATE-----
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/producer-ssl.config
================================================
security.protocol=SSL
ssl.truststore.location=/Users/sumit/code/confluent/cp-docker/security/kafka.producer.truststore.jks
ssl.truststore.password=confluent
ssl.keystore.location=/Users/sumit/code/confluent/cp-docker/security/kafka.producer.keystore.jks
ssl.keystore.password=confluent
ssl.key.password=confluent
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/producer_keystore_creds
================================================
confluent
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/producer_sslkey_creds
================================================
confluent
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/producer_truststore_creds
================================================
confluent
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/snakeoil-ca-1.crt
================================================
-----BEGIN CERTIFICATE-----
MIIDDTCCAnagAwIBAgIJAPgq7cn3Z8iiMA0GCSqGSIb3DQEBBQUAMGMxHjAcBgNV
BAMTFWNhMS50ZXN0LmNvbmZsdWVudC5pbzENMAsGA1UECxMEVEVTVDESMBAGA1UE
ChMJQ09ORkxVRU5UMREwDwYDVQQHEwhQYWxvQWx0bzELMAkGA1UEBhMCVVMwHhcN
MTYwNzA5MTgxNDI4WhcNMTcwNzA5MTgxNDI4WjBjMR4wHAYDVQQDExVjYTEudGVz
dC5jb25mbHVlbnQuaW8xDTALBgNVBAsTBFRFU1QxEjAQBgNVBAoTCUNPTkZMVUVO
VDERMA8GA1UEBxMIUGFsb0FsdG8xCzAJBgNVBAYTAlVTMIGfMA0GCSqGSIb3DQEB
AQUAA4GNADCBiQKBgQDt72SR2FPC1HWqghQO8DNlxjPnqgW6RJhDLiA8+iLGPVYc
MoBtsxMtMoWVx7WUciOWO2Az/v92J7QPhO8KkdRIrv4yTRTd/sYPA5Ky4P19Rc3l
Zr+iWB73EBkliVzEkXkC5mS9Qsx83bl32+d0fMk/GccKlgtJ5Ramf0RNB9a5EwID
AQABo4HIMIHFMB0GA1UdDgQWBBR4t05HUOhx4JSsfEF5l6PJ8/pzzzCBlQYDVR0j
BIGNMIGKgBR4t05HUOhx4JSsfEF5l6PJ8/pzz6FnpGUwYzEeMBwGA1UEAxMVY2Ex
LnRlc3QuY29uZmx1ZW50LmlvMQ0wCwYDVQQLEwRURVNUMRIwEAYDVQQKEwlDT05G
TFVFTlQxETAPBgNVBAcTCFBhbG9BbHRvMQswCQYDVQQGEwJVU4IJAPgq7cn3Z8ii
MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEADLxsXBPIddsnW7nLe3Zb
pvVLSf3cwF6SUewgBvxmrcRbuVynsODJc6P0UU8+z6JyjqcYqBoyuupVksBPa1aJ
wtB/5YCRmXnz1Af2P2NrlpSs6R4uJuCd47OuGhgoA4TPTDgDt3j9zDncCh2e6S2A
r8koy4uaoj7cNQMV6OZxgjE=
-----END CERTIFICATE-----
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/secrets/snakeoil-ca-1.key
================================================
-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: DES-EDE3-CBC,1A482A57BA230AD3
B5XZ6aSWAYrUgxLtLB+WlLFRN1EpZohUlKUr5kmEDsr2gaa2ELMuxda6ie84uNoQ
pb5PI0TKhdSf2YY0XBHHxPlZy/1LpRTi0typVE5JSsOOc8EqHE/hTas9C6+/Ipw1
bEybyBdxdvFyY5M7CIhKeEV2FXZnq+enIUi1iiMMmDRMw22VJRXcI+e+MUALUsD/
hcJk+5njYao6Cf8l+zTGl3Yxq9/mPqgQwfYxdTwkRk9P73TL7X8YC+BH1N1efJqS
P6xC0viH2xgrg3j6X0zjGGPRwLYONxtzDrCxJgZOoPKdPGLPtjkxN2JHH40kv0QC
91gdoDnd2n2L4de1hT8T54vwmh8UwGrYARykt+lIMUjZl/Nd715e6uAXQZYyxM4g
g2gHDXwp5EKcLsQdJI7CejEWgUnHQUOPXc2JQHXknmHTclgK2OBjgrFKLAQQIpjN
hbcf4MTOhhPqE9QztnNH9i37rqv+SWyhiSKWODpuruaT9+gXnUAZx7bViNKfC0zB
yQRxozCEr99CaOvZwClizDsz9kCMWbCMw5g9ISyIHm3oMf+sPwZQVT7i+z9+lGDO
jEp9Aoj+n6ywqVfVZAjE3JzvyX84jtX8K7X608JFAtG05wf0Fmb9QwrtL0gOL7C2
UuDcKYin2tlOX5JQ2CxWlElA4sCNMufvsYL492CYo+gZ0FxVptyu/r3swCujqKqd
IyYIgHiNRbgWeZ2QEom9GFWxVnWaUW7807tk+p++3tnYyhQY/lA35cmv2RK6jYR8
QXv9Vc4159fLEPwxBgHC2njgXU2USN+XvNI3n98KUtmNRF0S7a+npQ==
-----END RSA PRIVATE KEY-----
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/standalone-config.yml
================================================
---
version: '2'
services:
zookeeper:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_SERVER_ID: 1
ZOOKEEPER_TICK_TIME: 2000
ZOOKEEPER_CLIENT_PORT: 2181
labels:
- io.confluent.docker.testing=true
failing-config:
image: confluentinc/cp-kafka:latest
labels:
- io.confluent.docker.testing=true
failing-config-zk-connect:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 1
labels:
- io.confluent.docker.testing=true
failing-config-adv-listeners:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: something
labels:
- io.confluent.docker.testing=true
failing-config-adv-port:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://foo:9092
KAFKA_ADVERTISED_PORT: kafka
labels:
- io.confluent.docker.testing=true
failing-config-adv-hostname:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://foo:9092
KAFKA_ADVERTISED_HOST: kafka
labels:
- io.confluent.docker.testing=true
failing-config-port:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://foo:9092
KAFKA_PORT: kafka
labels:
- io.confluent.docker.testing=true
failing-config-ssl-keystore:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_LISTENERS: SSL://foo:9092
labels:
- io.confluent.docker.testing=true
failing-config-ssl-key-password:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_SSL_KEYSTORE_FILENAME: kafka.broker1.keystore.jks
KAFKA_ADVERTISED_LISTENERS: SSL://foo:9092
volumes:
- /tmp/kafka-config-test/secrets:/etc/kafka/secrets
labels:
- io.confluent.docker.testing=true
failing-config-ssl-keystore-password:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_SSL_KEYSTORE_FILENAME: kafka.broker1.keystore.jks
KAFKA_SSL_KEY_CREDENTIALS: broker1_sslkey_creds
KAFKA_ADVERTISED_LISTENERS: SSL://foo:9092
volumes:
- /tmp/kafka-config-test/secrets:/etc/kafka/secrets
labels:
- io.confluent.docker.testing=true
failing-config-ssl-truststore:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_LISTENERS: SSL://foo:9092
KAFKA_SSL_KEYSTORE_FILENAME: kafka.broker1.keystore.jks
KAFKA_SSL_KEYSTORE_CREDENTIALS: broker1_keystore_creds
KAFKA_SSL_KEY_CREDENTIALS: broker1_sslkey_creds
volumes:
- /tmp/kafka-config-test/secrets:/etc/kafka/secrets
labels:
- io.confluent.docker.testing=true
failing-config-sasl-jaas:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_LISTENERS: SASL_PLAINTEXT://foo:9092
labels:
- io.confluent.docker.testing=true
failing-config-sasl-missing-prop:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_LISTENERS: SASL_PLAINTEXT://foo:9092
KAFKA_OPTS: blah
labels:
- io.confluent.docker.testing=true
failing-config-ssl-truststore-password:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_LISTENERS: SSL://foo:9092
KAFKA_SSL_KEYSTORE_FILENAME: kafka.broker1.keystore.jks
KAFKA_SSL_KEYSTORE_CREDENTIALS: broker1_keystore_creds
KAFKA_SSL_KEY_CREDENTIALS: broker1_sslkey_creds
KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.broker1.truststore.jks
volumes:
- /tmp/kafka-config-test/secrets:/etc/kafka/secrets
labels:
- io.confluent.docker.testing=true
failing-config-host:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://foo:9092
KAFKA_HOST: kafka
labels:
- io.confluent.docker.testing=true
default-config:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181/defaultconfig
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://default-config:9092
labels:
- io.confluent.docker.testing=true
full-config:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181/fullconfig
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://full-config:9092
KAFKA_LOG4J_LOGGERS: kafka.controller=WARN,kafka.foo.bar=DEBUG
KAFKA_LOG4J_ROOT_LOGLEVEL: WARN
KAFKA_TOOLS_LOG4J_LOGLEVEL: ERROR
labels:
- io.confluent.docker.testing=true
external-volumes:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181/externalvolumes
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://external-volumes:9092
volumes:
- /tmp/kafka-config-test/data:/var/lib/kafka/data
labels:
- io.confluent.docker.testing=true
random-user:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181/randomuser
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://random-user:9092
user: '12345'
labels:
- io.confluent.docker.testing=true
kitchen-sink:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181/kitchensink
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kitchen-sink:9092
CONFLUENT_SUPPORT_METRICS_ENABLE: 'false'
CONFLUENT_SUPPORT_CUSTOMER_ID: c0
volumes:
- /tmp/kafka-config-kitchen-sink-test/data:/var/lib/kafka/data
user: '12345'
labels:
- io.confluent.docker.testing=true
ssl-config:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181/sslconfig
KAFKA_ADVERTISED_LISTENERS: SSL://ssl-config:9092
KAFKA_SSL_KEYSTORE_FILENAME: kafka.broker1.keystore.jks
KAFKA_SSL_KEYSTORE_CREDENTIALS: broker1_keystore_creds
KAFKA_SSL_KEY_CREDENTIALS: broker1_sslkey_creds
KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.broker1.truststore.jks
KAFKA_SSL_TRUSTSTORE_CREDENTIALS: broker1_truststore_creds
KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SSL
volumes:
- /tmp/kafka-config-test/secrets:/etc/kafka/secrets
labels:
- io.confluent.docker.testing=true
kerberos:
image: confluentinc/cp-kerberos
environment:
BOOTSTRAP: 0
volumes:
- /tmp/kafka-config-test/secrets:/tmp/keytab
- /dev/urandom:/dev/random
labels:
- io.confluent.docker.testing=true
sasl-ssl-config:
image: confluentinc/cp-kafka:latest
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181/sslsaslconfig
KAFKA_ADVERTISED_LISTENERS: SSL://sasl-ssl-config:9092,SASL_SSL://sasl-ssl-config:9094
KAFKA_SSL_KEYSTORE_FILENAME: kafka.broker1.keystore.jks
KAFKA_SSL_KEYSTORE_CREDENTIALS: broker1_keystore_creds
KAFKA_SSL_KEY_CREDENTIALS: broker1_sslkey_creds
KAFKA_SSL_TRUSTSTORE_FILENAME: kafka.broker1.truststore.jks
KAFKA_SSL_TRUSTSTORE_CREDENTIALS: broker1_truststore_creds
KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SASL_SSL
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: GSSAPI
KAFKA_SASL_ENABLED_MECHANISMS: GSSAPI
KAFKA_SASL_KERBEROS_SERVICE_NAME: kafka
ZOOKEEPER_SASL_ENABLED: "FALSE"
KAFKA_OPTS: -Djava.security.auth.login.config=/etc/kafka/secrets/config_server1_jaas.conf
-Djava.security.krb5.conf=/etc/kafka/secrets/config_krb.conf
-Dsun.security.krb5.debug=true
volumes:
- /tmp/kafka-config-test/secrets:/etc/kafka/secrets
labels:
- io.confluent.docker.testing=true
================================================
FILE: kraft/none/image/kafka-images/kafka/test/fixtures/standalone-network.yml
================================================
---
version: '2'
networks:
zk:
driver: bridge
services:
zookeeper-bridge:
image: confluentinc/cp-zookeeper:latest
networks:
- zk
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
ports:
- 22181:2181
- 32888:2888
- 33888:3888
labels:
- io.confluent.docker.testing=true
kafka-bridge:
image: confluentinc/cp-kafka:latest
networks:
- zk
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper-bridge:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:19092
ports:
- 19092:19092
labels:
- io.confluent.docker.testing=true
zookeeper-host:
image: confluentinc/cp-zookeeper:latest
network_mode: host
environment:
ZOOKEEPER_CLIENT_PORT: 32181
ZOOKEEPER_TICK_TIME: 2000
labels:
- io.confluent.docker.testing=true
kafka-host:
image: confluentinc/cp-kafka:latest
network_mode: host
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: localhost:32181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:29092
labels:
- io.confluent.docker.testing=true
kafka-bridged-jmx:
image: confluentinc/cp-kafka:latest
networks:
- zk
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper-bridge:2181/jmx
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:19092
KAFKA_JMX_PORT: 9999
ports:
- 9999:9999
labels:
- io.confluent.docker.testing=true
kafka-host-jmx:
image: confluentinc/cp-kafka:latest
network_mode: host
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: localhost:32181/jmx
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:39092
KAFKA_JMX_PORT: 39999
labels:
- io.confluent.docker.testing=true
================================================
FILE: kraft/none/image/kafka-images/kafka/test/test_kafka.py
================================================
import os
import unittest
import utils
import time
import string
import json
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
FIXTURES_DIR = os.path.join(CURRENT_DIR, "fixtures", "debian", "kafka")

# Shell-command templates, filled in with str.format() and executed inside
# test containers.  Probe templates echo PASS or FAIL so callers only need to
# check for the substring "PASS" in the captured output.

# Copy the broker properties, append the client-side security protocol, then
# wait up to 40s for {brokers} brokers to be ready at {host}:{port}.
HEALTH_CHECK = """bash -c 'cp /etc/kafka/kafka.properties /tmp/cub.properties \
&& echo security.protocol={security_protocol} >> /tmp/cub.properties \
&& cub kafka-ready {brokers} 40 -b {host}:{port} -c /tmp/cub.properties -s {security_protocol}\
&& echo PASS || echo FAIL'
"""
# Wait up to 40s for the ZooKeeper ensemble at {servers} to be ready.
ZK_READY = "bash -c 'cub zk-ready {servers} 40 && echo PASS || echo FAIL'"
# Dump broker metadata as JSON via kafkacat (-L list, -J JSON output).
KAFKA_CHECK = "bash -c 'kafkacat -L -b {host}:{port} -J' "
# Same metadata dump over SASL_SSL using Kerberos credentials from the
# mounted secrets directory.
KAFKA_SASL_SSL_CHECK = """bash -c "kafkacat -X 'security.protocol=sasl_ssl' \
-X 'ssl.ca.location=/etc/kafka/secrets/snakeoil-ca-1.crt' \
-X 'ssl.certificate.location=/etc/kafka/secrets/kafkacat-ca1-signed.pem' \
-X 'ssl.key.location=/etc/kafka/secrets/kafkacat.client.key' \
-X 'ssl.key.password=confluent' \
-X 'sasl.kerberos.service.name={broker_principal}' \
-X 'sasl.kerberos.keytab=/etc/kafka/secrets/{client_principal}.keytab' \
-X 'sasl.kerberos.principal={client_principal}/{client_host}' \
-L -b {host}:{port} -J "
"""
# Metadata dump over plain SSL (snakeoil test certificates).
KAFKA_SSL_CHECK = """kafkacat -X security.protocol=ssl \
-X ssl.ca.location=/etc/kafka/secrets/snakeoil-ca-1.crt \
-X ssl.certificate.location=/etc/kafka/secrets/kafkacat-ca1-signed.pem \
-X ssl.key.location=/etc/kafka/secrets/kafkacat.client.key \
-X ssl.key.password=confluent \
-L -b {host}:{port} -J"""
# Create a random-key principal in the test KDC and export its keytab to
# /tmp/keytab/{filename}.keytab.
# BUG FIX: the keytab path previously contained a mangled literal instead of
# the {filename} placeholder, so every caller's filename= argument was
# ignored and all keytabs collided on one bogus file name.
KADMIN_KEYTAB_CREATE = """bash -c \
'kadmin.local -q "addprinc -randkey {principal}/{hostname}@TEST.CONFLUENT.IO" && \
kadmin.local -q "ktadd -norandkey -k /tmp/keytab/{filename}.keytab {principal}/{hostname}@TEST.CONFLUENT.IO"'
"""
# Create {topic} and produce {messages} sequential integers to it.
# NOTE(review): the producer line is duplicated, so 2x{messages} records are
# actually written -- presumably deliberate (consumers cap via --max-messages),
# but worth confirming.
PRODUCER = """bash -c "\
kafka-topics --create --topic {topic} --partitions 1 --replication-factor 3 --if-not-exists --zookeeper $KAFKA_ZOOKEEPER_CONNECT \
&& seq {messages} | kafka-console-producer --broker-list {brokers} --topic {topic} --producer.config /etc/kafka/secrets/{config} \
&& seq {messages} | kafka-console-producer --broker-list {brokers} --topic {topic} --producer.config /etc/kafka/secrets/{config} \
&& echo PRODUCED {messages} messages."
"""
# Consume {messages} records from {topic} with DEBUG tool logging.
# (Previously hard-coded --topic foo, silently ignoring the topic argument.)
CONSUMER = """bash -c "\
export KAFKA_TOOLS_LOG4J_LOGLEVEL=DEBUG \
&& dub template "/etc/confluent/docker/tools-log4j.properties.template" "/etc/kafka/tools-log4j.properties" \
&& kafka-console-consumer --bootstrap-server {brokers} --topic {topic} --from-beginning --consumer.config /etc/kafka/secrets/{config} --max-messages {messages}"
"""
# SSL consumer via kafkacat: read {messages} records from {topic}.
KAFKACAT_SSL_CONSUMER = """kafkacat -X security.protocol=ssl \
-X ssl.ca.location=/etc/kafka/secrets/snakeoil-ca-1.crt \
-X ssl.certificate.location=/etc/kafka/secrets/kafkacat-ca1-signed.pem \
-X ssl.key.location=/etc/kafka/secrets/kafkacat.client.key \
-X ssl.key.password=confluent \
-b {brokers} \
-C -t {topic} -c {messages}
"""
# Plaintext end-to-end check: create {topic}, produce, then consume back.
# (The consumer previously hard-coded --topic foo; now uses {topic}.)
PLAIN_CLIENTS = """bash -c "\
export KAFKA_TOOLS_LOG4J_LOGLEVEL=DEBUG \
&& dub template /etc/confluent/docker/tools-log4j.properties.template /etc/kafka/tools-log4j.properties \
&& kafka-topics --create --topic {topic} --partitions 1 --replication-factor 3 --if-not-exists --zookeeper $KAFKA_ZOOKEEPER_CONNECT \
&& seq {messages} | kafka-console-producer --broker-list {brokers} --topic {topic} \
&& echo PRODUCED {messages} messages. \
&& kafka-console-consumer --bootstrap-server {brokers} --topic {topic} --from-beginning --max-messages {messages}"
"""
# Read the broker's app-info Version MBean through jmxterm.
JMX_CHECK = """bash -c "\
echo 'get -b kafka.server:id=1,type=app-info Version' |
java -jar jmxterm-1.0-alpha-4-uber.jar -l {jmx_hostname}:{jmx_port} -n -v silent "
"""
class ConfigTest(unittest.TestCase):
    """Validates the cp-kafka image's configuration rendering: required-property
    failures, default and fully-specified kafka/log4j properties, external
    volumes, arbitrary userids, and SSL/SASL listener setups.  Drives a
    docker-machine host over SSH via the project-local utils module."""

    @classmethod
    def setUpClass(cls):
        # DOCKER_MACHINE_NAME must point at the docker-machine VM the fixtures run on.
        machine_name = os.environ["DOCKER_MACHINE_NAME"]
        cls.machine = utils.TestMachine(machine_name)
        # Create directories with the correct permissions for test with userid and external volumes.
        cls.machine.ssh("mkdir -p /tmp/kafka-config-kitchen-sink-test/data")
        cls.machine.ssh("sudo chown -R 12345 /tmp/kafka-config-kitchen-sink-test/data")
        # Copy SSL files.
        cls.machine.ssh("mkdir -p /tmp/kafka-config-test/secrets")
        local_secrets_dir = os.path.join(FIXTURES_DIR, "secrets")
        cls.machine.scp_to_machine(local_secrets_dir, "/tmp/kafka-config-test")
        cls.cluster = utils.TestCluster("config-test", FIXTURES_DIR, "standalone-config.yml")
        cls.cluster.start()
        # Create keytabs for the SASL_SSL broker in the test KDC.
        cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="broker1", principal="kafka", hostname="sasl-ssl-config"))
        assert "PASS" in cls.cluster.run_command_on_service("zookeeper", ZK_READY.format(servers="localhost:2181"))

    @classmethod
    def tearDownClass(cls):
        cls.cluster.shutdown()
        cls.machine.ssh("sudo rm -rf /tmp/kafka-config-kitchen-sink-test")
        cls.machine.ssh("sudo rm -rf /tmp/kafka-config-test/secrets")

    @classmethod
    def is_kafka_healthy_for_service(cls, service, port, num_brokers, host="localhost", security_protocol="PLAINTEXT"):
        # Run the cub kafka-ready probe inside `service`; the probe echoes PASS/FAIL.
        output = cls.cluster.run_command_on_service(service, HEALTH_CHECK.format(host=host, port=port, brokers=num_brokers, security_protocol=security_protocol))
        assert "PASS" in output

    def test_required_config_failure(self):
        # Each failing-config-* service is expected to exit with a specific
        # error message in its (stopped) container logs.
        self.assertTrue("KAFKA_ZOOKEEPER_CONNECT is required." in self.cluster.service_logs("failing-config-zk-connect", stopped=True))
        self.assertTrue("KAFKA_ADVERTISED_LISTENERS is required." in self.cluster.service_logs("failing-config-adv-listeners", stopped=True))
        # Deprecated props.
        self.assertTrue("advertised.host is deprecated. Please use KAFKA_ADVERTISED_LISTENERS instead." in self.cluster.service_logs("failing-config-adv-hostname", stopped=True))
        self.assertTrue("advertised.port is deprecated. Please use KAFKA_ADVERTISED_LISTENERS instead." in self.cluster.service_logs("failing-config-adv-port", stopped=True))
        self.assertTrue("port is deprecated. Please use KAFKA_ADVERTISED_LISTENERS instead." in self.cluster.service_logs("failing-config-port", stopped=True))
        self.assertTrue("host is deprecated. Please use KAFKA_ADVERTISED_LISTENERS instead." in self.cluster.service_logs("failing-config-host", stopped=True))
        # SSL
        self.assertTrue("KAFKA_SSL_KEYSTORE_FILENAME is required." in self.cluster.service_logs("failing-config-ssl-keystore", stopped=True))
        self.assertTrue("KAFKA_SSL_KEYSTORE_CREDENTIALS is required." in self.cluster.service_logs("failing-config-ssl-keystore-password", stopped=True))
        self.assertTrue("KAFKA_SSL_KEY_CREDENTIALS is required." in self.cluster.service_logs("failing-config-ssl-key-password", stopped=True))
        self.assertTrue("KAFKA_SSL_TRUSTSTORE_FILENAME is required." in self.cluster.service_logs("failing-config-ssl-truststore", stopped=True))
        self.assertTrue("KAFKA_SSL_TRUSTSTORE_CREDENTIALS is required." in self.cluster.service_logs("failing-config-ssl-truststore-password", stopped=True))
        self.assertTrue("KAFKA_OPTS is required." in self.cluster.service_logs("failing-config-sasl-jaas", stopped=True))
        self.assertTrue("KAFKA_OPTS should contain 'java.security.auth.login.config' property." in self.cluster.service_logs("failing-config-sasl-missing-prop", stopped=True))

    def test_default_config(self):
        # Rendered kafka.properties for a minimally-configured broker, compared
        # whitespace-insensitively (py2 str.translate strips all whitespace).
        self.is_kafka_healthy_for_service("default-config", 9092, 1)
        props = self.cluster.run_command_on_service("default-config", "bash -c 'cat /etc/kafka/kafka.properties | sort'")
        expected = """
advertised.listeners=PLAINTEXT://default-config:9092
listeners=PLAINTEXT://0.0.0.0:9092
log.dirs=/var/lib/kafka/data
zookeeper.connect=zookeeper:2181/defaultconfig
"""
        self.assertEquals(props.translate(None, string.whitespace), expected.translate(None, string.whitespace))
        # Broker id 1001 is expected here -- presumably auto-assigned since no
        # KAFKA_BROKER_ID is set for this service; confirm against the fixture.
        logs = utils.run_docker_command(
            image="confluentinc/cp-kafkacat",
            command=KAFKA_CHECK.format(host="default-config", port=9092),
            host_config={'NetworkMode': 'config-test_default'})
        parsed_logs = json.loads(logs)
        expected_brokers = [{"id": 1001, "name": "default-config:9092"}]
        self.assertEquals(sorted(expected_brokers), sorted(parsed_logs["brokers"]))

    def test_default_logging_config(self):
        # Default log4j.properties and tools-log4j.properties rendering.
        self.is_kafka_healthy_for_service("default-config", 9092, 1)
        log4j_props = self.cluster.run_command_on_service("default-config", "cat /etc/kafka/log4j.properties")
        expected_log4j_props = """log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.logger.kafka.authorizer.logger=WARN
log4j.logger.kafka.log.LogCleaner=INFO
log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG
log4j.logger.kafka.controller=TRACE
log4j.logger.kafka.network.RequestChannel$=WARN
log4j.logger.kafka.request.logger=WARN
log4j.logger.state.change.logger=TRACE
log4j.logger.kafka=INFO
"""
        self.assertEquals(log4j_props.translate(None, string.whitespace), expected_log4j_props.translate(None, string.whitespace))
        tools_log4j_props = self.cluster.run_command_on_service("default-config", "cat /etc/kafka/tools-log4j.properties")
        expected_tools_log4j_props = """log4j.rootLogger=WARN, stderr
log4j.appender.stderr=org.apache.log4j.ConsoleAppender
log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.stderr.Target=System.err
"""
        self.assertEquals(tools_log4j_props.translate(None, string.whitespace), expected_tools_log4j_props.translate(None, string.whitespace))

    def test_full_config(self):
        # A broker with explicit broker.id should surface it in kafka.properties.
        self.is_kafka_healthy_for_service("full-config", 9092, 1)
        props = self.cluster.run_command_on_service("full-config", "bash -c 'cat /etc/kafka/kafka.properties | sort'")
        expected = """
advertised.listeners=PLAINTEXT://full-config:9092
broker.id=1
listeners=PLAINTEXT://0.0.0.0:9092
log.dirs=/var/lib/kafka/data
zookeeper.connect=zookeeper:2181/fullconfig
"""
        self.assertEquals(props.translate(None, string.whitespace), expected.translate(None, string.whitespace))

    def test_full_logging_config(self):
        # Overridden log levels (root WARN, controller WARN, custom kafka.foo.bar=DEBUG).
        self.is_kafka_healthy_for_service("full-config", 9092, 1)
        log4j_props = self.cluster.run_command_on_service("full-config", "cat /etc/kafka/log4j.properties")
        expected_log4j_props = """log4j.rootLogger=WARN, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.logger.kafka.authorizer.logger=WARN
log4j.logger.kafka.log.LogCleaner=INFO
log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG
log4j.logger.kafka.controller=WARN
log4j.logger.kafka.network.RequestChannel$=WARN
log4j.logger.kafka.request.logger=WARN
log4j.logger.state.change.logger=TRACE
log4j.logger.kafka.foo.bar=DEBUG
log4j.logger.kafka=INFO
"""
        self.assertEquals(log4j_props.translate(None, string.whitespace), expected_log4j_props.translate(None, string.whitespace))
        tools_log4j_props = self.cluster.run_command_on_service("full-config", "cat /etc/kafka/tools-log4j.properties")
        expected_tools_log4j_props = """log4j.rootLogger=ERROR, stderr
log4j.appender.stderr=org.apache.log4j.ConsoleAppender
log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
log4j.appender.stderr.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.stderr.Target=System.err
"""
        self.assertEquals(tools_log4j_props.translate(None, string.whitespace), expected_tools_log4j_props.translate(None, string.whitespace))

    def test_volumes(self):
        # Broker with an externally mounted data volume must come up healthy.
        self.is_kafka_healthy_for_service("external-volumes", 9092, 1)

    def test_random_user(self):
        # Broker running under an arbitrary (non-default) userid must come up healthy.
        self.is_kafka_healthy_for_service("random-user", 9092, 1)

    def test_kitchen_sink(self):
        # Everything at once: external volume, userid, metrics disabled, custom id.
        self.is_kafka_healthy_for_service("kitchen-sink", 9092, 1)
        zk_props = self.cluster.run_command_on_service("kitchen-sink", "bash -c 'cat /etc/kafka/kafka.properties | sort'")
        expected = """
advertised.listeners=PLAINTEXT://kitchen-sink:9092
broker.id=1
confluent.support.customer.id=c0
confluent.support.metrics.enable=false
listeners=PLAINTEXT://0.0.0.0:9092
log.dirs=/var/lib/kafka/data
zookeeper.connect=zookeeper:2181/kitchensink
"""
        self.assertEquals(zk_props.translate(None, string.whitespace), expected.translate(None, string.whitespace))

    def test_ssl_config(self):
        # SSL-only listener: keystore/truststore paths and credentials must be rendered.
        self.is_kafka_healthy_for_service("ssl-config", 9092, 1, "ssl-config", "SSL")
        zk_props = self.cluster.run_command_on_service("ssl-config", "bash -c 'cat /etc/kafka/kafka.properties | sort'")
        expected = """
advertised.listeners=SSL://ssl-config:9092
broker.id=1
listeners=SSL://0.0.0.0:9092
log.dirs=/var/lib/kafka/data
security.inter.broker.protocol=SSL
ssl.key.credentials=broker1_sslkey_creds
ssl.key.password=confluent
ssl.keystore.credentials=broker1_keystore_creds
ssl.keystore.filename=kafka.broker1.keystore.jks
ssl.keystore.location=/etc/kafka/secrets/kafka.broker1.keystore.jks
ssl.keystore.password=confluent
ssl.truststore.credentials=broker1_truststore_creds
ssl.truststore.filename=kafka.broker1.truststore.jks
ssl.truststore.location=/etc/kafka/secrets/kafka.broker1.truststore.jks
ssl.truststore.password=confluent
zookeeper.connect=zookeeper:2181/sslconfig
"""
        self.assertEquals(zk_props.translate(None, string.whitespace), expected.translate(None, string.whitespace))

    def test_sasl_config(self):
        # Dual SSL + SASL_SSL listeners with GSSAPI (Kerberos) enabled.
        self.is_kafka_healthy_for_service("sasl-ssl-config", 9094, 1, "sasl-ssl-config", "SASL_SSL")
        zk_props = self.cluster.run_command_on_service("sasl-ssl-config", "bash -c 'cat /etc/kafka/kafka.properties | sort'")
        expected = """
advertised.listeners=SSL://sasl-ssl-config:9092,SASL_SSL://sasl-ssl-config:9094
broker.id=1
listeners=SSL://0.0.0.0:9092,SASL_SSL://0.0.0.0:9094
log.dirs=/var/lib/kafka/data
sasl.enabled.mechanisms=GSSAPI
sasl.kerberos.service.name=kafka
sasl.mechanism.inter.broker.protocol=GSSAPI
security.inter.broker.protocol=SASL_SSL
ssl.key.credentials=broker1_sslkey_creds
ssl.key.password=confluent
ssl.keystore.credentials=broker1_keystore_creds
ssl.keystore.filename=kafka.broker1.keystore.jks
ssl.keystore.location=/etc/kafka/secrets/kafka.broker1.keystore.jks
ssl.keystore.password=confluent
ssl.truststore.credentials=broker1_truststore_creds
ssl.truststore.filename=kafka.broker1.truststore.jks
ssl.truststore.location=/etc/kafka/secrets/kafka.broker1.truststore.jks
ssl.truststore.password=confluent
zookeeper.connect=zookeeper:2181/sslsaslconfig
"""
        self.assertEquals(zk_props.translate(None, string.whitespace), expected.translate(None, string.whitespace))
class StandaloneNetworkingTest(unittest.TestCase):
    """Smoke-tests single-broker fixtures on both a bridge network and the
    host network, including JMX reachability from outside the containers."""

    @classmethod
    def setUpClass(cls):
        cls.cluster = utils.TestCluster("standalone-network-test", FIXTURES_DIR, "standalone-network.yml")
        cls.cluster.start()
        # Both ZooKeeper instances (bridged and host-networked) must be up
        # before any broker health check runs.
        for service, servers in (("zookeeper-bridge", "localhost:2181"),
                                 ("zookeeper-host", "localhost:32181")):
            assert "PASS" in cls.cluster.run_command_on_service(service, ZK_READY.format(servers=servers))

    @classmethod
    def tearDownClass(cls):
        cls.cluster.shutdown()

    @classmethod
    def is_kafka_healthy_for_service(cls, service, port, num_brokers, host="localhost", security_protocol="PLAINTEXT"):
        # Run the cub kafka-ready probe inside `service`; it echoes PASS/FAIL.
        probe = HEALTH_CHECK.format(host=host, port=port, brokers=num_brokers, security_protocol=security_protocol)
        assert "PASS" in cls.cluster.run_command_on_service(service, probe)

    def _assert_single_broker_visible(self, port):
        # Query broker metadata from outside the container: kafkacat on the
        # host network hitting the advertised localhost listener.
        raw = utils.run_docker_command(
            image="confluentinc/cp-kafkacat",
            command=KAFKA_CHECK.format(host="localhost", port=port),
            host_config={'NetworkMode': 'host'})
        brokers = json.loads(raw)["brokers"]
        self.assertEquals(1, len(brokers))
        self.assertEquals(1, brokers[0]["id"])
        self.assertEquals("localhost:%s" % port, brokers[0]["name"])

    def test_bridged_network(self):
        # Inside the container first, then from the outside.
        self.is_kafka_healthy_for_service("kafka-bridge", 19092, 1)
        self._assert_single_broker_visible(19092)

    def test_host_network(self):
        # Inside the container first, then from the outside.
        self.is_kafka_healthy_for_service("kafka-host", 29092, 1)
        self._assert_single_broker_visible(29092)

    def _assert_jmx_version(self, jmx_hostname, jmx_port, network_mode):
        # Read the broker's app-info Version MBean via jmxterm from outside
        # the broker container.
        output = utils.run_docker_command(
            image="confluentinc/cp-jmxterm",
            command=JMX_CHECK.format(jmx_hostname=jmx_hostname, jmx_port=jmx_port),
            host_config={'NetworkMode': network_mode})
        self.assertTrue("Version = 0.11.0.0-cp1;" in output)

    def test_jmx_host_network(self):
        self._assert_jmx_version("localhost", "39999", 'host')

    def test_jmx_bridged_network(self):
        self._assert_jmx_version("kafka-bridged-jmx", "9999", 'standalone-network-test_zk')
class ClusterBridgedNetworkTest(unittest.TestCase):
    """3-broker / 3-ZooKeeper PLAINTEXT cluster on a user-defined bridge
    network; verifies metadata from inside and outside the containers and an
    end-to-end produce/consume round trip.  Base class for the SSL and SASL
    bridged variants below."""

    @classmethod
    def setUpClass(cls):
        cls.cluster = utils.TestCluster("cluster-test", FIXTURES_DIR, "cluster-bridged-plain.yml")
        cls.cluster.start()
        # Wait for the full ZooKeeper ensemble before touching the brokers.
        assert "PASS" in cls.cluster.run_command_on_service("zookeeper-1", ZK_READY.format(servers="zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181"))

    @classmethod
    def tearDownClass(cls):
        cls.cluster.shutdown()

    def test_cluster_running(self):
        self.assertTrue(self.cluster.is_running())

    @classmethod
    def is_kafka_healthy_for_service(cls, service, port, num_brokers, host="localhost", security_protocol="PLAINTEXT"):
        # Run the cub kafka-ready probe inside `service`; it echoes PASS/FAIL.
        output = cls.cluster.run_command_on_service(service, HEALTH_CHECK.format(host=host, port=port, brokers=num_brokers, security_protocol=security_protocol))
        assert "PASS" in output

    def test_bridge_network(self):
        # Test from within the container
        self.is_kafka_healthy_for_service("kafka-1", 9092, 3)
        # Test from outside the container
        logs = utils.run_docker_command(
            image="confluentinc/cp-kafkacat",
            command=KAFKA_CHECK.format(host="kafka-1", port=9092),
            host_config={'NetworkMode': 'cluster-test_zk'})
        parsed_logs = json.loads(logs)
        self.assertEquals(3, len(parsed_logs["brokers"]))
        expected_brokers = [{"id": 1, "name": "kafka-1:9092"}, {"id": 2, "name": "kafka-2:9092"}, {"id": 3, "name": "kafka-3:9092"}]
        self.assertEquals(sorted(expected_brokers), sorted(parsed_logs["brokers"]))
        # End-to-end produce/consume of 100 messages.  The leading positional
        # 300 is presumably a timeout in seconds -- TODO confirm in utils.
        client_logs = utils.run_docker_command(
            300,
            image="confluentinc/cp-kafka",
            name="kafka-producer",
            environment={'KAFKA_ZOOKEEPER_CONNECT': "zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181"},
            command=PLAIN_CLIENTS.format(brokers="kafka-1:9092", topic="foo", messages=100),
            host_config={'NetworkMode': 'cluster-test_zk'})
        self.assertTrue("Processed a total of 100 messages" in client_logs)
class ClusterSSLBridgedNetworkTest(ClusterBridgedNetworkTest):
    """SSL variant of the bridged cluster test: brokers listen on SSL://:9093
    and clients authenticate with the snakeoil certificates copied into the
    docker-machine host.  Inherits test_cluster_running and the health probe."""

    @classmethod
    def setUpClass(cls):
        machine_name = os.environ["DOCKER_MACHINE_NAME"]
        cls.machine = utils.TestMachine(machine_name)
        # Copy SSL files.  (py2 print statement -- this file targets Python 2.)
        print cls.machine.ssh("mkdir -p /tmp/kafka-cluster-bridge-test/secrets")
        local_secrets_dir = os.path.join(FIXTURES_DIR, "secrets")
        cls.machine.scp_to_machine(local_secrets_dir, "/tmp/kafka-cluster-bridge-test")
        cls.cluster = utils.TestCluster("cluster-test", FIXTURES_DIR, "cluster-bridged-ssl.yml")
        cls.cluster.start()
        assert "PASS" in cls.cluster.run_command_on_service("zookeeper-1", ZK_READY.format(servers="zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181"))

    @classmethod
    def tearDownClass(cls):
        cls.cluster.shutdown()
        cls.machine.ssh("sudo rm -rf /tmp/kafka-cluster-bridge-test/secrets")

    def test_bridge_network(self):
        # Overrides the base test to use the SSL listener and kafkacat with
        # client certificates mounted at /etc/kafka/secrets.
        # Test from within the container
        self.is_kafka_healthy_for_service("kafka-ssl-1", 9093, 3, "kafka-ssl-1", "SSL")
        # Test from outside the container
        logs = utils.run_docker_command(
            image="confluentinc/cp-kafkacat",
            command=KAFKA_SSL_CHECK.format(host="kafka-ssl-1", port=9093),
            host_config={'NetworkMode': 'cluster-test_zk', 'Binds': ['/tmp/kafka-cluster-bridge-test/secrets:/etc/kafka/secrets']})
        parsed_logs = json.loads(logs)
        self.assertEquals(3, len(parsed_logs["brokers"]))
        expected_brokers = [{"id": 1, "name": "kafka-ssl-1:9093"}, {"id": 2, "name": "kafka-ssl-2:9093"}, {"id": 3, "name": "kafka-ssl-3:9093"}]
        self.assertEquals(sorted(expected_brokers), sorted(parsed_logs["brokers"]))
        # Produce 100 messages over SSL under a /ssl ZooKeeper chroot.
        producer_logs = utils.run_docker_command(
            300,
            image="confluentinc/cp-kafka",
            name="kafka-ssl-bridged-producer",
            environment={'KAFKA_ZOOKEEPER_CONNECT': "zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181/ssl"},
            command=PRODUCER.format(brokers="kafka-ssl-1:9093", topic="foo", config="bridged.producer.ssl.config", messages=100),
            host_config={'NetworkMode': 'cluster-test_zk', 'Binds': ['/tmp/kafka-cluster-bridge-test/secrets:/etc/kafka/secrets']})
        self.assertTrue("PRODUCED 100 messages" in producer_logs)
        # Consume the first 10 messages back via kafkacat and compare exactly
        # (seq output is "1".."10", newline-joined; xrange => Python 2).
        consumer_logs = utils.run_docker_command(
            300,
            image="confluentinc/cp-kafkacat",
            name="kafkacat-ssl-bridged-consumer",
            command=KAFKACAT_SSL_CONSUMER.format(brokers="kafka-ssl-1:9093", topic="foo", messages=10),
            host_config={'NetworkMode': 'cluster-test_zk', 'Binds': ['/tmp/kafka-cluster-bridge-test/secrets:/etc/kafka/secrets']})
        self.assertEquals("\n".join([str(i + 1) for i in xrange(10)]), consumer_logs.strip())
class ClusterSASLBridgedNetworkTest(ClusterBridgedNetworkTest):
    """SASL_SSL (Kerberos/GSSAPI) variant of the bridged cluster test.
    Provisions broker and client principals in the test KDC, then runs the
    produce/consume round trip with JAAS + krb5 configs from the secrets
    volume."""

    @classmethod
    def setUpClass(cls):
        machine_name = os.environ["DOCKER_MACHINE_NAME"]
        cls.machine = utils.TestMachine(machine_name)
        # Copy SSL files.  (py2 print statement -- this file targets Python 2.)
        print cls.machine.ssh("mkdir -p /tmp/kafka-cluster-bridge-test/secrets")
        local_secrets_dir = os.path.join(FIXTURES_DIR, "secrets")
        cls.machine.scp_to_machine(local_secrets_dir, "/tmp/kafka-cluster-bridge-test")
        cls.cluster = utils.TestCluster("cluster-test", FIXTURES_DIR, "cluster-bridged-sasl.yml")
        cls.cluster.start()
        # Create keytabs: one per broker, plus kafkacat/producer/consumer clients.
        cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="bridged_broker1", principal="kafka", hostname="kafka-sasl-ssl-1"))
        cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="bridged_broker2", principal="kafka", hostname="kafka-sasl-ssl-2"))
        cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="bridged_broker3", principal="kafka", hostname="kafka-sasl-ssl-3"))
        cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="bridged_kafkacat", principal="bridged_kafkacat", hostname="bridged-kafkacat"))
        cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="bridged_producer", principal="bridged_producer", hostname="kafka-sasl-ssl-producer"))
        cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="bridged_consumer", principal="bridged_consumer", hostname="kafka-sasl-ssl-consumer"))
        assert "PASS" in cls.cluster.run_command_on_service("zookeeper-1", ZK_READY.format(servers="zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181"))

    @classmethod
    def tearDownClass(cls):
        cls.cluster.shutdown()
        cls.machine.ssh("sudo rm -rf /tmp/kafka-cluster-bridge-test/secrets")

    def test_bridge_network(self):
        # Overrides the base test for the SASL_SSL listener on port 9094.
        # Test from within the container
        self.is_kafka_healthy_for_service("kafka-sasl-ssl-1", 9094, 3, "kafka-sasl-ssl-1", "SASL_SSL")
        # FIXME: Figure out how to use kafkacat with SASL/Kerberos
        # Test from outside the container
        # logs = utils.run_docker_command(
        #     image="confluentinc/cp-kafkacat",
        #     name="bridged-kafkacat",
        #     command=KAFKA_SASL_SSL_CHECK.format(host="kafka-sasl-ssl-1", port=9094, broker_principal="kafka", client_principal="bridged_kafkacat", client_host="bridged-kafkacat"),
        #     host_config={'NetworkMode': 'cluster-test_zk', 'Binds': ['/tmp/kafka-cluster-bridge-test/secrets:/etc/kafka/secrets', '/tmp/kafka-cluster-bridge-test/secrets/bridged_krb.conf:/etc/krb5.conf']})
        #
        # parsed_logs = json.loads(logs)
        # self.assertEquals(3, len(parsed_logs["brokers"]))
        # expected_brokers = [{"id": 1, "name": "kafka-sasl-ssl-1:9094"}, {"id": 2, "name": "kafka-sasl-ssl-2:9094"}, {"id": 3, "name": "kafka-sasl-ssl-3:9094"}]
        # self.assertEquals(sorted(expected_brokers), sorted(parsed_logs["brokers"]))
        # Producer authenticates via its JAAS/krb5 configs passed in KAFKA_OPTS.
        producer_env = {'KAFKA_ZOOKEEPER_CONNECT': "zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181/saslssl",
                        'KAFKA_OPTS': "-Djava.security.auth.login.config=/etc/kafka/secrets/bridged_producer_jaas.conf -Djava.security.krb5.conf=/etc/kafka/secrets/bridged_krb.conf -Dsun.net.spi.nameservice.provider.1=sun -Dsun.security.krb5.debug=true"}
        producer_logs = utils.run_docker_command(
            300,
            image="confluentinc/cp-kafka",
            name="kafka-sasl-ssl-bridged-producer",
            environment=producer_env,
            command=PRODUCER.format(brokers="kafka-sasl-ssl-1:9094", topic="foo", config="bridged.producer.ssl.sasl.config", messages=100),
            host_config={'NetworkMode': 'cluster-test_zk', 'Binds': ['/tmp/kafka-cluster-bridge-test/secrets:/etc/kafka/secrets']})
        self.assertTrue("PRODUCED 100 messages" in producer_logs)
        # Consumer uses its own principal's JAAS config; reads the first 10 messages.
        consumer_env = {'KAFKA_ZOOKEEPER_CONNECT': "zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181/saslssl",
                        'KAFKA_OPTS': "-Djava.security.auth.login.config=/etc/kafka/secrets/bridged_consumer_jaas.conf -Djava.security.krb5.conf=/etc/kafka/secrets/bridged_krb.conf -Dsun.net.spi.nameservice.provider.1=sun -Dsun.security.krb5.debug=true"}
        consumer_logs = utils.run_docker_command(
            300,
            image="confluentinc/cp-kafka",
            name="kafka-sasl-ssl-bridged-consumer",
            environment=consumer_env,
            command=CONSUMER.format(brokers="kafka-sasl-ssl-1:9094", topic="foo", config="bridged.consumer.ssl.sasl.config", messages=10),
            host_config={'NetworkMode': 'cluster-test_zk', 'Binds': ['/tmp/kafka-cluster-bridge-test/secrets:/etc/kafka/secrets']})
        self.assertTrue("Processed a total of 10 messages" in consumer_logs)
class ClusterHostNetworkTest(unittest.TestCase):
    """3-broker / 3-ZooKeeper PLAINTEXT cluster sharing the host network stack;
    brokers listen on 19092/29092/39092 and ZooKeepers on 22181/32181/42181 to
    avoid port collisions.  Base class for the SSL and SASL host variants."""

    @classmethod
    def setUpClass(cls):
        cls.cluster = utils.TestCluster("cluster-test", FIXTURES_DIR, "cluster-host-plain.yml")
        cls.cluster.start()
        # Wait for the full ZooKeeper ensemble before touching the brokers.
        assert "PASS" in cls.cluster.run_command_on_service("zookeeper-1", ZK_READY.format(servers="localhost:22181,localhost:32181,localhost:42181"))

    @classmethod
    def tearDownClass(cls):
        cls.cluster.shutdown()

    def test_cluster_running(self):
        self.assertTrue(self.cluster.is_running())

    @classmethod
    def is_kafka_healthy_for_service(cls, service, port, num_brokers, host="localhost", security_protocol="PLAINTEXT"):
        # Run the cub kafka-ready probe inside `service`; it echoes PASS/FAIL.
        output = cls.cluster.run_command_on_service(service, HEALTH_CHECK.format(host=host, port=port, brokers=num_brokers, security_protocol=security_protocol))
        assert "PASS" in output

    def test_host_network(self):
        # Test from within the container
        self.is_kafka_healthy_for_service("kafka-1", 19092, 3)
        # Test from outside the container
        logs = utils.run_docker_command(
            image="confluentinc/cp-kafkacat",
            command=KAFKA_CHECK.format(host="localhost", port=19092),
            host_config={'NetworkMode': 'host'})
        parsed_logs = json.loads(logs)
        self.assertEquals(3, len(parsed_logs["brokers"]))
        expected_brokers = [{"id": 1, "name": "localhost:19092"}, {"id": 2, "name": "localhost:29092"}, {"id": 3, "name": "localhost:39092"}]
        self.assertEquals(sorted(expected_brokers), sorted(parsed_logs["brokers"]))
        # End-to-end produce/consume of 100 messages.  The leading positional
        # 300 is presumably a timeout in seconds -- TODO confirm in utils.
        client_logs = utils.run_docker_command(
            300,
            image="confluentinc/cp-kafka",
            name="kafka-producer",
            environment={'KAFKA_ZOOKEEPER_CONNECT': "localhost:22181,localhost:32181,localhost:42181"},
            command=PLAIN_CLIENTS.format(brokers="localhost:19092", topic="foo", messages=100),
            host_config={'NetworkMode': 'host'})
        self.assertTrue("Processed a total of 100 messages" in client_logs)
class ClusterSSLHostNetworkTest(ClusterHostNetworkTest):
    """SSL variant of the host-network cluster test: brokers on
    19093/29093/39093 with snakeoil certificates copied to the docker-machine
    host.  Inherits test_cluster_running and the health probe."""

    @classmethod
    def setUpClass(cls):
        machine_name = os.environ["DOCKER_MACHINE_NAME"]
        cls.machine = utils.TestMachine(machine_name)
        # Copy SSL files.  (py2 print statement -- this file targets Python 2.)
        print cls.machine.ssh("mkdir -p /tmp/kafka-cluster-host-test/secrets")
        local_secrets_dir = os.path.join(FIXTURES_DIR, "secrets")
        cls.machine.scp_to_machine(local_secrets_dir, "/tmp/kafka-cluster-host-test")
        cls.cluster = utils.TestCluster("cluster-test", FIXTURES_DIR, "cluster-host-ssl.yml")
        cls.cluster.start()
        assert "PASS" in cls.cluster.run_command_on_service("zookeeper-1", ZK_READY.format(servers="localhost:22181,localhost:32181,localhost:42181"))

    @classmethod
    def tearDownClass(cls):
        cls.cluster.shutdown()
        cls.machine.ssh("sudo rm -rf /tmp/kafka-cluster-host-test/secrets")

    def test_host_network(self):
        # Overrides the base test for the SSL listeners.
        # Test from within the container
        self.is_kafka_healthy_for_service("kafka-ssl-1", 19093, 3, "localhost", "SSL")
        # Test from outside the container
        logs = utils.run_docker_command(
            image="confluentinc/cp-kafkacat",
            command=KAFKA_SSL_CHECK.format(host="localhost", port=19093),
            host_config={'NetworkMode': 'host', 'Binds': ['/tmp/kafka-cluster-host-test/secrets:/etc/kafka/secrets']})
        parsed_logs = json.loads(logs)
        self.assertEquals(3, len(parsed_logs["brokers"]))
        expected_brokers = [{"id": 1, "name": "localhost:19093"}, {"id": 2, "name": "localhost:29093"}, {"id": 3, "name": "localhost:39093"}]
        self.assertEquals(sorted(expected_brokers), sorted(parsed_logs["brokers"]))
        # Produce 100 messages over SSL (broker 2's listener) under /ssl chroot.
        producer_logs = utils.run_docker_command(
            300,
            image="confluentinc/cp-kafka",
            name="kafka-ssl-host-producer",
            environment={'KAFKA_ZOOKEEPER_CONNECT': "localhost:22181,localhost:32181,localhost:42181/ssl"},
            command=PRODUCER.format(brokers="localhost:29093", topic="foo", config="host.producer.ssl.config", messages=100),
            host_config={'NetworkMode': 'host', 'Binds': ['/tmp/kafka-cluster-host-test/secrets:/etc/kafka/secrets']})
        self.assertTrue("PRODUCED 100 messages" in producer_logs)
        # Consume the first 10 messages back via kafkacat and compare exactly
        # (seq output "1".."10"; xrange => Python 2).
        consumer_logs = utils.run_docker_command(
            300,
            image="confluentinc/cp-kafkacat",
            name="kafkacat-ssl-host-consumer",
            command=KAFKACAT_SSL_CONSUMER.format(brokers="localhost:29093", topic="foo", messages=10),
            host_config={'NetworkMode': 'host', 'Binds': ['/tmp/kafka-cluster-host-test/secrets:/etc/kafka/secrets']})
        self.assertEquals("\n".join([str(i + 1) for i in xrange(10)]), consumer_logs.strip())
class ClusterSASLHostNetworkTest(ClusterHostNetworkTest):
@classmethod
def setUpClass(cls):
machine_name = os.environ["DOCKER_MACHINE_NAME"]
cls.machine = utils.TestMachine(machine_name)
# Add a hostname mapped to eth0, required for SASL to work predictably.
# localhost and hostname both resolve to 127.0.0.1 in the docker image, so using localhost causes unprodicatable behaviour
# with zkclient
cmd = """
"sudo sh -c 'grep sasl.kafka.com /etc/hosts || echo {IP} sasl.kafka.com >> /etc/hosts'"
""".strip()
cls.machine.ssh(cmd.format(IP=cls.machine.get_internal_ip().strip()))
# Copy SSL files.
cls.machine.ssh("mkdir -p /tmp/kafka-cluster-host-test/secrets")
local_secrets_dir = os.path.join(FIXTURES_DIR, "secrets")
cls.machine.scp_to_machine(local_secrets_dir, "/tmp/kafka-cluster-host-test")
cls.cluster = utils.TestCluster("cluster-test", FIXTURES_DIR, "cluster-host-sasl.yml")
cls.cluster.start()
# Create keytabs
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="host_broker1", principal="kafka", hostname="sasl.kafka.com"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="host_broker2", principal="kafka", hostname="sasl.kafka.com"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="host_broker3", principal="kafka", hostname="sasl.kafka.com"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="host_producer", principal="host_producer", hostname="sasl.kafka.com"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="host_consumer", principal="host_consumer", hostname="sasl.kafka.com"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="zookeeper-host-1", principal="zookeeper", hostname="sasl.kafka.com"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="zookeeper-host-2", principal="zookeeper", hostname="sasl.kafka.com"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="zookeeper-host-3", principal="zookeeper", hostname="sasl.kafka.com"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="zkclient-host-1", principal="zkclient", hostname="sasl.kafka.com"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="zkclient-host-2", principal="zkclient", hostname="sasl.kafka.com"))
cls.cluster.run_command_on_service("kerberos", KADMIN_KEYTAB_CREATE.format(filename="zkclient-host-3", principal="zkclient", hostname="sasl.kafka.com"))
assert "PASS" in cls.cluster.run_command_on_service("zookeeper-sasl-1", ZK_READY.format(servers="sasl.kafka.com:22181,sasl.kafka.com:32181,sasl.kafka.com:42181"))
@classmethod
def tearDownClass(cls):
    """Tear down the fixtures created by setUpClass.

    Stops the docker-compose test cluster, then removes the secrets
    directory that was copied onto the test machine during setup
    (scp'd to /tmp/kafka-cluster-host-test), so repeated runs start clean.
    """
    cls.cluster.shutdown()
    # rm runs over ssh because the secrets live on the remote docker host,
    # not on the machine executing the tests.
    cls.machine.ssh("sudo rm -rf /tmp/kafka-cluster-host-test/secrets")
def test_host_network(self):
    """End-to-end SASL_SSL check over the host network.

    First verifies the broker reports healthy from inside its own
    container, then runs two short-lived cp-kafka containers on the host
    network: a producer that writes 100 messages to topic "foo" and a
    consumer that reads 10 of them back, both authenticating with
    Kerberos keytabs mounted from the shared secrets directory.
    """
    zk_connect = "sasl.kafka.com:22181,sasl.kafka.com:32181,sasl.kafka.com:42181/saslssl"
    # Both helper containers share the host network and the secrets mount.
    host_config = {'NetworkMode': 'host',
                   'Binds': ['/tmp/kafka-cluster-host-test/secrets:/etc/kafka/secrets']}

    # Test from within the container.
    self.is_kafka_healthy_for_service("kafka-sasl-ssl-1", 19094, 3, "sasl.kafka.com", "SASL_SSL")

    producer_logs = utils.run_docker_command(
        300,
        image="confluentinc/cp-kafka",
        name="kafka-ssl-sasl-host-producer",
        environment={
            'KAFKA_ZOOKEEPER_CONNECT': zk_connect,
            'KAFKA_OPTS': "-Djava.security.auth.login.config=/etc/kafka/secrets/host_producer_jaas.conf -Djava.security.krb5.conf=/etc/kafka/secrets/host_krb.conf -Dsun.net.spi.nameservice.provider.1=sun -Dsun.security.krb5.debug=true"},
        command=PRODUCER.format(brokers="sasl.kafka.com:29094", topic="foo", config="host.producer.ssl.sasl.config", messages=100),
        host_config=host_config)
    self.assertTrue("PRODUCED 100 messages" in producer_logs)

    consumer_logs = utils.run_docker_command(
        300,
        image="confluentinc/cp-kafka",
        name="kafka-ssl-sasl-host-consumer",
        environment={
            'KAFKA_ZOOKEEPER_CONNECT': zk_connect,
            'KAFKA_OPTS': "-Djava.security.auth.login.config=/etc/kafka/secrets/host_consumer_jaas.conf -Djava.security.krb5.conf=/etc/kafka/secrets/host_krb.conf -Dsun.net.spi.nameservice.provider.1=sun -Dsun.security.krb5.debug=true"},
        command=CONSUMER.format(brokers="sasl.kafka.com:29094", topic="foo", config="host.consumer.ssl.sasl.config", messages=10),
        host_config=host_config)
    self.assertTrue("Processed a total of 10 messages" in consumer_logs)
================================================
FILE: kraft/none/image/kafka-images/kafka/tox.ini
================================================
[tox]
envlist = test
toxworkdir = /var/tmp
[testenv]
# Consolidating all deps here instead of cleanly/separately in test/style/cover so we
# have a single env (platform) to work with, which makes debugging easier (like which env?).
# Not as clean but easier to work with for dev, which is better.
deps =
-rrequirements.txt
flake8
pytest
pytest-xdist
pytest-cov
install_command = pip install -U {packages}
recreate = True
skipsdist = True
usedevelop = True
setenv =
PIP_PROCESS_DEPENDENCY_LINKS=1
PIP_DEFAULT_TIMEOUT=60
ARCHFLAGS=-Wno-error=unused-command-line-argument-hard-error-in-future
basepython = python3
envdir = {toxworkdir}/confluent
[testenv:test]
commands =
py.test --color=no {env:PYTESTARGS:} test
[testenv:style]
commands =
flake8 --config tox.ini
[testenv:cover]
commands =
py.test {env:PYTESTARGS:} --cov . --cov-report=xml --cov-report=html --cov-report=term test
[flake8]
ignore = E111,E121,W292,E123,E226
max-line-length = 160
[pytest]
addopts = -n 1
================================================
FILE: kraft/none/up
================================================
#!/bin/sh
docker-compose up -d
# Creating the user kafka
# kafka is configured as a super user, no need for additional ACL
# docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=kafka],SCRAM-SHA-512=[password=kafka]' --entity-type users --entity-name kafka
echo "Example configuration:"
echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9092 --topic test"
echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9092 --topic test --from-beginning"
================================================
FILE: ldap/acls/acls.csv
================================================
KafkaPrincipal,ResourceType,PatternType,ResourceName,Operation,PermissionType,Host
User:kafka,Cluster,LITERAL,kafka-cluster,All,Allow,*
Group:Kafka Developers,Group,LITERAL,*,Read,Allow,*
Group:Kafka Developers,Topic,LITERAL,test-topic,Describe,Allow,*
Group:Kafka Developers,Topic,LITERAL,test-topic,Read,Allow,*
Group:Kafka Developers,Topic,LITERAL,test-topic,Write,Allow,*
Group:Kafka Developers,Topic,LITERAL,test-topic,Create,Allow,*
================================================
FILE: ldap/add-user
================================================
#!/bin/sh
# Creating the users
docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=purbon-secret],SCRAM-SHA-512=[password=purbon-secret]' --entity-type users --entity-name purbon
echo "Should succeed as the new user is in the group"
echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9093 --topic test-topic --producer.config=/service/kafka/users/purbon.properties"
echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9093 --consumer.config /service/kafka/users/purbon.properties --topic test-topic --from-beginning"
================================================
FILE: ldap/custom/01_base.ldif
================================================
dn: ou=users,{{ LDAP_BASE_DN }}
objectClass: organizationalUnit
ou: Users
dn: ou=groups,{{ LDAP_BASE_DN }}
objectClass: organizationalUnit
ou: Groups
================================================
FILE: ldap/custom/02_KafkaDevelopers.ldif
================================================
dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }}
objectClass: top
objectClass: posixGroup
cn: Kafka Developers
gidNumber: 5000
================================================
FILE: ldap/custom/10_alice.ldif
================================================
dn: cn=alice,ou=users,{{ LDAP_BASE_DN }}
objectClass: inetOrgPerson
objectClass: posixAccount
objectClass: shadowAccount
uid: alice
sn: LookingGlass
givenName: Alice
cn: alice
displayName: Alice LookingGlass
uidNumber: 10000
gidNumber: 5000
userPassword: alice-secret
gecos: alice
loginShell: /bin/bash
homeDirectory: /home/alice
================================================
FILE: ldap/custom/11_barnie.ldif
================================================
dn: cn=barnie,ou=users,{{ LDAP_BASE_DN }}
objectClass: inetOrgPerson
objectClass: posixAccount
objectClass: shadowAccount
uid: barnie
sn: Rubble
givenName: Barnie
cn: barnie
displayName: Barnie Rubble
uidNumber: 10001
gidNumber: 5000
userPassword: barnie-secret
gecos: barnie
loginShell: /bin/bash
homeDirectory: /home/barnie
================================================
FILE: ldap/custom/12_charlie.ldif
================================================
dn: cn=charlie,ou=users,{{ LDAP_BASE_DN }}
objectClass: inetOrgPerson
objectClass: posixAccount
objectClass: shadowAccount
uid: charlie
sn: Sheen
givenName: Charlie
cn: charlie
displayName: Charlie Sheen
uidNumber: 10002
gidNumber: 5000
userPassword: charlie-secret
gecos: charlie
loginShell: /bin/bash
homeDirectory: /home/charlie
================================================
FILE: ldap/custom/20_group_add.ldif
================================================
dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }}
changetype: modify
add: memberuid
memberuid: cn=alice,ou=users,{{ LDAP_BASE_DN }}
dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }}
changetype: modify
add: memberuid
memberuid: cn=barnie,ou=users,{{ LDAP_BASE_DN }}
================================================
FILE: ldap/docker-compose-with-ssl.yaml
================================================
version: '3'
services:
ldap:
image: osixia/openldap:1.2.3
hostname: ldap
container_name: ldap
environment:
LDAP_ORGANISATION: "Confluent"
LDAP_DOMAIN: "confluent.io"
LDAP_TLS: "true"
LDAP_TLS_CRT_FILENAME: my-ldap.crt
LDAP_TLS_KEY_FILENAME: my-ldap.key
LDAP_TLS_CA_CRT_FILENAME: my-ca.crt
LDAP_TLS_VERIFY_CLIENT: demand
ports:
- "389:389"
- "636:636"
volumes:
- "$PWD/ldap/custom:/container/service/slapd/assets/config/bootstrap/ldif/custom"
- "$PWD/ldap/certs:/container/service/slapd/assets/certs"
command: "--copy-service"
zookeeper:
build: zookeeper/
hostname: zookeeper
container_name: zookeeper
environment:
- KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf
kafka:
build: kafka/
container_name: kafka
environment:
- KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf
depends_on:
- zookeeper
- ldap
volumes:
- "$PWD/kafka/jks:/etc/kafka/jks"
ports:
- "9093:9093"
command: ["kafka-server-start", "/etc/kafka/server-with-ssl.properties"]
kafka-security-manager:
image: simplesteph/kafka-security-manager:latest
hostname: kafka-security-manager
container_name: kafka-security-manager
environment:
- AUTHORIZER_ZOOKEEPER_CONNECT=zookeeper:2181
- KSM_READONLY=false
- SOURCE_CLASS=com.github.simplesteph.ksm.source.FileSourceAcl
- SOURCE_FILE_FILENAME=/acls/acls.csv
depends_on:
- kafka
volumes:
- "$PWD/acls:/acls"
================================================
FILE: ldap/docker-compose.yaml
================================================
version: '3'
services:
ldap:
image: osixia/openldap:1.3.0
hostname: ldap
container_name: ldap
environment:
LDAP_ORGANISATION: "Confluent"
LDAP_DOMAIN: "confluent.io"
ports:
- "389:389"
- "636:636"
volumes:
- "$PWD/ldap/custom:/container/service/slapd/assets/config/bootstrap/ldif/custom"
command: "--copy-service"
phpldapadmin-service:
image: osixia/phpldapadmin:0.9.0
container_name: ldapadmin-service
environment:
- PHPLDAPADMIN_LDAP_HOSTS=ldap
ports:
- "6444:443"
depends_on:
- ldap
zookeeper:
build: zookeeper/
hostname: zookeeper
container_name: zookeeper
environment:
- KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf
kafka:
build: kafka/
container_name: kafka
environment:
- KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf
depends_on:
- zookeeper
- ldap
volumes:
- "$PWD/kafka/users:/service/kafka/users"
- "$PWD/kafka/jks:/etc/kafka/jks"
ports:
- "9093:9093"
command: ["kafka-server-start", "/etc/kafka/server.properties"]
kafka-security-manager:
image: simplesteph/kafka-security-manager:latest
hostname: kafka-security-manager
container_name: kafka-security-manager
environment:
- AUTHORIZER_ZOOKEEPER_CONNECT=zookeeper:2181
- KSM_READONLY=false
- SOURCE_CLASS=com.github.simplesteph.ksm.source.FileSourceAcl
- SOURCE_FILE_FILENAME=/acls/acls.csv
depends_on:
- kafka
volumes:
- "$PWD/acls:/acls"
================================================
FILE: ldap/kafka/Dockerfile
================================================
FROM centos:centos8
MAINTAINER seknop@gmail.com
ENV container docker
# 0. Fixing Mirror list for Centos
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.5/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install zookeeper and kafka
RUN yum install -y java-11-openjdk
RUN yum install -y confluent-server
RUN yum install -y confluent-security
# 3. Configure Kafka and zookeeper for Kerberos
COPY server.properties /etc/kafka/server.properties
COPY server-with-ssl.properties /etc/kafka/server-with-ssl.properties
COPY kafka.jaas.config /etc/kafka/kafka_server_jaas.conf
COPY log4j.properties /etc/kafka/log4j.properties
COPY alice.properties /etc/kafka/alice.properties
COPY barnie.properties /etc/kafka/barnie.properties
COPY charlie.properties /etc/kafka/charlie.properties
COPY kafka.properties /etc/kafka/kafka.properties
EXPOSE 9093
CMD kafka-server-start /etc/kafka/server.properties
================================================
FILE: ldap/kafka/alice.properties
================================================
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="alice" \
password="alice-secret";
================================================
FILE: ldap/kafka/barnie.properties
================================================
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="barnie" \
password="barnie-secret";
================================================
FILE: ldap/kafka/charlie.properties
================================================
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="charlie" \
password="charlie-secret";
================================================
FILE: ldap/kafka/confluent.repo
================================================
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.5/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.5/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.5
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.5/archive.key
enabled=1
================================================
FILE: ldap/kafka/consumer.properties
================================================
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="kafka" \
password="kafka";
================================================
FILE: ldap/kafka/jks/.gitignore
================================================
*.crt
*.csr
*_creds
*.jks
*.srl
*.key
*.pem
*.der
*.p12
================================================
FILE: ldap/kafka/kafka.jaas.config
================================================
KafkaServer {
org.apache.kafka.common.security.scram.ScramLoginModule required
username="kafka"
password="kafka";
};
Client {
org.apache.zookeeper.server.auth.DigestLoginModule required
username="kafka"
password="kafka";
};
================================================
FILE: ldap/kafka/kafka.properties
================================================
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="kafka" \
password="kafka";
================================================
FILE: ldap/kafka/log4j.properties
================================================
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Unspecified loggers and loggers with additivity=true output to server.log and stdout
# Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise
# Sven is here!
log4j.rootLogger=INFO, stdout, kafkaAppender
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log
log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log
log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.ldapAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.ldapAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.ldapAppender.File=${kafka.logs.dir}/kafka-ldap.log
log4j.appender.ldapAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.ldapAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
# Change the two lines below to adjust ZK client logging
log4j.logger.org.I0Itec.zkclient.ZkClient=INFO
log4j.logger.org.apache.zookeeper=INFO
# Change the two lines below to adjust the general broker logging level (output to server.log and stdout)
log4j.logger.kafka=INFO
log4j.logger.org.apache.kafka=INFO
# Change to DEBUG or TRACE to enable request logging
log4j.logger.kafka.request.logger=WARN, requestAppender
log4j.additivity.kafka.request.logger=false
# Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output
# related to the handling of requests
#log4j.logger.kafka.network.Processor=TRACE, requestAppender
#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
#log4j.additivity.kafka.server.KafkaApis=false
log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
log4j.additivity.kafka.network.RequestChannel$=false
log4j.logger.kafka.controller=TRACE, controllerAppender
log4j.additivity.kafka.controller=false
log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
log4j.additivity.kafka.log.LogCleaner=false
log4j.logger.state.change.logger=TRACE, stateChangeAppender
log4j.additivity.state.change.logger=false
# Access denials are logged at INFO level, change to DEBUG to also log allowed accesses
log4j.logger.kafka.authorizer.logger=DEBUG, authorizerAppender
log4j.additivity.kafka.authorizer.logger=false
# Experimental, add logging for LDAP
log4j.logger.io.confluent.kafka.security.ldap.authorizer.LdapGroupManager=TRACE, ldapAppender
================================================
FILE: ldap/kafka/server-with-ssl.properties
================================================
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# see kafka.server.KafkaConfig for additional details and defaults
############################# Server Basics #############################
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0
############################# Socket Server Settings #############################
# The address the socket server listens on. It will get the value returned from
# java.net.InetAddress.getCanonicalHostName() if not configured.
# FORMAT:
# listeners = listener_name://host_name:port
# EXAMPLE:
# listeners = PLAINTEXT://your.host.name:9092
listeners=SASL_PLAINTEXT://kafka:9093
# Hostname and port the broker will advertise to producers and consumers. If not set,
# it uses the value for "listeners" if configured. Otherwise, it will use the value
# returned from java.net.InetAddress.getCanonicalHostName().
advertised.listeners=SASL_PLAINTEXT://kafka:9093
# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
security.inter.broker.protocol=SASL_PLAINTEXT
# The number of threads that the server uses for receiving requests from the network and sending responses to the network
num.network.threads=3
# The number of threads that the server uses for processing requests, which may include disk I/O
num.io.threads=8
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400
# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600
############################# Log Basics #############################
# A comma separated list of directories under which to store log files
log.dirs=/var/lib/kafka
# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=1
# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1
############################# Internal Topic Settings #############################
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
############################# Log Flush Policy #############################
# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.
# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000
# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000
############################# Log Retention Policy #############################
# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.
# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours=168
# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
#log.retention.bytes=1073741824
# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824
# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=zookeeper:2181
# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=6000
##################### Confluent Metrics Reporter #######################
# Confluent Control Center and Confluent Auto Data Balancer integration
#
# Uncomment the following lines to publish monitoring data for
# Confluent Control Center and Confluent Auto Data Balancer
# If you are using a dedicated metrics cluster, also adjust the settings
# to point to your metrics kafka cluster.
#metric.reporters=io.confluent.metrics.reporter.ConfluentMetricsReporter
#confluent.metrics.reporter.bootstrap.servers=localhost:9092
#
# Uncomment the following line if the metrics cluster has a single broker
#confluent.metrics.reporter.topic.replicas=1
##################### Confluent Proactive Support ######################
# If set to true, and confluent-support-metrics package is installed
# then the feature to collect and report support metrics
# ("Metrics") is enabled. If set to false, the feature is disabled.
#
confluent.support.metrics.enable=false
# The customer ID under which support metrics will be collected and
# reported.
#
# When the customer ID is set to "anonymous" (the default), then only a
# reduced set of metrics is being collected and reported.
#
# Confluent customers
# -------------------
# If you are a Confluent customer, then you should replace the default
# value with your actual Confluent customer ID. Doing so will ensure
# that additional support metrics will be collected and reported.
#
confluent.support.customer.id=anonymous
############################# Group Coordinator Settings #############################
# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
# The default value for this is 3 seconds.
# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
group.initial.rebalance.delay.ms=0
# SASL Configuration
sasl.enabled.mechanisms=SCRAM-SHA-256
sasl.mechanism.inter.broker.protocol=SCRAM-SHA-256
security.inter.broker.protocol=SASL_PLAINTEXT
allow.everyone.if.no.acl.found=false
super.users=User:kafka
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
# Configure authorizer
authorizer.class.name=io.confluent.kafka.security.ldap.authorizer.LdapAuthorizer
# LDAP provider URL
ldap.authorizer.java.naming.provider.url=ldaps://ldap:636/DC=CONFLUENT,DC=IO
# Refresh interval for LDAP cache. If set to zero, persistent search is used.
ldap.authorizer.refresh.interval.ms=60000
# Lets see if we can connect with TLS to our LDAP server
ldap.authorizer.java.naming.security.principal=cn=admin,dc=confluent,dc=io
ldap.authorizer.java.naming.security.credentials=admin
ldap.authorizer.java.naming.security.protocol=SSL
ldap.authorizer.ssl.keystore.location=/etc/kafka/jks/ldap.keystore.jks
ldap.authorizer.ssl.keystore.password=confluent
ldap.authorizer.ssl.truststore.location=/etc/kafka/jks/ldap.truststore.jks
ldap.authorizer.ssl.truststore.password=confluent
# Search base for group-based search
#ldap.authorizer.group.search.base=ou=groups,dc=confluent,dc=io
# Remember that LDAP works in a context. The search base is ou=groups,dc=confluent,dc=io
# But since my URL is ldap://ldap:389/DC=CONFLUENT,DC=IO, we are already working in the dc=confluent,dc=io context
ldap.authorizer.group.search.base=ou=groups
# Object class for groups
ldap.authorizer.group.object.class=posixGroup
ldap.authorizer.group.search.scope=2
# Name of the attribute from which group name used in ACLs is obtained
ldap.authorizer.group.name.attribute=cn
# Regex pattern to obtain group name used in ACLs from the attribute `ldap.authorizer.group.name.attribute`
ldap.authorizer.group.name.attribute.pattern=
# Name of the attribute from which group members (user principals) are obtained
ldap.authorizer.group.member.attribute=memberUid
# Regex pattern to obtain user principal from group member attribute
ldap.authorizer.group.member.attribute.pattern=cn=(.*),ou=users,dc=confluent,dc=io
================================================
FILE: ldap/kafka/server.properties
================================================
broker.id=0
listeners=SASL_PLAINTEXT://kafka:9093
advertised.listeners=SASL_PLAINTEXT://kafka:9093
log.dirs=/var/lib/kafka
offsets.topic.replication.factor=1
default.replication.factor=1
confluent.license.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
zookeeper.connect=zookeeper:2181
# SASL Configuration
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.enabled.mechanisms=SCRAM-SHA-256
sasl.mechanism.inter.broker.protocol=SCRAM-SHA-256
security.inter.broker.protocol=SASL_PLAINTEXT
allow.everyone.if.no.acl.found=false
super.users=User:kafka
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
# Configure authorizer
authorizer.class.name=io.confluent.kafka.security.ldap.authorizer.LdapAuthorizer
# LDAP provider URL
ldap.authorizer.java.naming.provider.url=ldap://ldap:389/DC=CONFLUENT,DC=IO
# Refresh interval for LDAP cache. If set to zero, persistent search is used.
# Reduced this value from the default 60000ms (60sec) to 10sec to detect
# faster the updates done in the LDAP database
ldap.authorizer.refresh.interval.ms=10000
# Lets try to see if we can run without security
ldap.authorizer.java.naming.security.authentication=SIMPLE
ldap.authorizer.java.naming.security.principal=cn=admin,dc=confluent,dc=io
ldap.authorizer.java.naming.security.credentials=admin
# Search base for group-based search
#ldap.authorizer.group.search.base=ou=groups,dc=confluent,dc=io
# Remember that LDAP works in a context. The search base is ou=groups,dc=confluent,dc=io
# But since my URL is ldap://ldap:389/DC=CONFLUENT,DC=IO, we are already working in the dc=confluent,dc=io context
ldap.authorizer.group.search.base=ou=groups
# Object class for groups
ldap.authorizer.group.object.class=posixGroup
ldap.authorizer.group.search.scope=2
# Name of the attribute from which group name used in ACLs is obtained
ldap.authorizer.group.name.attribute=cn
# Regex pattern to obtain group name used in ACLs from the attribute `ldap.authorizer.group.name.attribute`
ldap.authorizer.group.name.attribute.pattern=
# Name of the attribute from which group members (user principals) are obtained
ldap.authorizer.group.member.attribute=memberUid
# Regex pattern to obtain user principal from group member attribute
ldap.authorizer.group.member.attribute.pattern=cn=(.*),ou=users,dc=confluent,dc=io
================================================
FILE: ldap/kafka/users/purbon.properties
================================================
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="purbon" \
password="purbon-secret";
================================================
FILE: ldap/ldap/certs/.gitignore
================================================
*.crt
*.csr
*_creds
*.jks
*.srl
*.key
*.pem
*.der
*.p12
================================================
FILE: ldap/ldap/custom/01_base.ldif
================================================
dn: ou=users,{{ LDAP_BASE_DN }}
objectClass: organizationalUnit
ou: Users
dn: ou=groups,{{ LDAP_BASE_DN }}
objectClass: organizationalUnit
ou: Groups
================================================
FILE: ldap/ldap/custom/02_KafkaDevelopers.ldif
================================================
dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }}
objectClass: top
objectClass: posixGroup
cn: Kafka Developers
gidNumber: 5000
================================================
FILE: ldap/ldap/custom/10_alice.ldif
================================================
dn: cn=alice,ou=users,{{ LDAP_BASE_DN }}
objectClass: inetOrgPerson
objectClass: posixAccount
objectClass: shadowAccount
uid: alice
sn: LookingGlass
givenName: Alice
cn: alice
displayName: Alice LookingGlass
uidNumber: 10000
gidNumber: 5000
userPassword: alice-secret
gecos: alice
loginShell: /bin/bash
homeDirectory: /home/alice
================================================
FILE: ldap/ldap/custom/11_barnie.ldif
================================================
dn: cn=barnie,ou=users,{{ LDAP_BASE_DN }}
objectClass: inetOrgPerson
objectClass: posixAccount
objectClass: shadowAccount
uid: barnie
sn: Rubble
givenName: Barnie
cn: barnie
displayName: Barnie Rubble
uidNumber: 10001
gidNumber: 5000
userPassword: barnie-secret
gecos: barnie
loginShell: /bin/bash
homeDirectory: /home/barnie
================================================
FILE: ldap/ldap/custom/12_charlie.ldif
================================================
dn: cn=charlie,ou=users,{{ LDAP_BASE_DN }}
objectClass: inetOrgPerson
objectClass: posixAccount
objectClass: shadowAccount
uid: charlie
sn: Sheen
givenName: Charlie
cn: charlie
displayName: Charlie Sheen
uidNumber: 10002
gidNumber: 5000
userPassword: charlie-secret
gecos: charlie
loginShell: /bin/bash
homeDirectory: /home/charlie
================================================
FILE: ldap/ldap/custom/20_group_add.ldif
================================================
dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }}
changetype: modify
add: memberuid
memberuid: cn=alice,ou=users,{{ LDAP_BASE_DN }}
dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }}
changetype: modify
add: memberuid
memberuid: cn=barnie,ou=users,{{ LDAP_BASE_DN }}
================================================
FILE: ldap/scripts/.gitignore
================================================
*.crt
*.csr
*_creds
*.jks
*.srl
*.key
*.pem
*.der
*.p12
================================================
FILE: ldap/scripts/certs-create.sh
================================================
#!/bin/bash
# Generate a throwaway CA plus signed keystores/truststores for the "kafka"
# and "ldap" hosts. All stores use the password "confluent".
#set -o nounset \
#    -o errexit \
#    -o verbose \
#    -o xtrace

# Cleanup files
rm -f *.crt *.csr *_creds *.jks *.srl *.key *.pem *.der *.p12

# Generate CA key
openssl req -new -x509 -keyout snakeoil-ca-1.key -out snakeoil-ca-1.crt -days 365 -subj '/CN=ca1.test.confluent.io/OU=TEST/O=CONFLUENT/L=PaloAlto/S=Ca/C=US' -passin pass:confluent -passout pass:confluent

for i in kafka ldap
do
	echo "------------------------------- $i -------------------------------"

	# Create host keystore
	keytool -genkey -noprompt \
		 -alias $i \
		 -dname "CN=$i,OU=TEST,O=CONFLUENT,L=PaloAlto,S=Ca,C=US" \
		 -ext "SAN=dns:$i,dns:localhost" \
		 -keystore kafka.$i.keystore.jks \
		 -keyalg RSA \
		 -storepass confluent \
		 -keypass confluent

	# Create the certificate signing request (CSR)
	keytool -keystore kafka.$i.keystore.jks -alias $i -certreq -file $i.csr -storepass confluent -keypass confluent -ext "SAN=dns:$i,dns:localhost"
	#openssl req -in $i.csr -text -noout

	# Sign the host certificate with the certificate authority (CA).
	# The v3_req extension block is supplied inline so the SAN entries
	# survive signing (openssl drops CSR extensions by default).
	openssl x509 -req -CA snakeoil-ca-1.crt -CAkey snakeoil-ca-1.key -in $i.csr -out $i-ca1-signed.crt -days 9999 -CAcreateserial -passin pass:confluent -extensions v3_req -extfile <(cat <<EOF
[v3_req]
subjectAltName = @alt_names
[alt_names]
DNS.1 = $i
DNS.2 = localhost
EOF
)
	#openssl x509 -noout -text -in $i-ca1-signed.crt

	# Import the CA cert into the keystore
	keytool -noprompt -keystore kafka.$i.keystore.jks -alias CARoot -import -file snakeoil-ca-1.crt -storepass confluent -keypass confluent

	# Import the signed host certificate into the keystore
	keytool -noprompt -keystore kafka.$i.keystore.jks -alias $i -import -file $i-ca1-signed.crt -storepass confluent -keypass confluent -ext "SAN=dns:$i,dns:localhost"

	# Create the truststore and import the CA cert.
	# NOTE(review): ../up copies kafka.kafka.truststore.jks, so this step is required.
	keytool -noprompt -keystore kafka.$i.truststore.jks -alias CARoot -import -file snakeoil-ca-1.crt -storepass confluent -keypass confluent

	# Save the store credentials for docker secrets style consumption
	echo "confluent" > ${i}_sslkey_creds
	echo "confluent" > ${i}_keystore_creds
	echo "confluent" > ${i}_truststore_creds

	# Create pem files and keys used for Schema Registry HTTPS testing
	# openssl x509 -noout -modulus -in client.certificate.pem | openssl md5
	# openssl rsa -noout -modulus -in client.key | openssl md5
	# echo "GET /" | openssl s_client -connect localhost:8085/subjects -cert client.certificate.pem -key client.key -tls1
	keytool -export -alias $i -file $i.der -keystore kafka.$i.keystore.jks -storepass confluent
	openssl x509 -inform der -in $i.der -out $i.certificate.pem
	keytool -importkeystore -srckeystore kafka.$i.keystore.jks -destkeystore $i.keystore.p12 -deststoretype PKCS12 -deststorepass confluent -srcstorepass confluent -noprompt
	openssl pkcs12 -in $i.keystore.p12 -nodes -nocerts -out $i.key -passin pass:confluent
done
================================================
FILE: ldap/up
================================================
#!/bin/sh
# Bring up the LDAP + Kafka SASL/SCRAM demo.
# Options: -s or --ssl to enable TLS between the brokers and the LDAP server.

usage() { echo "Usage: $0 [--ssl] " 1>&2; exit 1; }

ssl=0

while getopts ":s-:" opt; do
case $opt in
s)
# Short form of --ssl (was declared in the getopts spec but never handled)
ssl=1
;;
-)
case "${OPTARG}" in
ssl)
ssl=1
;;
*)
usage
exit 1
;;
esac;;
*)
usage
exit 1
;;
esac
done

## Select to run with security or not
DOCKER_COMPOSE_FILE="$PWD/docker-compose.yaml"

if [ $ssl -eq 1 ]; then
echo "Running with SSL enabled between the brokers and the LDAP server"
# Generate the certificates
cd scripts
./certs-create.sh
## Copy the necessary broker JKS stores
cp kafka.kafka.keystore.jks ../kafka/jks/ldap.keystore.jks
cp kafka.kafka.truststore.jks ../kafka/jks/ldap.truststore.jks
## copy the LDAP server certificates
cp ldap-ca1-signed.crt ../ldap/certs/my-ldap.crt
cp ldap.key ../ldap/certs/my-ldap.key
cp snakeoil-ca-1.crt ../ldap/certs/my-ca.crt
cd ..
DOCKER_COMPOSE_FILE="$PWD/docker-compose-with-ssl.yaml"
fi

## start docker-compose up to and including kafka
docker-compose -f $DOCKER_COMPOSE_FILE up -d --build kafka

# Creating the users
# kafka is configured as a super user. Its SCRAM credential must be written
# through zookeeper like the others: the broker cannot serve SASL clients
# before its own inter-broker credential exists, so bootstrapping the kafka
# user through the broker's SASL listener would deadlock.
docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=kafka],SCRAM-SHA-512=[password=kafka]' --entity-type users --entity-name kafka
docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=alice-secret],SCRAM-SHA-512=[password=alice-secret]' --entity-type users --entity-name alice
docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=barnie-secret],SCRAM-SHA-512=[password=barnie-secret]' --entity-type users --entity-name barnie
docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=charlie-secret],SCRAM-SHA-512=[password=charlie-secret]' --entity-type users --entity-name charlie

docker-compose up -d

echo "Example configuration:"
echo "Should succeed (barnie is in group)"
echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9093 --topic test-topic --producer.config=/etc/kafka/barnie.properties"
echo "Should fail (charlie is NOT in group)"
echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9093 --topic test-topic --producer.config=/etc/kafka/charlie.properties"
echo "Should succeed (alice is in group)"
echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9093 --consumer.config /etc/kafka/alice.properties --topic test-topic --from-beginning"
echo "List ACLs"
echo "-> docker-compose exec kafka kafka-acls --bootstrap-server kafka:9093 --list --command-config /etc/kafka/kafka.properties"
================================================
FILE: ldap/zookeeper/Dockerfile
================================================
FROM centos:centos8
# MAINTAINER is deprecated; LABEL is the supported replacement
LABEL maintainer="seknop@gmail.com"
ENV container docker
# 0. Fixing Mirror list for Centos (centos8 is EOL, so point yum at the vault archive)
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.5/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install zookeeper and kafka
RUN yum install -y java-11-openjdk
RUN yum install -y confluent-platform-2.12
# 3. Configure zookeeper for SASL authentication
COPY zookeeper.properties /etc/kafka/zookeeper.properties
COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf
EXPOSE 2181
CMD zookeeper-server-start /etc/kafka/zookeeper.properties
================================================
FILE: ldap/zookeeper/confluent.repo
================================================
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.5/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.5/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.5
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.5/archive.key
enabled=1
================================================
FILE: ldap/zookeeper/zookeeper.properties
================================================
dataDir=/var/lib/zookeeper
clientPort=2181
maxClientCnxns=0
authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider
requireClientAuthScheme=sasl
================================================
FILE: ldap/zookeeper/zookeeper.sasl.jaas.config
================================================
Server {
org.apache.zookeeper.server.auth.DigestLoginModule required
user_kafka="kafka";
};
================================================
FILE: ldap-auth/docker-compose.yaml
================================================
version: '3'
services:
ldap:
image: osixia/openldap:1.3.0
hostname: ldap
container_name: ldap
environment:
LDAP_ORGANISATION: "Confluent"
LDAP_DOMAIN: "confluent.io"
ports:
- "389:389"
- "636:636"
volumes:
- "$PWD/ldap/custom:/container/service/slapd/assets/config/bootstrap/ldif/custom"
command: "--copy-service"
phpldapadmin-service:
image: osixia/phpldapadmin:0.9.0
container_name: ldapadmin-service
environment:
- PHPLDAPADMIN_LDAP_HOSTS=ldap
ports:
- "6444:443"
depends_on:
- ldap
zookeeper:
build: zookeeper/
hostname: zookeeper
container_name: zookeeper
environment:
- KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf
kafka:
build: kafka/
container_name: kafka
environment:
- KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf
depends_on:
- zookeeper
- ldap
volumes:
- "$PWD/kafka/users:/service/kafka/users"
- "$PWD/kafka/jks:/etc/kafka/jks"
ports:
- "9093:9093"
================================================
FILE: ldap-auth/kafka/Dockerfile
================================================
FROM centos:centos8
# MAINTAINER is deprecated; LABEL is the supported replacement
LABEL maintainer="seknop@gmail.com"
ENV container docker
# 0. Fixing Mirror list for Centos (centos8 is EOL, so point yum at the vault archive)
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.5/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install Confluent server plus the security plugins (LDAP callback handler)
RUN yum install -y java-11-openjdk
RUN yum install -y confluent-server
RUN yum install -y confluent-security
# 3. Configure Kafka for SASL/PLAIN with LDAP authentication
COPY server.properties /etc/kafka/server.properties
COPY kafka.jaas.config /etc/kafka/kafka_server_jaas.conf
COPY log4j.properties /etc/kafka/log4j.properties
# Client configs for the demo users, used from inside the container
COPY alice.properties /etc/kafka/alice.properties
COPY barnie.properties /etc/kafka/barnie.properties
COPY charlie.properties /etc/kafka/charlie.properties
COPY kafka.properties /etc/kafka/kafka.properties
EXPOSE 9093
CMD kafka-server-start /etc/kafka/server.properties
================================================
FILE: ldap-auth/kafka/alice.properties
================================================
sasl.mechanism=PLAIN
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
username="alice" password="alice-secret";
================================================
FILE: ldap-auth/kafka/barnie.properties
================================================
sasl.mechanism=PLAIN
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
username="barnie" \
password="barnie-secret";
================================================
FILE: ldap-auth/kafka/charlie.properties
================================================
sasl.mechanism=PLAIN
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
username="charlie" \
password="charlie-secret";
================================================
FILE: ldap-auth/kafka/confluent.repo
================================================
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.5/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.5/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.5
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.5/archive.key
enabled=1
================================================
FILE: ldap-auth/kafka/kafka.jaas.config
================================================
Client {
org.apache.zookeeper.server.auth.DigestLoginModule required
username="kafka"
password="kafka";
};
================================================
FILE: ldap-auth/kafka/kafka.properties
================================================
sasl.mechanism=PLAIN
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
username="kafka" \
password="kafka";
================================================
FILE: ldap-auth/kafka/log4j.properties
================================================
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Unspecified loggers and loggers with additivity=true output to server.log and stdout
# Note that INFO only applies to unspecified loggers, the log level of the child logger is used otherwise
# Sven is here!
log4j.rootLogger=INFO, stdout, kafkaAppender
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log
log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.authorizerAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.authorizerAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log
log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.ldapAppender=org.apache.log4j.DailyRollingFileAppender
log4j.appender.ldapAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.ldapAppender.File=${kafka.logs.dir}/kafka-ldap.log
log4j.appender.ldapAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.ldapAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
# Change the two lines below to adjust ZK client logging
log4j.logger.org.I0Itec.zkclient.ZkClient=INFO
log4j.logger.org.apache.zookeeper=INFO
# Change the two lines below to adjust the general broker logging level (output to server.log and stdout)
log4j.logger.kafka=INFO
log4j.logger.org.apache.kafka=INFO
# Change to DEBUG or TRACE to enable request logging
log4j.logger.kafka.request.logger=WARN, requestAppender
log4j.additivity.kafka.request.logger=false
# Uncomment the lines below and change log4j.logger.kafka.network.RequestChannel$ to TRACE for additional output
# related to the handling of requests
#log4j.logger.kafka.network.Processor=TRACE, requestAppender
#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender
#log4j.additivity.kafka.server.KafkaApis=false
log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
log4j.additivity.kafka.network.RequestChannel$=false
log4j.logger.kafka.controller=TRACE, controllerAppender
log4j.additivity.kafka.controller=false
log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
log4j.additivity.kafka.log.LogCleaner=false
log4j.logger.state.change.logger=TRACE, stateChangeAppender
log4j.additivity.state.change.logger=false
# Access denials are logged at INFO level, change to DEBUG to also log allowed accesses
log4j.logger.kafka.authorizer.logger=DEBUG, authorizerAppender
log4j.additivity.kafka.authorizer.logger=false
# Experimental, add logging for LDAP
log4j.logger.io.confluent.kafka.security.ldap.authorizer.LdapGroupManager=TRACE, ldapAppender
================================================
FILE: ldap-auth/kafka/server.properties
================================================
broker.id=0
listeners=SASL_PLAINTEXT://kafka:9093
advertised.listeners=SASL_PLAINTEXT://kafka:9093
log.dirs=/var/lib/kafka
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
zookeeper.connect=zookeeper:2181
inter.broker.listener.name=SASL_PLAINTEXT
sasl.mechanism.inter.broker.protocol=PLAIN
# SASL Configuration
listener.name.sasl_plaintext.sasl.enabled.mechanisms=PLAIN
listener.name.sasl_plaintext.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="kafka" password="kafka";
listener.name.sasl_plaintext.plain.sasl.server.callback.handler.class=io.confluent.security.auth.provider.ldap.LdapAuthenticateCallbackHandler
# LDAP authentication
ldap.java.naming.provider.url=ldap://ldap:389
ldap.java.naming.factory.initial=com.sun.jndi.ldap.LdapCtxFactory
# Authenticate to LDAP
ldap.java.naming.security.principal=cn=admin,dc=confluent,dc=io
ldap.java.naming.security.credentials=admin
ldap.java.naming.security.authentication=SIMPLE
# Locate users
ldap.user.search.base=ou=users,dc=confluent,dc=io
ldap.user.name.attribute=uid
ldap.user.object.class=inetOrgPerson
confluent.support.metrics.enable=false
confluent.license.topic.replication.factor=1
# ldap.user.password.attribute=userPassword
# allow.everyone.if.no.acl.found=false
# super.users=User:kafka
#authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
#sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
# username="kafka" \
# password="kafka" ;
# Configure authorizer
# authorizer.class.name=io.confluent.kafka.security.ldap.authorizer.LdapAuthorizer
# authorizer.class.name=io.confluent.kafka.security.authorizer.ConfluentServerAuthorizer
# LDAP provider URL
# ldap.authorizer.java.naming.provider.url=ldap://ldap:389
# Refresh interval for LDAP cache. If set to zero, persistent search is used.
# Reduced this value from the default 60000ms (60sec) to 10sec to detect
# faster the updates done in the LDAP database
# ldap.authorizer.refresh.interval.ms=10000
# ldap.authorizer.java.naming.security.authentication=SIMPLE
# ldap.authorizer.java.naming.security.principal=cn=admin,dc=confluent,dc=io
# ldap.authorizer.java.naming.security.credentials=admin
# Search base for group-based search
#ldap.authorizer.group.search.base=ou=groups,dc=confluent,dc=io
# Remember that LDAP works in a context. The search base is ou=groups,dc=confluent,dc=io
# But since my URL is ldap://ldap:389/DC=CONFLUENT,DC=IO, we are already working in the dc=confluent,dc=io context
# ldap.authorizer.group.search.base=ou=groups,dc=confluent,dc=io
# Object class for groups
# ldap.authorizer.group.object.class=posixGroup
#ldap.authorizer.group.search.scope=2
# Name of the attribute from which group name used in ACLs is obtained
# ldap.authorizer.group.name.attribute=cn
# Regex pattern to obtain group name used in ACLs from the attribute `ldap.authorizer.group.name.attribute`
# ldap.authorizer.group.name.attribute.pattern=
# Name of the attribute from which group members (user principals) are obtained
# ldap.authorizer.group.member.attribute=memberUid
# Regex pattern to obtain user principal from group member attribute
# ldap.authorizer.group.member.attribute.pattern=cn=(.*),ou=users,dc=confluent,dc=io
================================================
FILE: ldap-auth/ldap/custom/01_base.ldif
================================================
dn: ou=users,dc=confluent,dc=io
objectClass: organizationalUnit
ou: Users
dn: ou=groups,dc=confluent,dc=io
objectClass: organizationalUnit
ou: Groups
================================================
FILE: ldap-auth/ldap/custom/02_KafkaDevelopers.ldif
================================================
dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }}
objectClass: top
objectClass: posixGroup
cn: Kafka Developers
gidNumber: 5000
================================================
FILE: ldap-auth/ldap/custom/03_ProjectA.ldif
================================================
dn: cn=ProjectA,ou=groups,{{ LDAP_BASE_DN }}
objectClass: top
objectClass: posixGroup
cn: ProjectA
gidNumber: 5001
================================================
FILE: ldap-auth/ldap/custom/04_ProjectB.ldif
================================================
dn: cn=ProjectB,ou=groups,{{ LDAP_BASE_DN }}
objectClass: top
objectClass: posixGroup
cn: ProjectB
gidNumber: 5002
================================================
FILE: ldap-auth/ldap/custom/10_alice.ldif
================================================
dn: cn=alice,ou=users,{{ LDAP_BASE_DN }}
objectClass: inetOrgPerson
objectClass: posixAccount
objectClass: shadowAccount
uid: alice
sn: LookingGlass
givenName: Alice
cn: alice
displayName: Alice LookingGlass
uidNumber: 10000
gidNumber: 5000
userPassword: alice-secret
gecos: alice
loginShell: /bin/bash
homeDirectory: /home/alice
================================================
FILE: ldap-auth/ldap/custom/11_barnie.ldif
================================================
dn: cn=barnie,ou=users,{{ LDAP_BASE_DN }}
objectClass: inetOrgPerson
objectClass: posixAccount
objectClass: shadowAccount
uid: barnie
sn: Rubble
givenName: Barnie
cn: barnie
displayName: Barnie Rubble
uidNumber: 10001
gidNumber: 5000
userPassword: barnie-secret
gecos: barnie
loginShell: /bin/bash
homeDirectory: /home/barnie
================================================
FILE: ldap-auth/ldap/custom/12_charlie.ldif
================================================
dn: cn=charlie,ou=users,{{ LDAP_BASE_DN }}
objectClass: inetOrgPerson
objectClass: posixAccount
objectClass: shadowAccount
uid: charlie
sn: Sheen
givenName: Charlie
cn: charlie
displayName: Charlie Sheen
uidNumber: 10002
gidNumber: 5000
userPassword: charlie-secret
gecos: charlie
loginShell: /bin/bash
homeDirectory: /home/charlie
================================================
FILE: ldap-auth/ldap/custom/13_donald.ldif
================================================
dn: cn=donald,ou=users,{{ LDAP_BASE_DN }}
objectClass: inetOrgPerson
objectClass: posixAccount
objectClass: shadowAccount
uid: donald
sn: Duck
givenName: Donald
cn: donald
displayName: Donald Duck
uidNumber: 10003
gidNumber: 5000
userPassword: donald-secret
gecos: donald
loginShell: /bin/bash
homeDirectory: /home/donald
================================================
FILE: ldap-auth/ldap/custom/14_eva.ldif
================================================
dn: cn=eva,ou=users,{{ LDAP_BASE_DN }}
objectClass: inetOrgPerson
objectClass: posixAccount
objectClass: shadowAccount
uid: eva
sn: Maria
givenName: Eva
cn: eva
displayName: Eva Maria
uidNumber: 10004
gidNumber: 5000
userPassword: eva-secret
gecos: eva
loginShell: /bin/bash
homeDirectory: /home/eva
================================================
FILE: ldap-auth/ldap/custom/15_fritz.ldif
================================================
dn: cn=fritz,ou=users,{{ LDAP_BASE_DN }}
objectClass: inetOrgPerson
objectClass: posixAccount
objectClass: shadowAccount
uid: fritz
sn: Walter
givenName: Fritz
cn: fritz
displayName: Fritz Walter
uidNumber: 10005
gidNumber: 5000
userPassword: fritz-secret
gecos: fritz
loginShell: /bin/bash
homeDirectory: /home/fritz
================================================
FILE: ldap-auth/ldap/custom/16_greta.ldif
================================================
dn: cn=greta,ou=users,{{ LDAP_BASE_DN }}
objectClass: inetOrgPerson
objectClass: posixAccount
objectClass: shadowAccount
uid: greta
sn: Thunberg
givenName: Greta
cn: greta
displayName: Greta Thunberg
uidNumber: 10006
gidNumber: 5000
userPassword: greta-secret
gecos: greta
loginShell: /bin/bash
homeDirectory: /home/greta
================================================
FILE: ldap-auth/ldap/custom/17_kafka.ldif
================================================
dn: cn=kafka,ou=users,{{ LDAP_BASE_DN }}
objectClass: inetOrgPerson
objectClass: posixAccount
objectClass: shadowAccount
uid: kafka
sn: kafka
givenName: kafka
cn: kafka
displayName: kafka
uidNumber: 10007
gidNumber: 5000
userPassword: kafka
gecos: kafka
loginShell: /bin/bash
homeDirectory: /home/kafka
================================================
FILE: ldap-auth/ldap/custom/20_group_add.ldif
================================================
dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }}
changetype: modify
add: memberuid
memberuid: cn=alice,ou=users,{{ LDAP_BASE_DN }}
dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }}
changetype: modify
add: memberuid
memberuid: cn=barnie,ou=users,{{ LDAP_BASE_DN }}
dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }}
changetype: modify
add: memberuid
memberuid: cn=charlie,ou=users,{{ LDAP_BASE_DN }}
dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }}
changetype: modify
add: memberuid
memberuid: cn=eva,ou=users,{{ LDAP_BASE_DN }}
dn: cn=Kafka Developers,ou=groups,{{ LDAP_BASE_DN }}
changetype: modify
add: memberuid
memberuid: cn=fritz,ou=users,{{ LDAP_BASE_DN }}
================================================
FILE: ldap-auth/up
================================================
#!/bin/sh
# Bring up the full LDAP-authentication demo stack and print example commands.

## start docker-compose up to and including kafka
docker-compose up -d --build

# Print a description followed by the command to run, prefixed with "-> "
example() {
echo "$1"
echo "-> $2"
}

echo "Example configuration:"
example "Should succeed (barnie is in group)" \
"docker-compose exec kafka kafka-console-producer --broker-list kafka:9093 --topic test-topic --producer.config=/etc/kafka/barnie.properties"
example "Should fail (charlie is NOT in group)" \
"docker-compose exec kafka kafka-console-producer --broker-list kafka:9093 --topic test-topic --producer.config=/etc/kafka/charlie.properties"
example "Should succeed (alice is in group)" \
"docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9093 --consumer.config /etc/kafka/alice.properties --topic test-topic --from-beginning"
example "List ACLs" \
"docker-compose exec kafka kafka-acls --bootstrap-server kafka:9093 --list --command-config /etc/kafka/kafka.properties"
================================================
FILE: ldap-auth/zookeeper/Dockerfile
================================================
FROM centos:centos8
# MAINTAINER is deprecated; LABEL is the supported replacement
LABEL maintainer="seknop@gmail.com"
ENV container docker
# 0. Fixing Mirror list for Centos (centos8 is EOL, so point yum at the vault archive)
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.5/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install zookeeper and kafka
RUN yum install -y java-11-openjdk
RUN yum install -y confluent-platform-2.12
# 3. Configure zookeeper for SASL authentication
COPY zookeeper.properties /etc/kafka/zookeeper.properties
COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf
EXPOSE 2181
CMD zookeeper-server-start /etc/kafka/zookeeper.properties
================================================
FILE: ldap-auth/zookeeper/confluent.repo
================================================
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.5/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.5/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.5
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.5/archive.key
enabled=1
================================================
FILE: ldap-auth/zookeeper/zookeeper.properties
================================================
dataDir=/var/lib/zookeeper
clientPort=2181
maxClientCnxns=0
authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider
requireClientAuthScheme=sasl
================================================
FILE: ldap-auth/zookeeper/zookeeper.sasl.jaas.config
================================================
Server {
org.apache.zookeeper.server.auth.DigestLoginModule required
user_kafka="kafka";
};
================================================
FILE: multi-sasl/docker-compose.yml
================================================
version: '3'
services:
zookeeper:
build: zookeeper/
container_name: zookeeper
hostname: zookeeper
restart: on-failure
environment:
- KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/zookeeper_server_jaas.conf
kafka:
build: kafka/
container_name: kafka
environment:
- KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf
depends_on:
- zookeeper
restart: on-failure
================================================
FILE: multi-sasl/kafka/Dockerfile
================================================
# Pin to centos8 to match the sibling zookeeper image; the unpinned "centos"
# tag is frozen at centos8 anyway and its default mirrors are offline.
FROM centos:centos8
# MAINTAINER is deprecated; LABEL is the supported replacement.
LABEL maintainer="seknop@gmail.com"
ENV container docker
# 0. Point yum at the CentOS vault (centos8 is EOL, mirror.centos.org is gone)
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install zookeeper and kafka
RUN yum install -y java-11-openjdk-devel
RUN yum install -y confluent-platform-2.12
# 3. Configure Kafka broker for SASL (SCRAM + PLAIN)
COPY server.properties /etc/kafka/server.properties
COPY kafka.sasl.jaas.config /etc/kafka/kafka_server_jaas.conf
COPY consumer.properties /etc/kafka/consumer.properties
COPY consumer.plain.properties /etc/kafka/consumer.plain.properties
EXPOSE 9093
# Exec form so the broker receives signals directly (clean container stop).
CMD ["kafka-server-start", "/etc/kafka/server.properties"]
================================================
FILE: multi-sasl/kafka/confluent.repo
================================================
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.4/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.4
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
================================================
FILE: multi-sasl/kafka/consumer.plain.properties
================================================
sasl.mechanism=PLAIN
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
username="kafka" \
password="kafka";
================================================
FILE: multi-sasl/kafka/consumer.properties
================================================
sasl.mechanism=SCRAM-SHA-256
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="kafka" \
password="kafka";
================================================
FILE: multi-sasl/kafka/kafka.sasl.jaas.config
================================================
Client {
org.apache.zookeeper.server.auth.DigestLoginModule required
username="admin"
password="password";
};
================================================
FILE: multi-sasl/kafka/server.properties
================================================
broker.id=0
listeners=SASL_PLAINTEXT://kafka:9093
advertised.listeners=SASL_PLAINTEXT://kafka:9093
log.dirs=/var/lib/kafka
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
zookeeper.connect=zookeeper:2181
# Scram Authentication mechanism
sasl.enabled.mechanisms=SCRAM-SHA-256,PLAIN
sasl.mechanism.inter.broker.protocol=SCRAM-SHA-256
listener.name.sasl_plaintext.scram-sha-256.sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required \
username="kafka" \
password="kafka";
listener.name.sasl_plaintext.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
username="kafka" \
password="kafka" \
user_kafka="kafka";
security.inter.broker.protocol=SASL_PLAINTEXT
allow.everyone.if.no.acl.found=false
super.users=User:kafka
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
zookeeper.set.acl=true
================================================
FILE: multi-sasl/up
================================================
#!/bin/sh
# Build and start the multi-SASL stack (SCRAM-SHA-256 + PLAIN on one
# listener), create the SCRAM credentials, then print example client commands.
docker-compose up -d --build
# Creating the user kafka
# kafka is configured as a super user, no need for additional ACL
docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=kafka],SCRAM-SHA-512=[password=kafka]' --entity-type users --entity-name kafka
echo "Example configuration:"
echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9093 --producer.config /etc/kafka/consumer.properties --topic test"
echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9093 --producer.config /etc/kafka/consumer.plain.properties --topic test"
echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9093 --consumer.config /etc/kafka/consumer.properties --topic test --from-beginning"
================================================
FILE: multi-sasl/zookeeper/Dockerfile
================================================
FROM centos:centos8
# MAINTAINER is deprecated; LABEL is the supported replacement.
LABEL maintainer="d.gasparina@gmail.com"
ENV container docker
# 0. Fixing Mirror list for Centos (centos8 is EOL; packages moved to the vault)
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install zookeeper and kafka
RUN yum install -y java-11-openjdk-devel
RUN yum install -y confluent-platform-2.12
# 3. Configure zookeeper for SASL authentication
COPY zookeeper.properties /etc/kafka/zookeeper.properties
COPY zookeeper.sasl.jaas.config /etc/kafka/zookeeper_server_jaas.conf
EXPOSE 2181
# Exec form so the process receives signals directly (clean container stop).
CMD ["zookeeper-server-start", "/etc/kafka/zookeeper.properties"]
================================================
FILE: multi-sasl/zookeeper/confluent.repo
================================================
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.4/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.4
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
================================================
FILE: multi-sasl/zookeeper/zookeeper.properties
================================================
dataDir=/var/lib/zookeeper
clientPort=2181
maxClientCnxns=0
authProvider.1 = org.apache.zookeeper.server.auth.SASLAuthenticationProvider
================================================
FILE: multi-sasl/zookeeper/zookeeper.sasl.jaas.config
================================================
Server {
org.apache.zookeeper.server.auth.DigestLoginModule required
user_admin="password";
};
================================================
FILE: none/docker-compose.yml
================================================
---
version: '3'
services:
  zookeeper:
    image: confluentinc/cp-zookeeper:6.1.0
    hostname: zookeeper
    container_name: zookeeper
    environment:
      ZOOKEEPER_SERVER_ID: 1
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: "2000"
      ZOOKEEPER_SERVERS: zookeeper:2888:3888
      KAFKA_JMX_PORT: 9999
      KAFKA_JMX_HOSTNAME: localhost
  kafka:
    image: confluentinc/cp-enterprise-kafka:6.1.0
    hostname: kafka
    container_name: kafka
    depends_on:
      - zookeeper
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      # Was KAFKA_LISTENER (singular), which the cp-kafka image does not map to
      # any broker property; the "listeners" property maps from KAFKA_LISTENERS.
      KAFKA_LISTENERS: INTERNAL://kafka:9092,OUTSIDE://localhost:9093
      KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka:9092,OUTSIDE://localhost:9093
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,OUTSIDE:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
      KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter
      CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: kafka:9092
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_JMX_PORT: 9999
      KAFKA_JMX_HOSTNAME: kafka
      KAFKA_BROKER_RACK: 0
    ports:
      - 9093:9093
================================================
FILE: none/up
================================================
#!/bin/sh
# Start the no-authentication example stack and print example client commands.
docker-compose up -d
# Creating the user kafka
# kafka is configured as a super user, no need for additional ACL
# docker-compose exec kafka kafka-configs --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=kafka],SCRAM-SHA-512=[password=kafka]' --entity-type users --entity-name kafka
echo "Example configuration:"
echo "-> docker-compose exec kafka kafka-console-producer --broker-list kafka:9092 --topic test"
echo "-> docker-compose exec kafka kafka-console-consumer --bootstrap-server kafka:9092 --topic test --from-beginning"
================================================
FILE: oauth/.gitignore
================================================
certs/
*.jks
================================================
FILE: oauth/ca.cnf
================================================
[ policy_match ]
countryName = match
stateOrProvinceName = match
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
prompt = no
distinguished_name = dn
default_md = sha256
default_bits = 4096
x509_extensions = v3_ca
[ dn ]
countryName = UK
organizationName = Confluent
localityName = London
commonName = kafka.confluent.local
[ v3_ca ]
subjectKeyIdentifier=hash
basicConstraints = critical,CA:true
authorityKeyIdentifier=keyid:always,issuer:always
keyUsage = critical,keyCertSign,cRLSign
================================================
FILE: oauth/docker-compose.yml
================================================
version: '3'
services:
zookeeper:
build: zookeeper/
container_name: zookeeper
domainname: confluent.local
hostname: zookeeper
networks:
default:
aliases:
- zookeeper.confluent.local
kafka:
build: kafka/
container_name: kafka
domainname: confluent.local
hostname: kafka
depends_on:
- zookeeper
environment:
- KAFKA_OPTS=-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf
networks:
default:
aliases:
- kafka.confluent.local
networks:
default:
================================================
FILE: oauth/generate_certs.sh
================================================
#!/bin/bash
# Generates the TLS material for the OAuth example:
#   - a self-signed CA (ca-cert / ca-key, key left unencrypted via -nodes)
#   - kafka.server.keystore.jks holding a CA-signed broker certificate
#   - kafka.server.truststore.jks / kafka.client.truststore.jks trusting the CA
# All stores use the same throwaway password ("secret").
set -u
set -e
validity=1
keystore_pass=secret
key_pass=secret
distinguished_name=kafka.confluent.local
hostname=kafka.confluent.local
rm -rf certs
mkdir certs
pushd certs
# Generate server certificate
# (keytool options are single words: -storepass / -keypass, not -store-pass / -key-pass)
keytool -keystore kafka.server.keystore.jks \
        -alias localhost \
        -validity ${validity} \
        -genkey -keyalg RSA \
        -storepass ${keystore_pass} \
        -keypass ${key_pass} \
        -dname "CN=kafka.confluent.local, OU=CS, O=Confluent, L=Palo Alto, S=CA, C=US" \
        -ext SAN=DNS:${hostname}
# keytool -list -v -keystore kafka.server.keystore.jks -storepass ${keystore_pass}
# Generate certificate authority
openssl req -new -nodes -x509 -newkey rsa:2048 -keyout ca-key -out ca-cert -days ${validity} -config ../ca.cnf
echo "Importing certificate authority into client truststore."
keytool -keystore kafka.client.truststore.jks \
        -alias CARoot \
        -import \
        -file ca-cert \
        -storepass ${keystore_pass} \
        -noprompt
echo "Importing certificate authority into server truststore."
keytool -keystore kafka.server.truststore.jks \
        -alias CARoot \
        -import \
        -file ca-cert \
        -storepass ${keystore_pass} \
        -noprompt
echo "Exporting server certificate such that it can be signed."
keytool -keystore kafka.server.keystore.jks \
        -alias localhost \
        -certreq \
        -file cert-file \
        -storepass ${keystore_pass}
echo "Signing exported server certificate."
# The CA key was generated unencrypted (-nodes above), so no -passin is needed.
openssl x509 -req \
        -CA ca-cert \
        -CAkey ca-key \
        -in cert-file \
        -out cert-signed \
        -days ${validity} \
        -CAcreateserial
echo "Importing certificate authority into keystore."
keytool -keystore kafka.server.keystore.jks \
        -alias CARoot \
        -import -file ca-cert \
        -storepass ${keystore_pass} \
        -noprompt
echo "Importing signed certificate into server keystore."
keytool -keystore kafka.server.keystore.jks \
        -alias localhost \
        -import \
        -file cert-signed \
        -storepass ${keystore_pass} \
        -noprompt
echo "Moving client truststore and kafka truststore and kafka keystore to Kafka docker directory"
popd
mv certs/kafka.server.truststore.jks kafka
mv certs/kafka.server.keystore.jks kafka
mv certs/kafka.client.truststore.jks kafka
================================================
FILE: oauth/kafka/Dockerfile
================================================
FROM centos:centos8
# MAINTAINER is deprecated; LABEL is the supported replacement.
LABEL maintainer="d.gasparina@gmail.com"
ENV container docker
# 0. Fixing Mirror list for Centos (centos8 is EOL; packages moved to the vault)
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install zookeeper and kafka
RUN yum install -y java-1.8.0-openjdk
RUN yum install -y confluent-platform-2.12
# 3. Configure Kafka for SASL_SSL / OAUTHBEARER
COPY server.properties /etc/kafka/server.properties
COPY client.properties /etc/kafka/client.properties
COPY kafka_server_jaas.conf /etc/kafka/kafka_server_jaas.conf
# Custom OAuth login/validator callback handlers (built by oauth/up via maven)
COPY oauthcallbackhandlers/target/dummy-oauth-adapter-0.1.0-jar-with-dependencies.jar /usr/share/java/kafka/dummy-oauth-adapter-0.1.0-jar-with-dependencies.jar
COPY test_produce_and_consume.sh /tmp/test_produce_and_consume.sh
# 4. Put SSL certificates in place
COPY kafka.server.keystore.jks /etc/kafka/kafka.server.keystore.jks
COPY kafka.server.truststore.jks /etc/kafka/kafka.server.truststore.jks
# this will be used by the kafka-console-producer.sh and kafka-console-consumer.sh scripts
COPY kafka.client.truststore.jks /etc/kafka/kafka.client.truststore.jks
EXPOSE 9093
# Exec form so the broker receives signals directly (clean container stop).
CMD ["kafka-server-start", "/etc/kafka/server.properties"]
================================================
FILE: oauth/kafka/client.properties
================================================
security.protocol=SASL_SSL
sasl.mechanism=OAUTHBEARER
sasl.login.callback.handler.class=io.confluent.examples.authentication.oauth.OauthBearerLoginCallbackHandler
ssl.truststore.location=/etc/kafka/kafka.client.truststore.jks
ssl.truststore.password=secret
================================================
FILE: oauth/kafka/confluent.repo
================================================
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.4/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.4
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
================================================
FILE: oauth/kafka/kafka_server_jaas.conf
================================================
KafkaServer {
org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required;
};
KafkaClient {
org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required;
};
================================================
FILE: oauth/kafka/oauthcallbackhandlers/.gitignore
================================================
target/
.idea
================================================
FILE: oauth/kafka/oauthcallbackhandlers/pom.xml
================================================
4.0.0
io.confluent.cs.examples
dummy-oauth-adapter
0.1.0
1.8
1.8
org.apache.kafka
kafka-clients
2.4.0
junit
junit
4.13.1
test
com.fasterxml.jackson.core
jackson-databind
2.10.0.pr1
jakarta.xml.bind
jakarta.xml.bind-api
2.3.2
org.glassfish.jaxb
jaxb-runtime
2.3.2
io.jsonwebtoken
jjwt
0.9.1
org.projectlombok
lombok
1.18.0
org.apache.maven.plugins
maven-assembly-plugin
3.1.1
jar-with-dependencies
assemble-all
package
single
================================================
FILE: oauth/kafka/oauthcallbackhandlers/src/main/java/io/confluent/examples/authentication/oauth/JwtHelper.java
================================================
package io.confluent.examples.authentication.oauth;

import io.jsonwebtoken.Claims;
import io.jsonwebtoken.Jws;
import io.jsonwebtoken.Jwts;
import io.jsonwebtoken.SignatureAlgorithm;

import java.io.UnsupportedEncodingException;
import java.util.Arrays;
import java.util.Date;
import java.util.HashSet;

/**
 * Creates and validates the HS256-signed JWTs used by the OAuth example.
 * The signing secret is hard-coded ("secret") — this is demo-only and must
 * never be done in production.
 */
public class JwtHelper {

    /**
     * Builds a compact JWT for subject "bene" with a "scope" claim of
     * "developer admin", valid from now until one hour from now.
     *
     * @return the compact (dot-separated) JWT string
     * @throws UnsupportedEncodingException never in practice (UTF-8 is always supported)
     */
    String createJwt() throws UnsupportedEncodingException {
        return Jwts.builder()
                .setSubject("bene")
                .setExpiration(new Date(System.currentTimeMillis() + 1000 * 60 * 60))
                .claim("name", "Benedikt")
                .claim("scope", "developer admin")
                .setNotBefore(new Date())
                .setIssuedAt(new Date())
                .signWith(
                        SignatureAlgorithm.HS256,
                        "secret".getBytes("UTF-8")
                ).compact();
    }

    /**
     * Verifies the signature of the given compact JWT and maps its claims into
     * a {@link MyOauthBearerToken}; the space-separated "scope" claim becomes
     * the token's scope set.
     *
     * @param jwt compact JWT to validate
     * @return token populated from the JWT claims
     * @throws UnsupportedEncodingException never in practice (UTF-8 is always supported)
     */
    MyOauthBearerToken validate(String jwt) throws UnsupportedEncodingException {
        // Parameterized Jws<Claims> (was the raw type Jws) so the getBody()
        // accessors below are type-checked instead of unchecked.
        Jws<Claims> claims = Jwts.parser()
                .setSigningKey("secret".getBytes("UTF-8"))
                .parseClaimsJws(jwt);
        MyOauthBearerToken token = new MyOauthBearerToken();
        token.setLifetimeMs(claims.getBody().getExpiration().getTime());
        token.setPrincipalName(claims.getBody().getSubject());
        token.setScopes(new HashSet<>(Arrays.asList(((String) claims.getBody().get("scope")).split(" "))));
        token.setStartTimeMs(claims.getBody().getIssuedAt().getTime());
        token.setValue(jwt);
        return token;
    }
}
================================================
FILE: oauth/kafka/oauthcallbackhandlers/src/main/java/io/confluent/examples/authentication/oauth/MyOauthBearerToken.java
================================================
package io.confluent.examples.authentication.oauth;

import lombok.Data;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerToken;

import java.util.HashSet;
import java.util.Set;

/**
 * Minimal {@link OAuthBearerToken} implementation backed by plain fields.
 * Getters/setters used elsewhere (getScopes, setLifetimeMs, ...) are
 * generated by Lombok's {@code @Data}.
 */
@Data
public class MyOauthBearerToken implements OAuthBearerToken {

    private long lifetimeMs;        // epoch millis at which the token expires
    private String value;           // compact JWT text
    private long startTimeMs;       // epoch millis at which the token was issued
    private String principalName;   // subject the token was issued for
    // Parameterized with String (was the raw type Set) to match the
    // OAuthBearerToken#scope() contract and avoid unchecked warnings.
    private Set<String> scopes = new HashSet<>();

    MyOauthBearerToken() { }

    /** Wraps an existing compact JWT and gives it a one-hour lifetime. */
    MyOauthBearerToken(String value) {
        this.value = value;
        this.lifetimeMs = System.currentTimeMillis() + 1000 * 60 * 60;
    }

    @Override
    public String value() {
        return this.value;
    }

    @Override
    public Set<String> scope() {
        return scopes;
    }

    @Override
    public long lifetimeMs() {
        return this.lifetimeMs;
    }

    @Override
    public String principalName() {
        return this.principalName;
    }

    @Override
    public Long startTimeMs() {
        return startTimeMs;
    }
}
================================================
FILE: oauth/kafka/oauthcallbackhandlers/src/main/java/io/confluent/examples/authentication/oauth/OauthBearerLoginCallbackHandler.java
================================================
package io.confluent.examples.authentication.oauth;

import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerTokenCallback;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.security.auth.callback.Callback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.auth.login.AppConfigurationEntry;
import java.io.UnsupportedEncodingException;
import java.util.List;
import java.util.Map;

/**
 * Login callback handler that answers {@link OAuthBearerTokenCallback}s with a
 * freshly minted demo JWT from {@link JwtHelper}; any other callback type is
 * rejected with {@link UnsupportedCallbackException}.
 */
public class OauthBearerLoginCallbackHandler implements AuthenticateCallbackHandler {

    private final Logger log = LoggerFactory.getLogger(OauthBearerLoginCallbackHandler.class);

    private JwtHelper jwtHelper = new JwtHelper();

    @Override
    public void configure(Map configs, String saslMechanism, List jaasConfigEntries) {
    }

    @Override
    public void close() {
    }

    @Override
    public void handle(Callback[] callbacks) throws UnsupportedCallbackException, UnsupportedEncodingException {
        log.info("Handling callbacks.");
        for (Callback cb : callbacks) {
            // Guard clause: anything but a token callback is unsupported.
            if (!(cb instanceof OAuthBearerTokenCallback)) {
                throw new UnsupportedCallbackException(cb);
            }
            // TODO: a bearer token would usually be retrieved from an authorization server.
            ((OAuthBearerTokenCallback) cb).token(new MyOauthBearerToken(jwtHelper.createJwt()));
            log.info("Created jwt compact form");
        }
    }
}
================================================
FILE: oauth/kafka/oauthcallbackhandlers/src/main/java/io/confluent/examples/authentication/oauth/OauthBearerValidatorCallbackHandler.java
================================================
package io.confluent.examples.authentication.oauth;

import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallback;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.security.auth.callback.Callback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.auth.login.AppConfigurationEntry;
import java.io.IOException;
import java.util.List;
import java.util.Map;

/**
 * Server-side callback handler that validates incoming OAUTHBEARER tokens via
 * {@link JwtHelper#validate(String)}; any other callback type is rejected with
 * {@link UnsupportedCallbackException}.
 */
public class OauthBearerValidatorCallbackHandler implements AuthenticateCallbackHandler {

    private final Logger log = LoggerFactory.getLogger(OauthBearerValidatorCallbackHandler.class);

    private JwtHelper jwtHelper = new JwtHelper();

    @Override
    public void configure(Map configs, String saslMechanism, List jaasConfigEntries) {
    }

    @Override
    public void close() {
    }

    @Override
    public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
        log.info("Validating token.");
        for (Callback cb : callbacks) {
            // Guard clause: anything but a validator callback is unsupported.
            if (!(cb instanceof OAuthBearerValidatorCallback)) {
                throw new UnsupportedCallbackException(cb);
            }
            OAuthBearerValidatorCallback validatorCallback = (OAuthBearerValidatorCallback) cb;
            log.info("Tokenvalue: {}", validatorCallback.tokenValue());
            validatorCallback.token(jwtHelper.validate(validatorCallback.tokenValue()));
        }
    }
}
================================================
FILE: oauth/kafka/oauthcallbackhandlers/src/test/java/io/confluent/examples/authentication/oauth/JwtHelperTest.java
================================================
package io.confluent.examples.authentication.oauth;

import org.junit.Test;

import java.io.UnsupportedEncodingException;
import java.util.Arrays;
import java.util.HashSet;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

// Round-trip test: a JWT created by JwtHelper must validate back into a token
// carrying the same subject and scopes, with a sane issue/expiry window.
public class JwtHelperTest {

    @Test
    public void test() throws UnsupportedEncodingException {
        JwtHelper underTest = new JwtHelper();
        String jwt = underTest.createJwt();
        MyOauthBearerToken parsed = underTest.validate(jwt);
        System.err.println(parsed);
        // Subject and the "developer admin" scope claim are hard-coded in createJwt().
        assertEquals("bene", parsed.getPrincipalName());
        assertEquals(new HashSet<>(Arrays.asList("developer", "admin")), parsed.getScopes());
        // Issued-at must not be in the future; expiry is one hour out.
        assertTrue(parsed.getStartTimeMs() <= System.currentTimeMillis());
        assertTrue(parsed.getLifetimeMs() > System.currentTimeMillis());
    }
}
================================================
FILE: oauth/kafka/oauthcallbackhandlers/src/test/java/io/confluent/examples/authentication/oauth/ProduceDataTest.java
================================================
package io.confluent.examples.authentication.oauth;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.security.auth.SecurityProtocol;
import org.apache.kafka.common.serialization.StringSerializer;
import org.junit.Ignore;
import org.junit.Test;

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

// Manual integration test: produces one record over SASL_PLAINTEXT/OAUTHBEARER
// using the custom login callback handler. Requires a reachable broker on
// localhost:9093, hence @Ignore.
public class ProduceDataTest {

    // This test will not work until advertised listeners is set correctly in docker-compose.yml .
    @Test
    @Ignore
    public void test() throws ExecutionException, InterruptedException {
        Properties producerConfig = new Properties();
        producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9093");
        producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Short, non-retrying timeouts so a missing broker fails fast.
        producerConfig.put(ProducerConfig.RETRIES_CONFIG, 0);
        producerConfig.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, 3000);
        producerConfig.put(ProducerConfig.LINGER_MS_CONFIG, 1000);
        producerConfig.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 1000);
        producerConfig.put("sasl.mechanism", "OAUTHBEARER");
        producerConfig.put("security.protocol", SecurityProtocol.SASL_PLAINTEXT.name);
        producerConfig.put("sasl.jaas.config", "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required unsecuredLoginStringClaim_sub=\"alice\";");
        // The custom handler mints the demo JWT instead of contacting an auth server.
        producerConfig.put("sasl.login.callback.handler.class", "io.confluent.examples.authentication.oauth.OauthBearerLoginCallbackHandler");
        KafkaProducer kafkaProducer = new KafkaProducer<>(producerConfig);
        Future result = kafkaProducer.send(new ProducerRecord<>("foo", "bar"));
        // Block until the send is acknowledged (or throws).
        result.get();
    }
}
================================================
FILE: oauth/kafka/server.properties
================================================
############################# Server Basics #############################
broker.id=0
listeners=SASL_SSL://kafka.confluent.local:9093
advertised.listeners=SASL_SSL://kafka.confluent.local:9093
log.dirs=/var/lib/kafka
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
zookeeper.connect=zookeeper.confluent.local:2181
# oauth bearer configuration
security.inter.broker.protocol=SASL_SSL
sasl.mechanism.inter.broker.protocol=OAUTHBEARER
sasl.enabled.mechanisms=OAUTHBEARER
listener.name.sasl_ssl.oauthbearer.sasl.server.callback.handler.class=io.confluent.examples.authentication.oauth.OauthBearerValidatorCallbackHandler
listener.name.sasl_ssl.oauthbearer.sasl.login.callback.handler.class=io.confluent.examples.authentication.oauth.OauthBearerLoginCallbackHandler
ssl.truststore.location=/etc/kafka/kafka.server.truststore.jks
ssl.truststore.password=secret
ssl.keystore.location=/etc/kafka/kafka.server.keystore.jks
ssl.keystore.password=secret
ssl.key.password=secret
================================================
FILE: oauth/kafka/test_produce_and_consume.sh
================================================
#!/bin/bash
# Smoke test for the OAuth example: produce a few lines over
# SASL_SSL/OAUTHBEARER, then read them back (consumer bounded by "timeout 5").
echo 'some sample messages
sent via sasl outh bearer authentication
with custom token generation and validation.
' | kafka-console-producer --broker-list kafka.confluent.local:9093 --topic test --producer.config /etc/kafka/client.properties
timeout 5 kafka-console-consumer --bootstrap-server kafka.confluent.local:9093 --topic test --from-beginning --consumer.config /etc/kafka/client.properties
================================================
FILE: oauth/up
================================================
#!/bin/bash
# Build the custom OAuth callback-handler jar, generate TLS certificates,
# start the stack, and run the produce/consume smoke test.
# Note: bash shebang (was /bin/sh) because pushd/popd are bash builtins.
set -e
set -u
pushd kafka/oauthcallbackhandlers
mvn clean package
popd
./generate_certs.sh
docker-compose up -d --build
# Give the broker a moment to finish starting before the smoke test.
sleep 5
docker-compose exec kafka /tmp/test_produce_and_consume.sh
================================================
FILE: oauth/zookeeper/Dockerfile
================================================
FROM centos:centos8
# MAINTAINER is deprecated; LABEL is the supported replacement.
LABEL maintainer="seknop@gmail.com"
ENV container docker
# 0. Fixing Mirror list for Centos (centos8 is EOL; packages moved to the vault)
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.4/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install zookeeper and kafka
RUN yum install -y java-1.8.0-openjdk
RUN yum install -y confluent-platform-2.12
# 3. Configure zookeeper
COPY zookeeper.properties /etc/kafka/zookeeper.properties
EXPOSE 2181
# Exec form so the process receives signals directly (clean container stop).
CMD ["zookeeper-server-start", "/etc/kafka/zookeeper.properties"]
================================================
FILE: oauth/zookeeper/confluent.repo
================================================
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.4/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.4
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.4/archive.key
enabled=1
================================================
FILE: oauth/zookeeper/zookeeper.properties
================================================
dataDir=/var/lib/zookeeper
clientPort=2181
maxClientCnxns=0
================================================
FILE: plain/consumer.properties
================================================
sasl.mechanism=PLAIN
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
username="consumer" \
password="consumer-secret";
================================================
FILE: plain/docker-compose.yml
================================================
version: '3.4'
services:
  zookeeper:
    image: ${REPOSITORY}/cp-zookeeper:${TAG}
    container_name: zookeeper
    hostname: zookeeper
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
  kafka:
    image: ${REPOSITORY}/cp-kafka:${TAG}
    container_name: kafka
    hostname: kafka
    depends_on:
      - zookeeper
    ports:
      - 9093:9093
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      # Was KAFKA_LISTENER (singular), which the cp-kafka image does not map to
      # any broker property; the "listeners" property maps from KAFKA_LISTENERS.
      KAFKA_LISTENERS: INTERNAL://kafka:9092,OUTSIDE://localhost:9093
      KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka:9092,OUTSIDE://localhost:9093
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:SASL_PLAINTEXT,OUTSIDE:SASL_PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN
      KAFKA_LISTENER_NAME_INTERNAL_SASL_ENABLED_MECHANISMS: PLAIN
      KAFKA_LISTENER_NAME_OUTSIDE_SASL_ENABLED_MECHANISMS: PLAIN
      # Broker-side JAAS: user_<name> entries define the accepted credentials.
      KAFKA_LISTENER_NAME_INTERNAL_PLAIN_SASL_JAAS_CONFIG: org.apache.kafka.common.security.plain.PlainLoginModule required \
        username="admin" \
        password="admin-secret" \
        user_admin="admin-secret" ;
      KAFKA_LISTENER_NAME_OUTSIDE_PLAIN_SASL_JAAS_CONFIG: org.apache.kafka.common.security.plain.PlainLoginModule required \
        username="admin" \
        password="admin-secret" \
        user_admin="admin-secret" \
        user_producer="producer-secret" \
        user_consumer="consumer-secret" ;
      KAFKA_SASL_JAAS_CONFIG: org.apache.kafka.common.security.plain.PlainLoginModule required \
        username="admin" \
        password="admin-secret" ;
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
================================================
FILE: plain/producer.properties
================================================
sasl.mechanism=PLAIN
security.protocol=SASL_PLAINTEXT
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
username="producer" \
password="producer-secret";
================================================
FILE: plain/up
================================================
#!/bin/sh
# Start the SASL/PLAIN example stack and print example client commands.
docker-compose up -d
echo "Example configuration:"
echo "-> kafka-console-producer --broker-list localhost:9093 --producer.config producer.properties --topic test"
echo "-> kafka-console-consumer --bootstrap-server localhost:9093 --consumer.config consumer.properties --topic test --from-beginning"
================================================
FILE: quotas/Client/Dockerfile
================================================
# Pin to centos8 (what the unpinned "centos" tag froze at); its default
# mirrors are offline, so point yum at the CentOS vault below.
FROM centos:centos8
# MAINTAINER is deprecated; LABEL is the supported replacement.
LABEL maintainer="seknop@gmail.com"
ENV container docker
# 0. Point yum at the CentOS vault (centos8 is EOL, mirror.centos.org is gone)
RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*
RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-Linux-*
# 1. Adding Confluent repository
RUN rpm --import https://packages.confluent.io/rpm/5.1/archive.key
COPY confluent.repo /etc/yum.repos.d/confluent.repo
RUN yum clean all
# 2. Install Confluent Kafka stack
RUN yum install -y java-1.8.0-openjdk
RUN yum install -y confluent-kafka-2.11
# Keep the container alive so clients can be exec'd into it.
CMD ["tail", "-f", "/dev/null"]
================================================
FILE: quotas/Client/confluent.repo
================================================
[Confluent.dist]
name=Confluent repository (dist)
baseurl=https://packages.confluent.io/rpm/5.1/7
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.1/archive.key
enabled=1
[Confluent]
name=Confluent repository
baseurl=https://packages.confluent.io/rpm/5.1
gpgcheck=1
gpgkey=https://packages.confluent.io/rpm/5.1/archive.key
enabled=1
================================================
FILE: quotas/Grafana/provisioning/dashboards/grafana-dashboard.json
================================================
{
"__inputs": [
{
"name": "DS_PROMETHEUS",
"label": "Prometheus",
"description": "",
"type": "datasource",
"pluginId": "prometheus",
"pluginName": "Prometheus"
}
],
"__requires": [
{
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "6.1.1"
},
{
"type": "panel",
"id": "graph",
"name": "Graph",
"version": ""
},
{
"type": "datasource",
"id": "prometheus",
"name": "Prometheus",
"version": "1.0.0"
}
],
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"gnetId": null,
"graphTooltip": 0,
"id": null,
"links": [],
"panels": [
{
"aliasColors": {},
"bars": false,
"dashLength": 10,
"dashes": false,
"datasource": "Prometheus",
"fill": 1,
"gridPos": {
"h": 9,
"w": 12,
"x": 0,
"y": 0
},
"id": 2,
"legend": {
"avg": false,
"current": false,
"max": false,
"min": false,
"show": true,
"total": false,
"values": false
},
"lines": true,
"linewidth": 1,
"nullPointMode": "null",
"percentage": false,
"pointradius": 2,
"points": false,
"renderer": "flot",
"seriesOverrides": [],
"spaceLength": 10,
"stack": false,
"steppedLine": false,
"targets": [
{
"expr": "cp_kafka_server_throttle_produce_quota_throttle",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
"refId": "A"
}
],
"thresholds": [],
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Panel Title",
"tooltip": {
"shared": true,
"sort": 0,
"value_type": "individual"
},
"type": "graph",
"xaxis": {
"buckets": null,
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yaxes": [
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
},
{
"format": "short",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
],
"yaxis": {
"align": false,
"alignLevel": null
}
}
],
"refresh": "5s",
"schemaVersion": 18,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-5m",
"to": "now"
},
"timepicker": {
"refresh_intervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"time_options": [
"5m",
"15m",
"1h",
"6h",
"12h",
"24h",
"2d",
"7d",
"30d"
]
},
"timezone": "",
"title": "throttle-one-user",
"uid": "M4aaf_RWz",
"version": 1
}
================================================
FILE: quotas/Grafana/provisioning/dashboards/one-quota.yaml
================================================
apiVersion: 1
providers:
# provider name
- name: 'prometheus'
# org id. will default to orgId 1 if not specified
orgId: 1
# name of the dashboard folder. Required
folder: ''
# folder UID. will be automatically generated if not specified
folderUid: ''
# provider type. Required
type: file
# disable dashboard deletion
disableDeletion: false
# enable dashboard editing
editable: true
# how often Grafana will scan for changed dashboards
updateIntervalSeconds: 10
options:
# path to dashboard files on disk. Required
path: /etc/grafana/provisioning/dashboards
================================================
FILE: quotas/Grafana/provisioning/datasources/prometheus.yaml
================================================
# config file version
apiVersion: 1
# list of datasources that should be deleted from the database
deleteDatasources:
- name: Prometheus
orgId: 1
# list of datasources to insert/update, depending on
# what is already available in the database
datasources:
# name of the datasource. Required
- name: Prometheus
# datasource type. Required
type: prometheus
# access mode. direct or proxy. Required
access: proxy
# org id. will default to orgId 1 if not specified
orgId: 1
# url
url: http://prometheus:9090
# database password, if used
password:
# database user, if used
user:
# database name, if used
database:
# enable/disable basic auth
basicAuth: true
# basic auth username
basicAuthUser: admin
# basic auth password
basicAuthPassword: foobar
# enable/disable with credentials headers
withCredentials:
# mark as default datasource. Max one per org
isDefault: true
#