Я написал несколько µ-сервисов на Lagom. В режиме разработки на моей маленькой локальной машине всё работает нормально. Теперь я хочу перенести их на небольшой сервер с Ubuntu 20. Я пытаюсь написать docker-compose файл для развёртывания баз данных, ZooKeeper и Kafka. И как подключить Kafka к Lagom?
version: '3.4'

networks:
  panakeia-network:
    driver: bridge

services:
  postgres-database:
    container_name: panakeia-postgres
    image: postgres
    expose:
      - "5432"
    ports:
      - "12000:5432"
    environment:
      - POSTGRES_PASSWORD=123456789
      - POSTGRES_USER=panakeia
      - POSTGRES_DB=panakeia
    volumes:
      - /opt/panakeia/database/postgresql:/var/lib/postgresql/data
    networks:
      - panakeia-network

  zookeeper-server:
    container_name: panakeia-zookeeper-server
    image: bitnami/zookeeper
    expose:
      - "2181"
    ports:
      - "2181:2181"
    environment:
      - ZOO_ENABLE_AUTH=yes
      - ZOO_SERVER_USERS=user1
      - ZOO_SERVER_PASSWORDS=pass4user1
      - ZOO_CLIENT_USER=user1
      - ZOO_CLIENT_PASSWORD=pass4user1
    volumes:
      # NOTE(review): host path is spelled "zookeper" (missing 'e'); kept as-is
      # so an existing data directory keeps working — rename on the host if desired.
      - /opt/panakeia/database/zookeper:/bitnami/zookeeper
    networks:
      - panakeia-network

  kafka:
    container_name: panakeia-kafka
    image: bitnami/kafka
    # Start ZooKeeper first; the broker registers itself there on boot.
    depends_on:
      - zookeeper-server
    expose:
      - "9092"
    ports:
      - "9092:9092"
    environment:
      - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper-server:2181
      # Fix for "NoAuth for /consumers": with ZOO_ENABLE_AUTH=yes on the
      # zookeeper-server service, the broker must authenticate via SASL —
      # otherwise KAFKA_ZOOKEEPER_USER/PASSWORD are never used.
      - KAFKA_ZOOKEEPER_PROTOCOL=SASL
      - KAFKA_ZOOKEEPER_USER=user1
      - KAFKA_ZOOKEEPER_PASSWORD=pass4user1
      - ALLOW_PLAINTEXT_LISTENER=yes
      # Use the bitnami KAFKA_CFG_ prefix consistently (maps to server.properties).
      - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092
      # Advertise the compose service hostname so clients on panakeia-network
      # (e.g. Lagom service containers) reach the broker at kafka:9092.
      # NOTE(review): clients running OUTSIDE this docker network need a listener
      # advertised with the host's IP instead — confirm where Lagom will run.
      - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092
    volumes:
      - /opt/panakeia/database/kafka-persistence:/bitnami/kafka
    networks:
      - panakeia-network
Одна из первых проблем -
org.apache.zookeeper.KeeperException$NoAuthException: KeeperErrorCode = NoAuth for /consumers
Но если я защищаюсь с помощью ssl, я не знаю, как lagom может общаться с kafka.
И в какой строке мне нужно добавить тестовые учетные данные, IP и порт?
#lagom.broker.kafka.service-name = kafka # or whatever you have defined as the service name in k8s
#lagom.broker.kafka.service-name = '' # empty string disables service locator lookup
#lagom.broker.kafka.brokers = "kf1.mydomain.com:9092" # this can be a comma-separated string if you have >1
//lagom.broker.kafka {
// # The name of the Kafka service to look up out of the service locator.
// # If this is an empty string, then a service locator lookup will not be done,
// # and the brokers configuration will be used instead.
// service-name = "kafka_native"
// service-name = ${?KAFKA_SERVICE_NAME}
//
// # The URLs of the Kafka brokers. Separate each URL with a comma.
// # This will be ignored if the service-name configuration is non empty.
// brokers = ${lagom.broker.defaults.kafka.brokers}
// #brokers = "kafka.avalon.svc.cluster.local:9092"
//
// client {
// default {
// # Exponential backoff for failures
// failure-exponential-backoff {
// # minimum (initial) duration until processor is started again
// # after failure
// min = 3s
//
// # the exponential back-off is capped to this duration
// max = 30s
//
// # additional random delay is based on this factor
// random-factor = 0.2
// }
// }
//
// # configuration used by the Lagom Kafka producer
// producer = ${lagom.broker.kafka.client.default}
// producer.role = ""
//
// # configuration used by the Lagom Kafka consumer
// consumer {
// failure-exponential-backoff = ${lagom.broker.kafka.client.default.failure-exponential-backoff}
//
// # The number of offsets that will be buffered to allow the consumer flow to
// # do its own buffering. This should be set to a number that is at least as
// # large as the maximum amount of buffering that the consumer flow will do,
// # if the consumer buffer buffers more than this, the offset buffer will
// # backpressure and cause the stream to stop.
// offset-buffer = 100
//
// # Number of messages batched together by the consumer before the related messages'
// # offsets are committed to Kafka.
// # By increasing the batching-size you are trading speed with the risk of having
// # to re-process a larger number of messages if a failure occurs.
// # The value provided must be strictly greater than zero.
// batching-size = 20
//
// # Interval of time waited by the consumer before the currently batched messages'
// # offsets are committed to Kafka.
// # This parameter is useful to ensure that messages' offsets are always committed
// # within a fixed amount of time.
// # The value provided must be strictly greater than zero.
// batching-interval = 5 seconds
// }
// }
//}
//
//lagom.services {
//// cas_native = "_cql._tcp.cassandra.svc.cluster.local"
// kafka_native = "_kafka._tcp.kafka-headless.svc.cluster.local"
//}
Кто-то может мне помочь?
Спасибо. Андре
Не уверен, что понимаю проблему. Lagom должен позволить вам указать все свойства, связанные с Kafka SSL, но у вас есть
ALLOW_PLAINTEXT_LISTENER=yes
, поэтому они вам не нужны.