I just started learning about Docker and I am trying to create a project where I have producer, consumer and server scripts in seperate containers.Consumers update the table in sqlite3 database file in app/data and I want to send api requests to reach them.
What I Expect:
I expect that producer and consumer to be running after kafka,zookeper and server container’s health conditions are checked after that I expect to send api requests to my server container through localhost:5000 from my host machine
What actually happens:
kafka zookeeper ,server and producer containers works as expected but for some reason consumer container starts but do not log anything or show any sign that it is executing script in cmd command in docker file
What I tried:
I checked all the scripts running inside containers and they work perfectly.I also using docker exec -it
executed python script inside of the consumer container and it worked perfect too it is just not executed after my docker compose up -- build
command. Lastly I changed port to be wrong and it logged error.
Edit:
I replaced my consumer.py code with arbitrary hello world.py and it worked expected.I think that problem is probably about in my consume script even though it works outside of the container with the correct port configuration so I am adding it here too.
from confluent_kafka import Consumer, KafkaException , KafkaError
import json
import sqlite3
import uuid
# Kafka consumer configuration
conf = {
'bootstrap.servers': 'kafka:9092',
'group.id': uuid.uuid4,
'auto.offset.reset': 'earliest',
'enable.auto.commit': False # Disable auto commit to manually commit offsets
}
consumer = Consumer(conf)
consumer.subscribe(['pokemon'])
# SQLite database configuration
conn = sqlite3.connect('/app/data/pokemon.db')
cursor = conn.cursor()
def insert_into_db(name, description, price, stock):
cursor.execute('''
INSERT OR REPLACE INTO pokemon (name, description, price, stock) VALUES (?, ?, ?, ?)
''', (name, description, price, stock))
conn.commit()
try:
while True:
msg = consumer.poll(timeout=1.0)
if msg is None:
continue
if msg.error():
if msg.error().code() == KafkaError._PARTITION_EOF:
print('End of partition reached {0}/{1}'.format(msg.topic(), msg.partition()))
elif msg.error():
raise KafkaException(msg.error())
else:
message = json.loads(msg.value().decode('utf-8'))
name = message.get('name')
print("Received information of " + name)
description = message.get('Description')
price = message.get('price')
stock = message.get('stock')
# Insert message data into database
insert_into_db(name, description, price, stock)
print([name, description, price, stock])
# Commit offset after writing to file
consumer.commit(msg)
except Exception as e:
print(f"An error occurred: {e}")
finally:
consumer.close()
conn.close()
print("Consumer closed and database connection closed.")
Below you can find details of my code:
services:
zookeeper:
image: confluentinc/cp-zookeeper:latest
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000
ports:
- "2181:2181"
healthcheck:
test: ["CMD", "nc", "-z", "localhost", "2181"]
interval: 10s
retries: 3
start_period: 10s
timeout: 5s
kafka:
image: confluentinc/cp-kafka:latest
depends_on:
- zookeeper
ports:
- "9092:9092"
- "29092:29092"
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,PLAINTEXT_HOST://localhost:29092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
healthcheck:
test: ["CMD", "nc", "-z", "localhost", "9092"]
interval: 10s
retries: 3
start_period: 10s
timeout: 5s
producer:
build: ./producer
depends_on:
kafka:
condition: service_healthy
consumer:
build: ./consumer
depends_on:
producer:
condition: service_completed_successfully
server:
condition: service_healthy
volumes:
- db-data:/app/data
server:
build: ./server
depends_on:
- kafka
environment:
KAFKA_BOOTSTRAP_SERVERS: kafka:9092 # Add environment variable for Kafka servers
volumes:
- db-data:/app/data # Mount the shared volume for the database
ports:
- "5000:5000" # Expose port for the Flask app
healthcheck:
test: ["CMD-SHELL", "[ -f /app/data/pokemon.db ]"] # Check if the database file exists
interval: 10s
retries: 3
start_period: 30s
timeout: 5s
volumes:
db-data:
producer = Producer({
'bootstrap.servers': 'kafka:9092', # Kafka broker address
})
conf = {
'bootstrap.servers': 'kafka:9092',
'group.id': uuid.uuid4,
'auto.offset.reset': 'earliest',
'enable.auto.commit': False # Disable auto commit to manually commit offsets
}
consumer = Consumer(conf)
consumer.subscribe(['pokemon'])
Here is my consumer DockerFile:
FROM python:3.11
WORKDIR /app
RUN pip install confluent-kafka
COPY . /app/
CMD ["python", "./consume.py"]
2