We have two projects:
- project-data-chatbot
- project-frontend
We are trying to set up a standalone nginx reverse proxy that maps the two separate docker-compose projects. The backend works through a default_server configuration, but the proxy_pass to the front-end project is not working.
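Both compose files attach their services to an external network named chatbot_shared_network, so that network is created once on the host before either project is brought up (listed here just for completeness):

docker network create chatbot_shared_network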
Dockerfile details on project-frontend
FROM php:8.1-fpm
USER root
WORKDIR /var/www/html
RUN apt-get update && apt-get install -y \
    libpng-dev \
    zlib1g-dev \
    libxml2-dev \
    libzip-dev \
    libonig-dev \
    libpq-dev \
    zip \
    curl \
    unzip \
    nodejs \
    npm \
    && docker-php-ext-configure gd \
    && docker-php-ext-configure pgsql --with-pgsql=/usr/local/pgsql \
    && docker-php-ext-install -j$(nproc) gd \
    && docker-php-ext-install pdo_mysql \
    && docker-php-ext-install mysqli \
    && docker-php-ext-install zip \
    && docker-php-ext-install exif \
    && docker-php-ext-install pdo \
    && docker-php-ext-install pgsql \
    && docker-php-ext-install pdo_pgsql \
    && docker-php-source delete
COPY . /var/www/html
COPY ./.env /var/www/html/.env
# Install composer
RUN curl -sS https://getcomposer.org/installer | php -- --install-dir=/usr/local/bin --filename=composer
EXPOSE 9000 443
# EXPOSE 9000 5173 443
# EXPOSE 5173 443
# Start PHP-FPM server
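As far as I understand, the php:8.1-fpm base image's default CMD already starts PHP-FPM on port 9000, which is why there is no explicit CMD here. A quick sanity check of the build looks like this (the image tag is only an example):

docker build -t project-frontend-app .
docker run --rm project-frontend-app php-fpm -t   # validate the FPM configuration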
docker-compose details on project-frontend
version: '3.8'
services:
  # nginx:
  #   container_name: ${APP_NAME}-nginx
  #   image: nginx:alpine
  #   ports:
  #     - "8088:80"
  #   depends_on:
  #     - app
  #     - db
  #   volumes:
  #     - ./:/var/www/html
  #     - ./nginx/conf.d/preprod.conf:/etc/nginx/conf.d/default.conf
  #   networks:
  #     - project_frontend_network
  #     - chatbot_shared_network
  app:
    container_name: ${APP_NAME}
    build:
      context: ./
      dockerfile: Dockerfile
    ports:
      - "9000:9000"
      - "5170:5170"
    expose:
      - "5170"
    volumes:
      - ./:/var/www/html
    networks:
      # - project_frontend_network
      - chatbot_shared_network
  db:
    container_name: ${APP_NAME}-db
    image: mariadb
    restart: unless-stopped
    ports:
      - "3306:3306"
    volumes:
      - ./mysql/data:/var/lib/mysql
    environment:
      - MYSQL_DATABASE=project-frontend-db
      - MYSQL_ROOT_PASSWORD=password
    networks:
      # - project_frontend_network
      - chatbot_shared_network
  phpmyadmin:
    container_name: ${APP_NAME}-pma
    image: phpmyadmin/phpmyadmin
    restart: unless-stopped
    ports:
      - "3400:80"
    depends_on:
      - db
    networks:
      - chatbot_shared_network
      # - project_frontend_network

#networks:
#  project_frontend_network:
#    driver: bridge
networks:
  chatbot_shared_network:
    external: true
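To confirm that the front-end containers really land on the same network as the reverse proxy, we list the containers attached to chatbot_shared_network from the host (purely a diagnostic):

docker network inspect chatbot_shared_network \
  --format '{{range .Containers}}{{.Name}} {{end}}'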
docker-compose.nginx.yaml for project-data-chatbot
version: '3.4'
services:
  web:
    build: nginx
    container_name: reverse-proxy
    restart: always
    environment:
      - CHATBOT_PORT=${CHATBOT_PORT}
      - CHAT_FRONTEND_PORT=${CHAT_FRONTEND_PORT}
    ports:
      - 80:80
      - 81:81
      - 443:443
    depends_on:
      chatbot:
        condition: service_started
  chatbot:
    expose:
      - ${CHATBOT_PORT}
      - ${CHAT_FRONTEND_PORT}
networks:
  default:
    name: chatbot_shared_network
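My assumption is that nginx does not expand ${CHATBOT_PORT} / ${CHAT_FRONTEND_PORT} in its config files by itself, so the custom nginx image has to render the config at startup, e.g. via envsubst the way the stock nginx entrypoint handles files under /etc/nginx/templates. A sketch of that substitution step (the template file name is assumed):

envsubst '${CHATBOT_PORT} ${CHAT_FRONTEND_PORT}' \
  < /etc/nginx/templates/default.conf.template \
  > /etc/nginx/conf.d/default.conf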
docker-compose for project-data-chatbot
version: '3.4'
services:
  ###########
  #  App    #
  ###########
  chatbot:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        - INGEST=${INGEST}
    container_name: chatbot
    restart: always
    depends_on:
      chatbot-weaviate:
        condition: service_started
      chatbot-postgres:
        condition: service_healthy
    ports:
      - ${CHATBOT_PORT}:7860
  chatbot-postgres:
    image: postgres:16.1-bullseye
    container_name: chatbot_postgres
    restart: always
    healthcheck:
      test: ["CMD-SHELL", "pg_isready"]
      interval: 30s
      timeout: 10s
      retries: 10
    environment:
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
      - POSTGRES_DB=${CHATBOT_POSTGRES_DB}
    volumes:
      - chatbot_pgdata:/var/lib/postgresql/data
  #################
  #  Vector DB    #
  #################
  chatbot-weaviate:
    command:
      - --host
      - 0.0.0.0
      - --port
      - '8080'
      - --scheme
      - http
    image: semitechnologies/weaviate:1.23.2
    container_name: chatbot_weaviate
    ports:
      - ${WEAVIATE_PORT}:8080
      - 50051:50051
    volumes:
      - chatbot_weaviate_data:/var/lib/weaviate
    restart: always
    environment:
      QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT}
      AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: 'true'
      PERSISTENCE_DATA_PATH: '/var/lib/weaviate'
      DEFAULT_VECTORIZER_MODULE: 'none'
      CLUSTER_HOSTNAME: 'node1'
  ###############
  #  Monitor    #
  ###############
  # https://langfuse.com/docs/langflow
  # https://github.com/langfuse/langfuse/blob/main/docker-compose.yml
  langfuse-server:
    image: ghcr.io/langfuse/langfuse:2.11.0
    container_name: langfuse
    restart: always
    depends_on:
      langfuse-postgres:
        condition: service_healthy
    ports:
      - ${LANGFUSE_PORT}:3000
    environment:
      - NODE_ENV=production
      - DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@langfuse_postgres:5432/${LANGFUSE_POSTGRES_DB}
      - NEXTAUTH_SECRET=${NEXTAUTH_SECRET}
      - SALT=${SALT}
      - NEXTAUTH_URL=http://localhost:3000
  langfuse-postgres:
    image: postgres:16.1-bullseye
    container_name: langfuse_postgres
    restart: always
    healthcheck:
      test: ["CMD-SHELL", "pg_isready"]
      interval: 1s
      timeout: 5s
      retries: 10
    environment:
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
      - POSTGRES_DB=${LANGFUSE_POSTGRES_DB}
    volumes:
      - langfuse_pgdata:/var/lib/postgresql/data
volumes:
  chatbot_pgdata:
  langfuse_pgdata:
  chatbot_weaviate_data:
networks:
  chatbot_shared_network:
    external: true
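Before involving the proxy at all, the chatbot responds directly on its published port (assuming config/.env has been sourced so CHATBOT_PORT is set, as init.sh does):

curl -I "http://localhost:${CHATBOT_PORT}"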
init.sh for managing containers on project-data-chatbot
#!/bin/bash

# Check if correct argument is supplied
if [ "$#" -lt 1 ] || ([ "$1" != "up" ] && [ "$1" != "down" ]); then
    echo "[ERROR]: Invalid argument. Please use 'up' or 'down'."
    exit 1
fi

# Load environment variables from .env file
export $(grep -v '^#' "./config/.env" | tr -d '\r' | xargs)

# Use additional modules
options=()
if [ "$NGINX" == "true" ]; then
    options+=(-f docker-compose.nginx.yaml)
fi

# Whether to start or stop the services
if [ "$1" = "down" ]; then
    command="docker compose --env-file ./config/.env ${options[@]} -f docker-compose.yaml down"
    echo $command
    eval $command &
    wait $!
elif [ "$1" = "up" ]; then
    command="docker compose --env-file ./config/.env ${options[@]} -f docker-compose.yaml up -d --build"
    echo $command
    eval $command &
    wait $!
fi
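Usage is simply:

# NGINX=true in config/.env pulls in docker-compose.nginx.yaml as well
./init.sh up     # build and start the stack in the background
./init.sh down   # stop and remove the containers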
The whole idea is to use only one nginx container, configured from the backend project, and use it for the front-end too; that's why there are two server{} blocks in the nginx config.
Nginx config on project-data-chatbot
worker_processes 1;

events {
    worker_connections 1024;
}

http {
    sendfile on;

    # upstream chatbot:${CHATBOT_PORT} {
    #     server nginx:80;
    #     server chatbot:${CHATBOT_PORT};
    # }
    # upstream docker-apache {
    #     server apache:80;
    # }

    server {
        listen 80 default_server;
        server_name _;
        # server_name project-preprod.sparktechstaging.com;

        location / {
            # proxy_pass http://docker-nginx;
            proxy_pass http://chatbot:${CHATBOT_PORT};
            # proxy_redirect off;
            # proxy_set_header Host $host;
            # proxy_set_header X-Real-IP $remote_addr;
            # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            # proxy_set_header X-Forwarded-Host $server_name;
        }
    }

    server {
        listen 80;
        server_name project-preprod.sparktechstaging.com;

        root /var/www/html/public/project-frontend;
        # root /var/www/html/public;
        index index.php index.htm index.html;

        error_log /var/log/nginx/error.log;
        access_log /var/log/nginx/access.log;

        index index.php;

        location / {
            # proxy_pass http://docker-apache;
            # resolver 127.0.0.11;
            # resolver 8.8.8.8;
            # root /var/www/html/public;
            # root /var/www/public;
            proxy_pass http://project:${CHAT_FRONTEND_PORT};
            # proxy_set_header Host $host;
            # proxy_set_header X-Real-IP $remote_addr;
            # try_files $uri /index.php?$query_string;
            # try_files $uri $uri/ /index.php$is_args$args;
            # try_files $uri $uri/ /index.php?$query_string;
            # proxy_redirect off;
            # proxy_set_header Host $host;
            # proxy_set_header X-Real-IP $remote_addr;
            # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            # proxy_set_header X-Forwarded-Host $server_name;
        }

        # location ~ \.php$ {
        #     fastcgi_pass unix:/var/run/php/php8.1-fpm.sock;
        #     fastcgi_pass app:9000;
        #     fastcgi_param SCRIPT_FILENAME $realpath_root$fastcgi_script_name;
        #     include fastcgi_params;
        # }
        # location /index.php {
        #     try_files $uri =404;
        #     fastcgi_split_path_info ^(.+\.php)(/.+)$;
        #     fastcgi_pass app:9000;
        #     fastcgi_pass app:5173;
        #     fastcgi_index index.php;
        #     include fastcgi_params;
        #     fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        #     fastcgi_param PATH_INFO $fastcgi_path_info;
        # }
    }
}
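To check that the proxy container actually sees the substituted ports in these server blocks, the rendered configuration can be dumped from inside the running container (a diagnostic, using the container name reverse-proxy from the compose file):

docker exec reverse-proxy nginx -T | grep -nE 'proxy_pass|server_name'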
telnet host 5170
Trying host…
Connected to host.
Escape character is '^]'.
Connection closed by foreign host.
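The telnet above only proves the port is reachable from the host. What matters for proxy_pass is whether the front-end container is resolvable and reachable by name from inside the proxy container on chatbot_shared_network, checked with something like this (assuming $APP_NAME holds the front-end container name and that getent/wget exist in the proxy image):

docker exec reverse-proxy getent hosts "$APP_NAME"            # name resolution on the shared network
docker exec reverse-proxy wget -qO- "http://$APP_NAME:5170"   # HTTP reachability on port 5170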