cannot access laravel over https in docker - angularjs

We have two docker containers. One is running our angular app and one is running our laravel api. Each has their own docker-compose file.
On our localhost there was no issue making api calls from angular to laravel over 127.0.0.1:3000
Then I took these containers and started them up on my Ubuntu server. Still no problem making calls over 195.xxx.xxx.xx:3000
I then added an SSL certificate to the domain and all of a sudden I can not make calls to the api over port 3000
Can anyone tell me where I am going wrong. I have tried different ports. If I remove the certbot stuff and call over http it all works fine again. Please please help...
For my ssl setup I followed this article and got it all setup without any real issues
Here is the docker setup for laravel
Dockerfile:
FROM php:7.3-fpm
# Copy composer.lock and composer.json
COPY composer.lock composer.json /var/www/
# Set working directory
WORKDIR /var/www
# Install dependencies
RUN apt-get update && apt-get install -y \
build-essential \
mariadb-client \
libpng-dev \
libjpeg62-turbo-dev \
libfreetype6-dev \
locales \
zip \
jpegoptim optipng pngquant gifsicle \
vim \
unzip \
git \
curl \
libzip-dev
# Clear cache
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
# Install extensions
RUN docker-php-ext-install pdo_mysql mbstring zip exif pcntl
RUN docker-php-ext-configure gd --with-gd --with-freetype-dir=/usr/include/ --with-jpeg-dir=/usr/include/ --with-png-dir=/usr/include/
RUN docker-php-ext-install gd
# Install composer
RUN curl -sS https://getcomposer.org/installer | php -- --install-dir=/usr/local/bin --filename=composer
# Add user for laravel application
RUN groupadd -g 1000 www
RUN useradd -u 1000 -ms /bin/bash -g www www
# Copy existing application directory contents
COPY . /var/www
# Copy existing application directory permissions
COPY --chown=www:www . /var/www
# Change current user to www
USER www
# Expose port 3000 and start php-fpm server
EXPOSE 3000
CMD php-fpm
docker-compose.yml
version: "3"
services:
#PHP Service
api:
build:
context: .
dockerfile: Dockerfile
image: laravel360
container_name: app
restart: unless-stopped
tty: true
environment:
SERVICE_NAME: app
SERVICE_TAGS: dev
working_dir: /var/www
volumes:
- ./:/var/www
- ./php/local.ini:/usr/local/etc/php/conf.d/local.ini
networks:
- app-network
#Nginx Service
webserver:
image: nginx:alpine
container_name: webserver
restart: unless-stopped
tty: true
ports:
- "3000:80"
- "3001:443"
volumes:
- ./:/var/www
- ./nginx/conf.d/:/etc/nginx/conf.d/
networks:
- app-network
#MySQL Service
db:
image: mysql:5.7.22
container_name: db
restart: unless-stopped
tty: true
ports:
- "3306:3306"
environment:
MYSQL_DATABASE: name
MYSQL_ROOT_PASSWORD: password
SERVICE_TAGS: dev
SERVICE_NAME: mysql
volumes:
- dbdata:/var/lib/mysql/
- ./mysql/my.cnf:/etc/mysql/my.cnf
networks:
- app-network
#Docker Networks
networks:
app-network:
driver: bridge
#Volumes
volumes:
dbdata:
driver: local
And finally the config file
server {
listen 80;
client_max_body_size 100M;
index index.php index.html;
error_log /var/log/nginx/error.log;
access_log /var/log/nginx/access.log;
root /var/www/public;
location ~ \.php$ {
try_files $uri =404;
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_pass app:9000;
fastcgi_index index.php;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param PATH_INFO $fastcgi_path_info;
}
location / {
try_files $uri $uri/ /index.php?$query_string;
gzip_static on;
}
}
server {
listen 443 ssl;
client_max_body_size 100M;
index index.php index.html;
error_log /var/log/nginx/error.log;
access_log /var/log/nginx/access.log;
root /var/www/public;
location ~ \.php$ {
try_files $uri =404;
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_pass app:9000;
fastcgi_index index.php;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param PATH_INFO $fastcgi_path_info;
}
location / {
try_files $uri $uri/ /index.php?$query_string;
gzip_static on;
}
}
Angular Docker
#############
### build ###
#############
# base image
FROM node:alpine as build
# set working directory
WORKDIR /app
# add `/app/node_modules/.bin` to $PATH
ENV PATH /app/node_modules/.bin:$PATH
# install and cache app dependencies
COPY package.json /app/package.json
RUN npm install
RUN npm install -g @angular/cli@~9.1.0
# add app
COPY . /app
# run tests
# RUN ng test --watch=false
# RUN ng e2e --port 4202
# generate build
RUN ng build --output-path=dist
FROM nginx:alpine
COPY --from=build /app/dist /usr/share/nginx/html
EXPOSE 80 443
CMD [ "nginx", "-g", "daemon off;" ]
Docker Compose
version: '3'
services:
angular:
container_name: angular
build:
context: .
dockerfile: Dockerfile-prod
ports:
- "80:80"
- "443:443"
volumes:
- ./data/nginx:/etc/nginx/conf.d
- ./data/certbot/conf:/etc/letsencrypt
- ./data/certbot/www:/var/www/certbot
command: "/bin/sh -c 'while :; do sleep 6h & wait $${!}; nginx -s reload; done & nginx -g \"daemon off;\"'"
certbot:
image: certbot/certbot
volumes:
- ./data/certbot/conf:/etc/letsencrypt
- ./data/certbot/www:/var/www/certbot
entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $${!}; done;'"
And then finally my nginx conf for the angular side
server {
listen 80;
server_name mydomaindotcom;
root /usr/share/nginx/html;
index index.html;
location / {
try_files $uri /index.html;
}
location /.well-known/acme-challenge/ {
root /var/www/certbot;
}
}
server {
listen 443 ssl;
server_name mydomaindotcom;
root /usr/share/nginx/html;
index index.html;
location / {
try_files $uri /index.html
proxy_pass http://mydomaindotcom; #for demo purposes
proxy_set_header Host http://mydomaindotcom;
}
ssl_certificate /etc/letsencrypt/live/mydomaindotcom/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/mydomaindotcom/privkey.pem;
include /etc/letsencrypt/options-ssl-nginx.conf;
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;
}

Did you run ./init-letsencrypt.sh?
and changed some files like nginx -> angular
echo "### Starting nginx ..."
docker-compose up --force-recreate -d angular
echo

Related

Redirecting from the homepage to multiple Docker apps using subdomains and NGINX (including SSL)

I've a tricky problem. So I have a server on which a homepage can be reached with SSL by running NGINX locally.
Now I would like to list my demo React apps from this homepage, which should run dockerized on the same server and can be reached under different subdomains.
E.g.:
example.com -> Homepage with links to demo 1 and demo 2 (should always reachable)
demo1.example.com -> React demo in Docker1
demo2.example.com -> React demo in Docker2
My approach:
Homepage (example.com)
server {
root /var/www/example.com;
index index.html;
server_name example.com www.example.com;
location / {
try_files $uri $uri/ =404;
}
listen 443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem; # managed by Certbot
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
server {
listen 80;
server_name example.com www.example.com;
return 404; # managed by Certbot
}
Demo 1:
Dockerfile:
FROM node:16.13.0 as build
# App
WORKDIR /app
COPY package.json .
COPY yarn.lock .
RUN yarn install
RUN yarn build
# Webserver
FROM nginx:stable-alpine
COPY --from=build /app/build /usr/share/nginx/html
COPY ./app/nginx.conf /etc/nginx/conf.d/default.conf
EXPOSE 8080
CMD ["nginx", "-g", "daemon off;"]
Docker-Compose:
version: '3'
services:
backend:
container_name: backend
build:
context: .
dockerfile: backend/Dockerfile
env_file:
- backend/.env
ports:
- '3000:3000'
restart: always
app:
container_name: app
build:
context: .
dockerfile: app/Dockerfile
env_file:
- .env
ports:
- '8080:80'
volumes:
- /etc/letsencrypt/:/etc/letsencrypt/
restart: always
nginx.conf:
server {
server_name demo1.example.com www.demo1.example.com
root /usr/share/nginx/html;
index index.html index.htm;
location / {
proxy_pass https://localhost:8080;
}
listen 443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem; # managed by Certbot
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
server {
listen 80;
server_name demo1.example.com www.demo1.example.com;
return 404; # managed by Certbot
}
So far without success, because I'm sure there is something wrong. If there is a completely different/easier way to do this, I'm open to any help.
Thanks

How to add Reactjs code to django app on docker-compose with nginx-proxy acme-companion

I am trying to setup a complete django react webapp via docker-compose on AWS. I went through a tutorial to create a django backend with database and ssl via nginx-proxy and letsencrypt acme-companion.
Everything works so far, but I struggle to add reactjs code as the frontend. I created a frontend folder with react-code and a Dockerfile to create the static files:
# Dockerfile frontend
FROM node:15.13-alpine as build
WORKDIR /frontend
# add `/app/node_modules/.bin` to $PATH
ENV PATH /frontend/node_modules/.bin:$PATH
# install app dependencies
COPY package.json ./
COPY package-lock.json ./
RUN npm ci --silent
COPY . ./
RUN npm run build
# The second stage
# Copy React static files
FROM nginx:stable-alpine
COPY --from=build /frontend/build /usr/share/nginx/html
I tried to change the default file in nginx/vhost.d/default to access static frontend files as default and the django-backend-app via /api:
# nginx/vhost.d/default
server {
listen 80;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
}
location /api {
try_files $uri @proxy_api;
}
location /admin {
try_files $uri @proxy_api;
}
location @proxy_api {
proxy_set_header X-Forwarded-Proto https;
proxy_set_header X-Url-Scheme $scheme;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_pass http://backend:8000;
}
location /django_static/ {
autoindex on;
alias /app/backend/server/django_static/;
}
}
Here is the docker-compose file:
# docker-compose.yml
version: '3.8'
services:
backend:
platform: linux/amd64
build:
context: ./django
dockerfile: Dockerfile.prod
logging:
driver: "awslogs"
options:
awslogs-region: "eu-central-1"
awslogs-group: "acquirepad_nginx_proxy"
awslogs-stream: "web"
image: "${BACKEND_IMAGE}"
command: gunicorn core.wsgi:application --bind 0.0.0.0:8000 --log-level=debug
volumes:
- static_volume:/home/app/web/staticfiles
- media_volume:/home/app/web/mediafiles
expose:
- 8000
env_file:
- ./.env
frontend:
build:
context: ./frontend
volumes:
- react_build:/frontend/build
nginx-proxy:
container_name: nginx-proxy
build: nginx
logging:
driver: "awslogs"
options:
awslogs-region: "eu-central-1"
awslogs-group: "acquirepad_nginx_proxy"
awslogs-stream: "nginx-proxy"
image: "${NGINX_IMAGE}"
restart: always
ports:
- 443:443
- 80:80
volumes:
- react_build:/var/www/frontend
- static_volume:/home/app/web/staticfiles
- media_volume:/home/app/web/mediafiles
- certs:/etc/nginx/certs
- html:/usr/share/nginx/html
- vhost:/etc/nginx/vhost.d
- /var/run/docker.sock:/tmp/docker.sock:ro
depends_on:
- frontend
- backend
nginx-proxy-letsencrypt:
platform: linux/amd64
logging:
driver: "awslogs"
options:
awslogs-region: "eu-central-1"
awslogs-group: "acquirepad_nginx_proxy"
awslogs-stream: "nginx-proxy-letsencrypt"
image: nginxproxy/acme-companion
env_file:
- ./.env.staging.proxy-companion
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- certs:/etc/nginx/certs
- html:/usr/share/nginx/html
- vhost:/etc/nginx/vhost.d
- acme:/etc/acme.sh
depends_on:
- nginx-proxy
volumes:
static_volume:
media_volume:
certs:
html:
vhost:
acme:
react_build:
When I run docker-compose on the AWS-EC2 instance, the django backend is still displayed by default on the website and I can not get access to the frontend. I have the feeling, that the file /nginx/vhost.d/default does not have any influence on the webapp at all. Help is much appreciated.

Docker - served react app, asset-manifest.json with incorrect filenames

I'm new to web development, and I run into a strange error.
I have a React/Django app which I'm trying to productionize with nginx and docker.
Django runs on a postgres db, and nginx just reroutes port 80 to my react and django ports.
When I locally deploy the application using
npm run build
serve -s build
everything works as desired.
However, doing the same through Docker doesn't.
I have a Dockerfile building the react application:
FROM node:12.18.3-alpine3.9 as builder
WORKDIR /usr/src/app
COPY ./react-app/package.json .
RUN apk add --no-cache --virtual .gyp \
python \
make \
g++ \
&& npm install \
&& apk del .gyp
COPY ./react-app .
RUN npm run build
FROM node:12.18.3-alpine3.9
WORKDIR /usr/src/app
RUN npm install -g serve
COPY --from=builder /usr/src/app/build ./build
Now when I use
docker-compose build
docker-compose up
I see that my Django, React, Postgres and nginx containers are all running, with nginx visible at port 80. When I open localhost in my browser, I see nginx is looking for some static react files in the right directory. However, the react files it is looking for have a different hash than the static files. The static files of both the nginx and react container are the same. So somehow, my asset-manifest.json contains the wrong filenames. Any idea what causes this is or how I can solve this?
Edit: Added docker-compose.yml:
version: "3.7"
services:
django:
build:
context: ./backend
dockerfile: Dockerfile
volumes:
- django_static_volume:/usr/src/app/static
expose:
- 8000
env_file:
- ./backend/.env
command: gunicorn core.wsgi:application --bind 0.0.0.0:8000
depends_on:
- db
db:
image: postgres:12.0-alpine
volumes:
- postgres_data:/var/lib/postgresql/data/
env_file:
- ./postgres/.env
react:
build:
context: ./frontend
dockerfile: Dockerfile
volumes:
- react_static_volume:/usr/src/app/build/static
expose:
- 5000
env_file:
- .env
command: serve -s build
depends_on:
- django
nginx:
restart: always
build: ./nginx
volumes:
- django_static_volume:/usr/src/app/django_files/static
- react_static_volume:/usr/src/app/react_files/static
ports:
- 80:80
depends_on:
- react
volumes:
postgres_data:
django_static_volume:
react_static_volume:
Do you need to run React in a separate container? Is there any reason for doing this? (It might be)
In my approach, I'm building React static files in nginx Dockerfile, and copy them to /usr/share/nginx/html. Then nginx serves it at location /.
nginx Dockerfile
# The first stage
# Build React static files
FROM node:13.12.0-alpine as build
WORKDIR /app/frontend
COPY ./frontend/package.json ./
COPY ./frontend/package-lock.json ./
RUN npm ci --silent
COPY ./frontend/ ./
RUN npm run build
# The second stage
# Copy React static files and start nginx
FROM nginx:stable-alpine
COPY --from=build /app/frontend/build /usr/share/nginx/html
CMD ["nginx", "-g", "daemon off;"]
nginx configuration file
server {
listen 80;
server_name _;
server_tokens off;
client_max_body_size 20M;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
}
location /api {
try_files $uri @proxy_api;
}
location /admin {
try_files $uri @proxy_api;
}
location @proxy_api {
proxy_set_header X-Forwarded-Proto https;
proxy_set_header X-Url-Scheme $scheme;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Host $http_host;
proxy_redirect off;
proxy_pass http://backend:8000;
}
location /django_static/ {
autoindex on;
alias /app/backend/server/django_static/;
}
}
Docker-compose
version: '2'
services:
nginx:
restart: unless-stopped
build:
context: .
dockerfile: ./docker/nginx/Dockerfile
ports:
- 80:80
volumes:
- static_volume:/app/backend/server/django_static
- ./docker/nginx/development:/etc/nginx/conf.d
depends_on:
- backend
backend:
restart: unless-stopped
build:
context: .
dockerfile: ./docker/backend/Dockerfile
volumes:
entrypoint: /app/docker/backend/wsgi-entrypoint.sh
volumes:
- static_volume:/app/backend/server/django_static
expose:
- 8000
volumes:
static_volume: {}
Please check my article
Docker-Compose for Django and React with Nginx reverse-proxy and Let's encrypt certificate for more details. There is also example of how to issue Let's encrypt certificate and renew it in docker-compose. If you will need more help, please let me know.

Configure Nginx for React and Flask with Docker-Compose

I am trying to configure multiple Docker containers for a website with Nginx.
I have docker-compose working to spin up each container but I'm having trouble getting the React app to hit the Gunicorn WSGI server for the Flask API's when using Nginx.
Any idea why this might happen? It works fine without Nginx in the picture. Do I need an Nginx conf for the flask app also? Or is it a case of routing requests to the Gunicorn WSGI server from Nginx?
React frontend (container)
# build environment
FROM node:12.2.0-alpine as build
WORKDIR /usr/src/app
ENV PATH /usr/src/app/node_modules/.bin:$PATH
COPY package.json /usr/src/app/package.json
RUN npm install --silent
RUN npm install react-scripts@3.0.1 -g --silent
COPY . /usr/src/app
RUN npm run build
# production environment
FROM nginx:1.16.0-alpine
COPY --from=build /usr/src/app/build /usr/share/nginx/html
RUN rm /etc/nginx/conf.d/default.conf
COPY nginx/nginx.conf /etc/nginx/conf.d
EXPOSE 80
CMD ["nginx", "-g", "daemon off;"]
Nginx.conf
server {
listen 80;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
try_files $uri $uri/ /index.html;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
Docker-compose
version: '3.7'
services:
middleware:
build:
context: ./middleware
dockerfile: Dockerfile.prod
command: gunicorn --bind 0.0.0.0:5000 main:app
ports:
- 5000:5000
env_file:
- ./.env.prod
frontend:
container_name: frontend
build:
context: ./frontend/app
dockerfile: Dockerfile.prod
ports:
- '80:80'
Yes, you need to proxy the traffic in Nginx to the WSGI app, something like
server {
listen 80;
server_name your_domain www.your_domain;
location / {
include uwsgi_params;
uwsgi_pass unix:/home/sammy/myproject/myproject.sock;
}
}
read more here
Update
In this particular case, you will need to proxy to Gunicorn which is in a separate container, visible under the name middleware.
Because of that the uwsgi_pass directive should refer to that container:
server {
listen 80;
server_name your_domain www.your_domain;
location / {
include uwsgi_params;
uwsgi_pass http://middleware:5000;
}
}
Please mind that if you're using Gunicorn — it recommends using proxy_pass, not uwsgi_pass.

Can't setup nginx proxy to docker container with react

I'm trying to build a web application based on docker containers, using symfony and react. The problem is my container with nginx does not proxy my container with react running in development mode. Requests to the backend via /api/... work fine, but when I try to access the frontend at domain.com, for example, I've got a 502 error.
My nginx configuration:
upstream frontend {
server frontend:8080;
}
server {
set $APP_ENV "dev";
set $APP_DEBUG "1";
listen 80;
listen [::]:80 default_server;
server_name store.com;
root /var/www/store/public;
location /api {
try_files $uri /index.php$is_args$args;
}
location /oauth {
try_files $uri /index.php$is_args$args;
}
location /_wdt {
# try to serve file directly, fallback to app.php
try_files $uri /index.php$is_args$args;
}
location /_profiler {
# try to serve file directly, fallback to app.php
try_files $uri /index.php$is_args$args;
}
# DEV
# This rule should only be placed on your development environment
# In production, don't include this and don't deploy app_dev.php or config.php
location ~ ^/(index)\.php(/|$) {
fastcgi_pass php:9000;
fastcgi_split_path_info ^(.+\.php)(/.*)$;
include fastcgi_params;
# When you are using symlinks to link the document root to the
# current version of your application, you should pass the real
# application path instead of the path to the symlink to PHP
# FPM.
# Otherwise, PHP's OPcache may not properly detect changes to
# your PHP files (see https://github.com/zendtech/ZendOptimizerPlus/issues/126
# for more information).
fastcgi_param SCRIPT_FILENAME $realpath_root$fastcgi_script_name;
fastcgi_param DOCUMENT_ROOT $realpath_root;
}
location = /favicon.ico {
log_not_found off;
access_log off;
}
location = /robots.txt {
allow all;
log_not_found off;
access_log off;
}
# return 404 for all other php files not matching the front controller
# this prevents access to other php files you don't want to be accessible.
location ~ \.php$ {
return 404;
}
location / {
proxy_pass http://frontend/;
proxy_redirect off;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
error_log /var/log/nginx/project_error.log;
access_log /var/log/nginx/project_access.log;
}
docker-compose
version: '3'
services:
php:
build: php
working_dir: /var/www/store
links:
- mysql
volumes:
- ../backend:/var/www/store
- ./php/php.ini:/usr/local/etc/php/php.ini:ro
networks:
- backend
- frontend
environment:
XDEBUG_CONFIG: remote_host=192.168.31.32
nginx:
image: nginx
links:
- php
- frontend
ports:
- "80:80"
- "443:443"
networks:
- backend
- frontend
volumes:
- ../backend:/var/www/store
- ../frontend:/var/www/app
- ./nginx/vhosts/dev/default.conf:/etc/nginx/conf.d/default.conf:ro
mysql:
restart: always
image: mysql:5.6
environment:
MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD}
MYSQL_DATABASE: ${MYSQL_DATABASE}
MYSQL_USER: ${MYSQL_USER}
MYSQL_PASSWORD: ${MYSQL_PASSWORD}
networks:
- backend
volumes:
- mysql-data:/var/lib/mysql
ports:
- "3306:3306"
frontend:
image: node:latest
user: node
command: bash -c "npm install && npm start"
working_dir: /home/node/app
networks:
- frontend
volumes:
- ../frontend:/home/node/app
networks:
frontend:
backend:
volumes:
mysql-data:
The problem was not in the nginx or docker configuration, the problem was in the configuration of webpack dev server. Resolved by using command "start": "webpack-dev-server --host 0.0.0.0 --inline --content-base" for start dev server, and some additional config
devServer: {
disableHostCheck: true,
historyApiFallback: true
}

Resources