Question

No live upstream while connecting to upstream (jwilder/nginx-proxy)

This is my nginx/vhost.d/default conf file.

upstream djangotango.meghaggarwal.com {
    server web:8000;
}

server {

    listen 80;
    listen 443;
    server_name djangotango.meghaggarwal.com;

    location / {
        proxy_pass http://djangotango.meghaggarwal.com;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Host $host;
        proxy_redirect off;
    }

    location /static/ {
        alias /home/app/web/static/;
        add_header Access-Control-Allow-Origin *;
    }

    location /media/ {
        alias /home/app/web/media/;
        add_header Access-Control-Allow-Origin *;
    }

}

docker-compose.staging.yml

version: '3.8'

networks:
  public_network:
    name: public_network
    driver: bridge

services:
  web:
    build: 
      context: .
      dockerfile: Dockerfile.prod
    command: gunicorn djangotango.wsgi:application --bind 0.0.0.0:8000
    volumes:
      # - .:/home/app/web/
      - static_volume:/home/app/web/static
      - media_volume:/home/app/web/media 
     
    expose:
      - 8000
    env_file:
      - ./.env.staging
    

  db:
    image: postgres:12.0-alpine
    volumes:
      - postgres_data:/var/lib/postgresql/data/
    env_file:
      - ./.env.staging.db
    depends_on: 
      - web
  
  pgadmin:
    image: dpage/pgadmin4
    env_file: 
      - ./.env.staging.db
    ports:
      - "8080:80"
    volumes:
      - pgadmin-data:/var/lib/pgadmin
    depends_on: 
      - db
    links: 
      - "db:pgsql-server"
    environment: 
      - PGADMIN_DEFAULT_EMAIL=pgadmin4@pgadmin.org
      - PGADMIN_DEFAULT_PASSWORD=root
      - PGADMIN_LISTEN_PORT=80

  nginx-proxy:
    build: ./nginx
    restart: always
    ports:
      - 443:443  
      - 80:80
    volumes:
      - static_volume:/home/app/web/static
      - media_volume:/home/app/web/media 
      - certs:/etc/nginx/certs
      - html:/usr/share/nginx/html
      - vhost:/etc/nginx/vhost.d
      - /var/run/docker.sock:/tmp/docker.sock:ro
    depends_on:
      - web
    networks:
      - public_network

  nginx-proxy-letsencrypt:
    image: jrcs/letsencrypt-nginx-proxy-companion
    env_file:
      - .env.staging.proxy-companion
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - certs:/etc/nginx/certs
      - html:/usr/share/nginx/html
      - vhost:/etc/nginx/vhost.d
    depends_on:
      - nginx-proxy
    networks:
      - public_network
    
   
volumes:
  postgres_data:
  pgadmin-data:
  static_volume:
  media_volume:
  certs:
  html:
  vhost:

.env.staging.db

VIRTUAL_HOST=djangotango.meghaggarwal.com
VIRTUAL_PORT=8000
LETSENCRYPT_HOST=djangotango.meghaggarwal.com

I have shown the main snippets only. I've tried my best with the documentation and Stack Overflow, but nothing has helped, and I'm close to giving up.

Can anyone help me configure my nginx conf file? I keep getting a 502 error when loading the site in the browser.



Do some requests work and then it stops, or does it not work at all? My guess is that the web server in the container does not answer for some reason.
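
For example, you could watch the gunicorn logs for the web service to see whether requests reach it at all. A rough sketch, assuming the compose file above is used from the project directory:

# Follow the web container's logs; gunicorn logs each request it receives
docker-compose -f docker-compose.staging.yml logs --follow web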

Can you dump the generated nginx config with docker exec <nginx-proxy-container-id> cat /etc/nginx/conf.d/default.conf?
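
For example (the container name below is a placeholder; use docker ps to find the actual one):

# Locate the running nginx-proxy container
docker ps --filter "name=nginx-proxy"

# Dump the config that nginx-proxy generated from the containers' VIRTUAL_HOST settings
docker exec <nginx-proxy-container-id> cat /etc/nginx/conf.d/default.conf

# The proxy's log usually shows why it answered 502 for the upstream
docker logs <nginx-proxy-container-id>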

It might also be useful to publish the web container's port and access it directly, without going through nginx-proxy, as sketched below.
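
A rough sketch of one way to do that, assuming the compose file above (the 8000:8000 mapping is only an example; remove it again afterwards):

# In docker-compose.staging.yml, temporarily replace "expose: - 8000" on the
# web service with a published port, for example:
#
#     ports:
#       - "8000:8000"
#
# then recreate the service and request the app directly, bypassing nginx-proxy:
docker-compose -f docker-compose.staging.yml up -d --build web
curl -i -H "Host: djangotango.meghaggarwal.com" http://localhost:8000/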