Nginx Cache, redirect on miss/bypass

cachenginx

Is there a possibility to set up an nginx cache (nginx_proxy_cache) to handle MISS/BYPASS differently:

  1. cache exists -> use it
  2. cache MISSed/BYPASSed/UPDATing -> Redirect to backend server.

Behaviour Example

Why do I need this?

I have 6 servers with static mp4 files (all clones) on SATA drives.
And I bought a new one with SSDs to test performance.

If I use the standard nginx cache, yes, it will work… but only some hours later, after it has downloaded 1k × 10MB files from the overloaded servers 🙂

I'm trying to achieve: "serve if cached, redirect to the backend (not BYPASS!) while caching/updating".

As asked in the comments, if I visit a URL that is not in the cache, I get a response like this:

GET http://xxx/content/mp4/yyy/zzz.mp4

HTTP/1.1 200 OK
Server: nginx
Date: Fri, 16 Dec 2016 23:44:54 GMT
Content-Type: video/mp4
Content-Length: 25065219
Connection: keep-alive
Last-Modified: Mon, 22 Jun 2015 02:34:00 GMT
ETag: "55877418-17e7703"
X-Cache-Status: MISS
Accept-Ranges: bytes

As you can see, there is no "Location" field, which is what I want. Nginx currently passes the video file through itself to the browser as well as to the cache. If I make one more request while it is "updating", it will pass through again.

What I would really like to have (without involving PHP/Ruby, etc.) is a response like this:

GET http://xxx/content/mp4/yyy/zzz.mp4

HTTP/1.1 200 OK
Server: nginx
Date: Fri, 16 Dec 2016 23:44:54 GMT
Content-Type: video/mp4
Content-Length: 25065219
Connection: keep-alive
Last-Modified: Mon, 22 Jun 2015 02:34:00 GMT
ETag: "55877418-17e7703"
X-Cache-Status: MISS
Accept-Ranges: bytes
Location: http://backend/content/mp4/yyy/zzz.mp4

While still having nginx populate the cache with this file.

Or maybe it would be simpler to implement this behaviour in Varnish?
I haven't tried that yet.

nginx.conf

s001:/etc/nginx# cat nginx.conf

# Run worker processes as an unprivileged user; the master writes its pid here.
user XXX;
pid /run/nginx.pid;
# One worker per CPU core.
worker_processes auto;
error_log /var/log/nginx/error.log warn;
# Raised per-worker open-file limit (cache files + client + upstream sockets).
worker_rlimit_nofile 30000;
include /etc/nginx/modules-enabled/*.conf;

events {
    worker_connections 2048;
    # Accept as many pending connections as possible per wakeup.
    multi_accept on;
}

http {
    # Hide the nginx version in responses and error pages.
    server_tokens off;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format ups '$remote_addr $host [$time_local] "$request" "$upstream_response_time"';
    log_format IP '[$time_local] $http_referer $request $remote_addr';
    # FIX: corrected label typo "upstrea_response" -> "upstream_response".
    log_format vhost '$remote_addr $host $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$request_body" "$http_user_agent" upstream_response "$upstream_response_time"';

    access_log /var/log/nginx/access.log vhost;

    sendfile on;
    sendfile_max_chunk 1m;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65s;

    client_body_buffer_size 8k;
    client_max_body_size 80M;
    client_body_timeout 60s;
    large_client_header_buffers 4 16k;
    send_timeout 60s;
    types_hash_max_size 2048;

    # gzip is globally OFF; the settings below only take effect if gzip is
    # re-enabled in a more specific (server/location) context.
    gzip off;
    gzip_static off;
    # FIX: "gzip_proxied" was specified twice in this context (an explicit
    # list and, later, "any"). "any" supersedes the explicit list, so keep
    # a single directive.
    gzip_proxied any;
    gzip_vary on;
    gzip_min_length 1100;
    gzip_buffers 64 8k;
    gzip_comp_level 2;
    gzip_http_version 1.1;
    gzip_types text/plain application/xml application/x-javascript text/css video/x-flv application/octet-stream video/mpeg video/mp4;
    server_names_hash_bucket_size 64;

    proxy_connect_timeout 60s;
    proxy_read_timeout 60s;
    proxy_buffering on;
    proxy_force_ranges on;
    proxy_http_version 1.1;
    # Cache lifetime is controlled solely by proxy_cache_valid below;
    # upstream cache-control headers are ignored.
    proxy_ignore_headers X-Accel-Expires Expires Cache-Control;
    proxy_pass_request_headers on;

    # Pool of the six identical SATA backends holding the mp4 files.
    upstream uHDDs {
        server 172.16.0.5;
        server 172.16.0.6;
        server 172.16.0.7;
        server 172.16.0.8;
        server 172.16.0.9;
        server 172.16.0.10;
    }

    # Expose HIT/MISS/BYPASS/UPDATING to clients for cache debugging.
    add_header X-Cache-Status $upstream_cache_status;

    # SSD-backed cache: 256m key zone, 400g on disk, evict after 72h idle.
    proxy_cache_path /mnt/ssd2/nginx_cache levels=1:2 keys_zone=ssd2_nginx_cache:256m max_size=400g inactive=72h use_temp_path=off;

    # proxy_cache_key $scheme$proxy_host$uri;

    # Serve stale entries while refreshing or when backends misbehave.
    proxy_cache_use_stale error timeout invalid_header updating http_500 http_502 http_503 http_504;
    proxy_cache_valid any 24h;
    proxy_cache_min_uses 1;

    # Only one request populates a given cache entry at a time; others wait
    # up to 100ms, then fall through to the upstream.
    proxy_cache_lock on;
    proxy_cache_lock_age 180s;
    proxy_cache_lock_timeout 100ms;

    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}

sites-enabled/0_default.conf

server {
    listen 80 default;
    server_name XXX;
    # FIX: "utf8" is not the IANA charset name; clients expect "utf-8"
    # in the Content-Type charset parameter.
    charset utf-8;
    root /home/XXX/www/s1;

    proxy_bind 172.16.1.1;
    # Cache key deliberately ignores scheme/host/args so every spelling of a
    # file maps to a single cache entry.
    proxy_cache_key $uri;
    proxy_cache_bypass $cookie_nocache $arg_nocache;

    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;

    # NOTE(review): the "uHDDs" upstream defined in nginx.conf is never used;
    # both locations pin a single backend. Switching proxy_pass to
    # http://uHDDs$uri$is_args$args would balance across all six servers —
    # confirm that is the intent before changing routing.

    location ^~ /v/ {
        # Map /v/<token>/<path> onto the real /content/<path> URI.
        rewrite ^/v/(\w+)/(.+)$ /content/$2 break;

        proxy_cache ssd2_nginx_cache;
        # FIX: "$uri?$args" appended a bare "?" even when the request had no
        # query string; "$is_args$args" emits "?" only when $args is non-empty.
        proxy_pass http://172.16.0.5$uri$is_args$args;
    }

    location ^~ /content/ {
        proxy_cache ssd2_nginx_cache;
        # FIX: same "$is_args$args" correction as above.
        proxy_pass http://172.16.0.5$uri$is_args$args;
    }

    # Local-only stub_status endpoint for monitoring.
    location /nginx_status {
        stub_status on;
        access_log off;
        allow 127.0.0.1;
        deny all;
    }
}

Best Answer

Nginx Solution: not found.

Varnish: can be done using pass/miss handlers.