I have been trying to get a vhost file configured, but I keep ending up with the error shown further down. I grabbed this config file from the Unicorn GitHub site:
# This example contains the bare minimum to get nginx going with
# Unicorn or Rainbows! servers. Generally these configuration settings
# are applicable to other HTTP application servers (and not just Ruby
# ones), so if you have one working well for proxying another app
# server, feel free to continue using it.
#
# The only setting we feel strongly about is the fail_timeout=0
# directive in the "upstream" block. max_fails=0 also has the same
# effect as fail_timeout=0 for current versions of nginx and may be
# used in its place.
#
# Users are strongly encouraged to refer to nginx documentation for more
# details and search for other example configs.
# you generally only need one nginx worker unless you're serving
# large amounts of static files which require blocking disk reads
worker_processes 1;
# # drop privileges, root is needed on most systems for binding to port 80
# # (or anything < 1024). Capability-based security may be available for
# # your system and worth checking out so you won't need to be root to
# # start nginx to bind on 80
user nobody nogroup; # for systems with a "nogroup"
# user nobody nobody; # for systems with "nobody" as a group instead
# Feel free to change all paths to suit your needs here, of course
pid /tmp/nginx.pid;
error_log /tmp/nginx.error.log;
events {
worker_connections 1024; # increase if you have lots of clients
accept_mutex off; # "on" if nginx worker_processes > 1
# use epoll; # enable for Linux 2.6+
# use kqueue; # enable for FreeBSD, OSX
}
http {
# nginx will find this file in the config directory set at nginx build time
include mime.types;
# fallback in case we can't determine a type
default_type application/octet-stream;
# click tracking!
access_log /tmp/nginx.access.log combined;
# you generally want to serve static files with nginx since neither
# Unicorn nor Rainbows! is optimized for it at the moment
sendfile on;
tcp_nopush on; # off may be better for *some* Comet/long-poll stuff
tcp_nodelay off; # on may be better for some Comet/long-poll stuff
# we haven't checked to see if Rack::Deflate on the app server is
# faster or not than doing compression via nginx. It's easier
# to configure it all in one place here for static files and also
# to disable gzip for clients who don't get gzip/deflate right.
# There are other gzip settings that may be needed to deal with
# bad clients out there, see http://wiki.nginx.org/NginxHttpGzipModule
gzip on;
gzip_http_version 1.0;
gzip_proxied any;
gzip_min_length 500;
gzip_disable "MSIE [1-6]\.";
gzip_types text/plain text/html text/xml text/css
text/comma-separated-values
text/javascript application/x-javascript
application/atom+xml;
# this can be any application server, not just Unicorn/Rainbows!
upstream app_server {
# fail_timeout=0 means we always retry an upstream even if it failed
# to return a good HTTP response (in case the Unicorn master nukes a
# single worker for timing out).
# for UNIX domain socket setups:
server unix:/tmp/.sock fail_timeout=0;
# for TCP setups, point these to your backend servers
# server 192.168.0.7:8080 fail_timeout=0;
# server 192.168.0.8:8080 fail_timeout=0;
# server 192.168.0.9:8080 fail_timeout=0;
}
server {
# enable one of the following if you're on Linux or FreeBSD
# listen 80 default deferred; # for Linux
# listen 80 default accept_filter=httpready; # for FreeBSD
# If you have IPv6, you'll likely want to have two separate listeners.
# One on IPv4 only (the default), and another on IPv6 only instead
# of a single dual-stack listener. A dual-stack listener will make
# for ugly IPv4 addresses in $remote_addr (e.g. "::ffff:10.0.0.1"
# instead of just "10.0.0.1") and potentially trigger bugs in
# some software.
# listen [::]:80 ipv6only=on; # deferred or accept_filter recommended
client_max_body_size 4G;
server_name _;
# ~2 seconds is often enough for most folks to parse HTML/CSS and
# retrieve needed images/icons/frames, connections are cheap in
# nginx so increasing this is generally safe...
keepalive_timeout 5;
# path for static files
root /path/to/app/current/public;
# Prefer to serve static files directly from nginx to avoid unnecessary
# data copies from the application server.
#
# try_files directive appeared in nginx 0.7.27 and has stabilized
# over time. Older versions of nginx (e.g. 0.6.x) required
# "if (!-f $request_filename)" which was less efficient:
# http://bogomips.org/unicorn.git/tree/examples/nginx.conf?id=v3.3.1#n127
try_files $uri/index.html $uri.html $uri @app;
location @app {
# an HTTP header important enough to have its own Wikipedia entry:
# http://en.wikipedia.org/wiki/X-Forwarded-For
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# enable this if you forward HTTPS traffic to unicorn,
# this helps Rack set the proper URL scheme for doing redirects:
# proxy_set_header X-Forwarded-Proto $scheme;
# pass the Host: header from the client right along so redirects
# can be set properly within the Rack application
proxy_set_header Host $http_host;
# we don't want nginx trying to do something clever with
# redirects, we set the Host: header above already.
proxy_redirect off;
# set "proxy_buffering off" *only* for Rainbows! when doing
# Comet/long-poll/streaming. It's also safe to set if you're
# only serving fast clients with Unicorn + nginx, but not slow
# clients. You normally want nginx to buffer responses to slow
# clients, even with Rails 3.1 streaming because otherwise a slow
# client can become a bottleneck of Unicorn.
#
# The Rack application may also set "X-Accel-Buffering (yes|no)"
# in the response headers to disable/enable buffering on a
# per-response basis.
# proxy_buffering off;
proxy_pass http://app_server;
}
# Rails error pages
error_page 500 502 503 504 /500.html;
location = /500.html {
root /path/to/app/current/public;
}
}
}
This is the error I get when trying to start the server. Not that I have changed the paths and such to my own yet, but I don't even get that far...
Starting nginx: nginx: [emerg] "worker_processes" directive is not allowed here in /etc/nginx/sites-enabled/default.conf:18
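A quick way to surface the same parse error without going through the init script is nginx's built-in config test. This is just a sketch, assuming the self-compiled binary sits under the default /usr/local/nginx prefix (adjust the paths if yours differ):

sudo /usr/local/nginx/sbin/nginx -t
# or point it at a specific configuration file explicitly
sudo /usr/local/nginx/sbin/nginx -t -c /usr/local/nginx/conf/nginx.conf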
What exactly am I doing wrong? I compiled nginx myself and installed it under /usr/local/... Here is my nginx.conf file:
#user nobody;
worker_processes 1;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
include /etc/nginx/sites-enabled/*;
#gzip on;
server {
listen 80;
server_name localhost;
#charset koi8-r;
#access_log logs/host.access.log main;
location / {
root html;
index index.html index.htm;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
# another virtual host using mix of IP-, name-, and port-based configuration
#
#server {
# listen 8000;
# listen somename:8080;
# server_name somename alias another.alias;
# location / {
# root html;
# index index.html index.htm;
# }
#}
# HTTPS server
#
#server {
# listen 443;
# server_name localhost;
# ssl on;
# ssl_certificate cert.pem;
# ssl_certificate_key cert.key;
# ssl_session_timeout 5m;
# ssl_protocols SSLv2 SSLv3 TLSv1;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
# location / {
# root html;
# index index.html index.htm;
# }
#}
}
I also tried overwriting the /usr/local/nginx/conf/nginx.conf file with the Unicorn config file, but that didn't help...
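For reference, the nginx.conf above pulls the sites-enabled files in from inside the http block (the "include /etc/nginx/sites-enabled/*;" line), so whatever sits in those files is parsed in http context, where main-level directives such as worker_processes, user, pid, error_log and the events block are not allowed. Below is a minimal sketch, under that assumption, of what a sites-enabled fragment could be trimmed down to; the paths and names are placeholders carried over from the example above, not a tested setup:

# /etc/nginx/sites-enabled/default.conf (sketch): only http-context
# directives, because the file is included inside the http {} block
upstream app_server {
  # UNIX domain socket the application server listens on (placeholder path)
  server unix:/tmp/.sock fail_timeout=0;
}
server {
  listen 80;
  server_name _;
  root /path/to/app/current/public;
  try_files $uri/index.html $uri.html $uri @app;
  location @app {
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header Host $http_host;
    proxy_redirect off;
    proxy_pass http://app_server;
  }
}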