Changed: nginx.spec

Added: check_1.2.6+.patch
@@ -0,0 +1,209 @@
+diff --git a/src/http/modules/ngx_http_upstream_ip_hash_module.c b/src/http/modules/ngx_http_upstream_ip_hash_module.c
+index 89ccc2b..a552044 100644
+--- a/src/http/modules/ngx_http_upstream_ip_hash_module.c
++++ b/src/http/modules/ngx_http_upstream_ip_hash_module.c
+@@ -9,6 +9,10 @@
+ #include <ngx_core.h>
+ #include <ngx_http.h>
+
++#if (NGX_UPSTREAM_CHECK_MODULE)
++#include "ngx_http_upstream_check_handler.h"
++#endif
++
+
+ typedef struct {
+ /* the round robin data must be first */
+@@ -208,6 +212,12 @@ ngx_http_upstream_get_ip_hash_peer(ngx_peer_connection_t *pc, void *data)
+
+ if (!peer->down) {
+
++#if (NGX_UPSTREAM_CHECK_MODULE)
++ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
++ "get ip_hash peer, check_index: %ui",
++ peer->check_index);
++ if (!ngx_http_check_peer_down(peer->check_index)) {
++#endif
+ if (peer->max_fails == 0 || peer->fails < peer->max_fails) {
+ break;
+ }
+@@ -216,6 +226,9 @@ ngx_http_upstream_get_ip_hash_peer(ngx_peer_connection_t *pc, void *data)
+ peer->checked = now;
+ break;
+ }
++#if (NGX_UPSTREAM_CHECK_MODULE)
++ }
++#endif
+ }
+
+ iphp->rrp.tried[n] |= m;
+diff --git a/src/http/modules/ngx_http_upstream_least_conn_module.c b/src/http/modules/ngx_http_upstream_least_conn_module.c
+index 21156ae..c57393d 100644
+--- a/src/http/modules/ngx_http_upstream_least_conn_module.c
++++ b/src/http/modules/ngx_http_upstream_least_conn_module.c
+@@ -9,6 +9,10 @@
+ #include <ngx_core.h>
+ #include <ngx_http.h>
+
++#if (NGX_UPSTREAM_CHECK_MODULE)
++#include "ngx_http_upstream_check_handler.h"
++#endif
++
+
+ typedef struct {
+ ngx_uint_t *conns;
+@@ -203,6 +207,16 @@ ngx_http_upstream_get_least_conn_peer(ngx_peer_connection_t *pc, void *data)
+ continue;
+ }
+
++#if (NGX_UPSTREAM_CHECK_MODULE)
++ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
++ "get least_conn peer, check_index: %ui",
++ peer->check_index);
++
++ if (ngx_http_check_peer_down(peer->check_index)) {
++ continue;
++ }
++#endif
++
+ if (peer->max_fails
+ && peer->fails >= peer->max_fails
+ && now - peer->checked <= peer->fail_timeout)
+@@ -256,6 +270,16 @@ ngx_http_upstream_get_least_conn_peer(ngx_peer_connection_t *pc, void *data)
+ continue;
+ }
+
++#if (NGX_UPSTREAM_CHECK_MODULE)
++ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
++ "get least_conn peer, check_index: %ui",
++ peer->check_index);
++
++ if (ngx_http_check_peer_down(peer->check_index)) {
++ continue;
++ }
++#endif
++
+ if (lcp->conns[i] * best->weight != lcp->conns[p] * peer->weight) {
+ continue;
+ }
+diff --git a/src/http/ngx_http_upstream_round_robin.c b/src/http/ngx_http_upstream_round_robin.c
+index 4b78cff..f077b46 100644
+--- a/src/http/ngx_http_upstream_round_robin.c
++++ b/src/http/ngx_http_upstream_round_robin.c
+@@ -9,6 +9,9 @@
+ #include <ngx_core.h>
+ #include <ngx_http.h>
+
++#if (NGX_UPSTREAM_CHECK_MODULE)
++#include "ngx_http_upstream_check_handler.h"
++#endif
+
+ static ngx_int_t ngx_http_upstream_cmp_servers(const void *one,
+ const void *two);
+@@ -87,7 +90,17 @@ ngx_http_upstream_init_round_robin(ngx_conf_t *cf,
+ peers->peer[n].weight = server[i].weight;
+ peers->peer[n].effective_weight = server[i].weight;
+ peers->peer[n].current_weight = 0;
+- n++;
++
++#if (NGX_UPSTREAM_CHECK_MODULE)
++ if (!server[i].down) {
++ peers->peer[n].check_index =
++ ngx_http_check_add_peer(cf, us, &server[i].addrs[j]);
++ }
++ else {
++ peers->peer[n].check_index = (ngx_uint_t) NGX_ERROR;
++ }
++#endif
++ n++;
+ }
+ }
+
+@@ -145,6 +158,17 @@ ngx_http_upstream_init_round_robin(ngx_conf_t *cf,
+ backup->peer[n].max_fails = server[i].max_fails;
+ backup->peer[n].fail_timeout = server[i].fail_timeout;
+ backup->peer[n].down = server[i].down;
++
++#if (NGX_UPSTREAM_CHECK_MODULE)
++ if (!server[i].down) {
++ backup->peer[n].check_index =
++ ngx_http_check_add_peer(cf, us, &server[i].addrs[j]);
++ }
++ else {
++ backup->peer[n].check_index = (ngx_uint_t) NGX_ERROR;
++ }
++#endif
++
+ n++;
+ }
+ }
+@@ -206,6 +230,9 @@ ngx_http_upstream_init_round_robin(ngx_conf_t *cf,
+ peers->peer[i].current_weight = 0;
+ peers->peer[i].max_fails = 1;
+ peers->peer[i].fail_timeout = 10;
++#if (NGX_UPSTREAM_CHECK_MODULE)
++ peers->peer[i].check_index = (ngx_uint_t) NGX_ERROR;
++#endif
+ }
+
+ us->peer.data = peers;
+@@ -323,6 +350,9 @@ ngx_http_upstream_create_round_robin_peer(ngx_http_request_t *r,
+ peers->peer[0].current_weight = 0;
+ peers->peer[0].max_fails = 1;
+ peers->peer[0].fail_timeout = 10;
++#if (NGX_UPSTREAM_CHECK_MODULE)
++ peers->peer[0].check_index = (ngx_uint_t) NGX_ERROR;
++#endif
+
+ } else {
+
+@@ -356,6 +386,9 @@ ngx_http_upstream_create_round_robin_peer(ngx_http_request_t *r,
+ peers->peer[i].current_weight = 0;
+ peers->peer[i].max_fails = 1;
+ peers->peer[i].fail_timeout = 10;
++#if (NGX_UPSTREAM_CHECK_MODULE)
++ peers->peer[i].check_index = (ngx_uint_t) NGX_ERROR;
++#endif
+ }
+ }
+
+@@ -434,6 +467,12 @@ ngx_http_upstream_get_round_robin_peer(ngx_peer_connection_t *pc, void *data)
+ goto failed;
+ }
+
++#if (NGX_UPSTREAM_CHECK_MODULE)
++ if (ngx_http_check_peer_down(peer->check_index)) {
++ goto failed;
++ }
++#endif
++
+ } else {
+
+ /* there are several peers */
+@@ -531,6 +570,12 @@ ngx_http_upstream_get_peer(ngx_http_upstream_rr_peer_data_t *rrp)
+ continue;
+ }
+
++#if (NGX_UPSTREAM_CHECK_MODULE)
++ if (ngx_http_check_peer_down(peer->check_index)) {
++ continue;
++ }
++#endif
++
+ if (peer->max_fails
+ && peer->fails >= peer->max_fails
+ && now - peer->checked <= peer->fail_timeout)
+diff --git a/src/http/ngx_http_upstream_round_robin.h b/src/http/ngx_http_upstream_round_robin.h
+index 3f8cbf8..1613168 100644
+--- a/src/http/ngx_http_upstream_round_robin.h
++++ b/src/http/ngx_http_upstream_round_robin.h
+@@ -30,6 +30,10 @@ typedef struct {
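The hunks above gate peer selection in the ip_hash, least_conn and round-robin balancers on ngx_http_check_peer_down(peer->check_index), so servers the health checker has marked down are skipped. A minimal upstream sketch that would exercise the patched least_conn path, assuming nginx is built with this patch and the check module (addresses are placeholders):

    upstream backend {
        least_conn;
        server 192.168.0.1:80;
        server 192.168.0.2:80;
        check interval=3000 rise=2 fall=5 timeout=1000 type=tcp;
    }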

Added: nginx-versiontilt.patch
@@ -0,0 +1,13 @@
+--- nginx-1.2.0/src/core/nginx.h 2012-04-23 14:54:14.000000000 +0200
++++ nginx-1.2.0/src/core/nginx.h~ 2012-04-25 16:20:55.000000000 +0200
+@@ -10,8 +10,8 @@
+
+
+ #define nginx_version 1002000
+-#define NGINX_VERSION "1.2.0"
+-#define NGINX_VER "nginx/" NGINX_VERSION
++#define NGINX_VERSION ""
++#define NGINX_VER "" NGINX_VERSION
+
+ #define NGINX_VAR "NGINX"
+ #define NGX_OLDPID_EXT ".oldbin"
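This patch empties NGINX_VERSION and drops the "nginx/" prefix from NGINX_VER at build time, so no version string is compiled into the binary. For comparison, the stock, unpatched alternative is config-only and still announces "nginx", just without the version number (a sketch, not part of this package):

    http {
        server_tokens off;
    }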

Added: nginx-1.4.7.tar.bz2
Added: nginx-1.6.2.tar.bz2
Added: nginx-rtmp-module-1.1.2.tar.bz2
Added: nginx-rtmp-module-1.1.5.tar.bz2
Added: nginx-sticky-module-ng-1.2.5.tar.gz

Added: nginx.init.suse
@@ -0,0 +1,277 @@
+#!/bin/sh
+#
+# Copyright (C) 1995--2007 Marcus Rückert, SUSE / Novell Inc.
+#
+# This library is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation; either version 2.1 of the License, or (at
+# your option) any later version.
+#
+# This library is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307,
+# USA.
+#
+# /etc/init.d/nginx
+# and its symbolic link
+# /(usr/)sbin/rcnginx
+#
+# Template system startup script for some example service/daemon nginx
+#
+# LSB compatible service control script; see http://www.linuxbase.org/spec/
+#
+# Note: This template uses functions rc_XXX defined in /etc/rc.status on
+# UnitedLinux/SUSE/Novell based Linux distributions. If you want to base your
+# script on this template and ensure that it works on non UL based LSB
+# compliant Linux distributions, you either have to provide the rc.status
+# functions from UL or change the script to work without them.
+# See skeleton.compat for a template that works with other distros as well.
+#
+### BEGIN INIT INFO
+# Provides: nginx
+# Required-Start: $syslog $remote_fs
+# Should-Start: $time ypbind sendmail
+# Required-Stop: $syslog $remote_fs
+# Should-Stop: $time ypbind sendmail
+# Default-Start: 3 5
+# Default-Stop: 0 1 2 6
+# Short-Description: nginx
+# Description: nginx
+### END INIT INFO
+#
+# Any extensions to the keywords given above should be preceded by
+# X-VendorTag- (X-UnitedLinux- X-SuSE- for us) according to LSB.
+#
+# Notes on Required-Start/Should-Start:
+# * There are two different issues that are solved by Required-Start
+# and Should-Start
+# (a) Hard dependencies: This is used by the runlevel editor to determine
+# which services absolutely need to be started to make the start of
+# this service make sense. Example: nfsserver should have
+# Required-Start: $portmap
+# Also, required services are started before the dependent ones.
+# The runlevel editor will warn about such missing hard dependencies
+# and suggest enabling. During system startup, you may expect an error,
+# if the dependency is not fulfilled.
+# (b) Specifying the init script ordering, not real (hard) dependencies.
+# This is needed by insserv to determine which service should be
+# started first (and at a later stage what services can be started
+# in parallel). The tag Should-Start: is used for this.
+# It tells, that if a service is available, it should be started
+# before. If not, never mind.
+# * When specifying hard dependencies or ordering requirements, you can
+# use names of services (contents of their Provides: section)
+# or pseudo names starting with a $. The following ones are available
+# according to LSB (1.1):
+# $local_fs all local file systems are mounted
+# (most services should need this!)
+# $remote_fs all remote file systems are mounted
+# (note that /usr may be remote, so
+# many services should Require this!)
+# $syslog system logging facility up
+# $network low level networking (eth card, ...)
+# $named hostname resolution available
+# $netdaemons all network daemons are running
+# The $netdaemons pseudo service has been removed in LSB 1.2.
+# For now, we still offer it for backward compatibility.
+# These are new (LSB 1.2):
+# $time the system time has been set correctly
+# $portmap SunRPC portmapping service available
+# UnitedLinux extensions:
+# $ALL indicates that a script should be inserted
+# at the end
+# * The services specified in the stop tags
+# (Required-Stop/Should-Stop)
+# specify which services need to be still running when this service
+# is shut down. Often the entries there are just copies or a subset
+# from the respective start tag.
+# * Should-Start/Stop are now part of LSB as of 2.0,
+# formerly SUSE/Unitedlinux used X-UnitedLinux-Should-Start/-Stop.
+# insserv does support both variants.
+# * X-UnitedLinux-Default-Enabled: yes/no is used at installation time
+# (%fillup_and_insserv macro in %post of many RPMs) to specify whether
+# a startup script should default to be enabled after installation.
+# It's not used by insserv.
+#
+# Note on runlevels:
+# 0 - halt/poweroff 6 - reboot
+# 1 - single user 2 - multiuser without network exported
+# 3 - multiuser w/ network (text mode) 5 - multiuser w/ network and X11 (xdm)
+#
+# Note on script names:
+# http://www.linuxbase.org/spec/refspecs/LSB_1.3.0/gLSB/gLSB/scrptnames.html
+# A registry has been set up to manage the init script namespace.
+# http://www.lanana.org/
+# Please use the names already registered or register one or use a
+# vendor prefix.
+
+
+# Check for missing binaries (stale symlinks should not happen)
+# Note: Special treatment of stop for LSB conformance
+NGINX_BIN=/usr/sbin/nginx
+test -x $NGINX_BIN || { echo "$NGINX_BIN not installed";
+ if [ "$1" = "stop" ]; then exit 0;
+ else exit 5; fi; }
+
+# Check for existence of needed config file and read it
+#NGINX_CONFIG=/etc/sysconfig/nginx
+#test -r $NGINX_CONFIG || { echo "$NGINX_CONFIG not existing";
+# if [ "$1" = "stop" ]; then exit 0;
+# else exit 6; fi; }
+#
+# Read config
+#. $NGINX_CONFIG
+
+# Source LSB init functions
+# providing start_daemon, killproc, pidofproc,
+# log_success_msg, log_failure_msg and log_warning_msg.
+# This is currently not used by UnitedLinux based distributions and
+# not needed for init scripts for UnitedLinux only. If it is used,
+# the functions from rc.status should not be sourced or used.
+#. /lib/lsb/init-functions
+
+# Shell functions sourced from /etc/rc.status:
+# rc_check check and set local and overall rc status
+# rc_status check and set local and overall rc status
+# rc_status -v be verbose in local rc status and clear it afterwards
+# rc_status -v -r ditto and clear both the local and overall rc status
+# rc_status -s display "skipped" and exit with status 3
+# rc_status -u display "unused" and exit with status 3
+# rc_failed set local and overall rc status to failed
+# rc_failed <num> set local and overall rc status to <num>
+# rc_reset clear both the local and overall rc status
+# rc_exit exit appropriate to overall rc status
+# rc_active checks whether a service is activated by symlinks
+. /etc/rc.status
+
+# Reset status of this service
+rc_reset
+
+# Return values acc. to LSB for all commands but status:
+# 0 - success
+# 1 - generic or unspecified error
+# 2 - invalid or excess argument(s)
+# 3 - unimplemented feature (e.g. "reload")
+# 4 - user had insufficient privileges
+# 5 - program is not installed
+# 6 - program is not configured
+# 7 - program is not running
+# 8--199 - reserved (8--99 LSB, 100--149 distrib, 150--199 appl)
+#
+# Note that starting an already running service, stopping
+# or restarting a not-running service as well as the restart
+# with force-reload (in case signaling is not supported) are
+# considered a success.
+
+case "$1" in
+ start)
+ echo -n "Starting nginx "
+ ## Start daemon with startproc(8). If this fails
+ ## the return value is set appropriately by startproc.
+ /sbin/startproc $NGINX_BIN
+
+ # Remember status and be verbose
+ rc_status -v
+ ;;
+ stop)
+ echo -n "Shutting down nginx "
+ ## Stop daemon with killproc(8) and if this fails
+ ## killproc sets the return value according to LSB.
+
+ /sbin/killproc -TERM $NGINX_BIN
+
+ # Remember status and be verbose
+ rc_status -v
+ ;;
+ try-restart|condrestart)
+ ## Do a restart only if the service was active before.
+ ## Note: try-restart is now part of LSB (as of 1.9).
+ ## RH has a similar command named condrestart.
+ if test "$1" = "condrestart"; then
+ echo "${attn} Use try-restart ${done}(LSB)${attn} rather than condrestart ${warn}(RH)${norm}"
+ fi
+ $0 status
+ if test $? = 0; then

Deleted: nginx_upstream_check_module-master.tar.gz/.git
-(directory)

Deleted: nginx_upstream_check_module-master.tar.gz/.git/HEAD
@@ -1 +0,0 @@
-ref: refs/heads/master

Deleted: nginx_upstream_check_module-master.tar.gz/.git/config
@@ -1,12 +0,0 @@
-[core]
- repositoryformatversion = 0
- filemode = true
- bare = false
- logallrefupdates = true
- ignorecase = true
-[remote "origin"]
- fetch = +refs/heads/*:refs/remotes/origin/*
- url = https://github.com/yaoweibin/nginx_upstream_check_module.git
-[branch "master"]
- remote = origin
- merge = refs/heads/master

Deleted: nginx_upstream_check_module-master.tar.gz/.git/description
@@ -1 +0,0 @@
-Unnamed repository; edit this file 'description' to name the repository.

Deleted: nginx_upstream_check_module-master.tar.gz/.git/hooks
-(directory)

Deleted: nginx_upstream_check_module-master.tar.gz/.git/hooks/applypatch-msg.sample
@@ -1,15 +0,0 @@
-#!/bin/sh
-#
-# An example hook script to check the commit log message taken by
-# applypatch from an e-mail message.
-#
-# The hook should exit with non-zero status after issuing an
-# appropriate message if it wants to stop the commit. The hook is
-# allowed to edit the commit message file.
-#
-# To enable this hook, rename this file to "applypatch-msg".
-
-. git-sh-setup
-test -x "$GIT_DIR/hooks/commit-msg" &&
- exec "$GIT_DIR/hooks/commit-msg" ${1+"$@"}
-:

Deleted: nginx_upstream_check_module-master.tar.gz/.git/hooks/commit-msg.sample
@@ -1,24 +0,0 @@
-#!/bin/sh
-#
-# An example hook script to check the commit log message.
-# Called by "git commit" with one argument, the name of the file
-# that has the commit message. The hook should exit with non-zero
-# status after issuing an appropriate message if it wants to stop the
-# commit. The hook is allowed to edit the commit message file.
-#
-# To enable this hook, rename this file to "commit-msg".
-
-# Uncomment the below to add a Signed-off-by line to the message.
-# Doing this in a hook is a bad idea in general, but the prepare-commit-msg
-# hook is more suited to it.
-#
-# SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p')
-# grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1"
-
-# This example catches duplicate Signed-off-by lines.
-
-test "" = "$(grep '^Signed-off-by: ' "$1" |
- sort | uniq -c | sed -e '/^[ ]*1[ ]/d')" || {
- echo >&2 Duplicate Signed-off-by lines.
- exit 1
-}

Deleted: nginx_upstream_check_module-master.tar.gz/.git/hooks/post-update.sample
@@ -1,8 +0,0 @@
-#!/bin/sh
-#
-# An example hook script to prepare a packed repository for use over
-# dumb transports.
-#
-# To enable this hook, rename this file to "post-update".
-
-exec git update-server-info

Deleted: nginx_upstream_check_module-master.tar.gz/.git/hooks/pre-applypatch.sample
@@ -1,14 +0,0 @@
-#!/bin/sh
-#
-# An example hook script to verify what is about to be committed
-# by applypatch from an e-mail message.
-#
-# The hook should exit with non-zero status after issuing an
-# appropriate message if it wants to stop the commit.
-#
-# To enable this hook, rename this file to "pre-applypatch".
-
-. git-sh-setup
-test -x "$GIT_DIR/hooks/pre-commit" &&
- exec "$GIT_DIR/hooks/pre-commit" ${1+"$@"}
-:

Deleted: nginx_upstream_check_module-master.tar.gz/.git/hooks/pre-commit.sample
@@ -1,50 +0,0 @@
-#!/bin/sh
-#
-# An example hook script to verify what is about to be committed.
-# Called by "git commit" with no arguments. The hook should
-# exit with non-zero status after issuing an appropriate message if
-# it wants to stop the commit.
-#
-# To enable this hook, rename this file to "pre-commit".
-
-if git rev-parse --verify HEAD >/dev/null 2>&1
-then
- against=HEAD
-else
- # Initial commit: diff against an empty tree object
- against=4b825dc642cb6eb9a060e54bf8d69288fbee4904
-fi
-
-# If you want to allow non-ascii filenames set this variable to true.
-allownonascii=$(git config hooks.allownonascii)
-
-# Redirect output to stderr.
-exec 1>&2
-
-# Cross platform projects tend to avoid non-ascii filenames; prevent
-# them from being added to the repository. We exploit the fact that the
-# printable range starts at the space character and ends with tilde.
-if [ "$allownonascii" != "true" ] &&
- # Note that the use of brackets around a tr range is ok here, (it's
- # even required, for portability to Solaris 10's /usr/bin/tr), since
- # the square bracket bytes happen to fall in the designated range.
- test $(git diff --cached --name-only --diff-filter=A -z $against |
- LC_ALL=C tr -d '[ -~]\0' | wc -c) != 0
-then
- echo "Error: Attempt to add a non-ascii file name."
- echo
- echo "This can cause problems if you want to work"
- echo "with people on other platforms."
- echo
- echo "To be portable it is advisable to rename the file ..."
- echo
- echo "If you know what you are doing you can disable this"
- echo "check using:"
- echo
- echo " git config hooks.allownonascii true"
- echo
- exit 1
-fi
-
-# If there are whitespace errors, print the offending file names and fail.
-exec git diff-index --check --cached $against --

Deleted: nginx_upstream_check_module-master.tar.gz/.git/hooks/pre-rebase.sample
@@ -1,169 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2006, 2008 Junio C Hamano
-#
-# The "pre-rebase" hook is run just before "git rebase" starts doing
-# its job, and can prevent the command from running by exiting with
-# non-zero status.
-#
-# The hook is called with the following parameters:
-#
-# $1 -- the upstream the series was forked from.
-# $2 -- the branch being rebased (or empty when rebasing the current branch).
-#
-# This sample shows how to prevent topic branches that are already
-# merged to 'next' branch from getting rebased, because allowing it
-# would result in rebasing already published history.
-
-publish=next
-basebranch="$1"
-if test "$#" = 2
-then
- topic="refs/heads/$2"
-else
- topic=`git symbolic-ref HEAD` ||
- exit 0 ;# we do not interrupt rebasing detached HEAD
-fi
-
-case "$topic" in
-refs/heads/??/*)
- ;;
-*)
- exit 0 ;# we do not interrupt others.
- ;;
-esac
-
-# Now we are dealing with a topic branch being rebased
-# on top of master. Is it OK to rebase it?
-
-# Does the topic really exist?
-git show-ref -q "$topic" || {
- echo >&2 "No such branch $topic"
- exit 1
-}
-
-# Is topic fully merged to master?
-not_in_master=`git rev-list --pretty=oneline ^master "$topic"`
-if test -z "$not_in_master"
-then
- echo >&2 "$topic is fully merged to master; better remove it."
- exit 1 ;# we could allow it, but there is no point.
-fi
-
-# Is topic ever merged to next? If so you should not be rebasing it.
-only_next_1=`git rev-list ^master "^$topic" ${publish} | sort`
-only_next_2=`git rev-list ^master ${publish} | sort`
-if test "$only_next_1" = "$only_next_2"
-then
- not_in_topic=`git rev-list "^$topic" master`
- if test -z "$not_in_topic"
- then
- echo >&2 "$topic is already up-to-date with master"
- exit 1 ;# we could allow it, but there is no point.
- else
- exit 0
- fi
-else
- not_in_next=`git rev-list --pretty=oneline ^${publish} "$topic"`
- /usr/bin/perl -e '
- my $topic = $ARGV[0];
- my $msg = "* $topic has commits already merged to public branch:\n";
- my (%not_in_next) = map {
- /^([0-9a-f]+) /;
- ($1 => 1);
- } split(/\n/, $ARGV[1]);
- for my $elem (map {
- /^([0-9a-f]+) (.*)$/;
- [$1 => $2];
- } split(/\n/, $ARGV[2])) {
- if (!exists $not_in_next{$elem->[0]}) {
- if ($msg) {
- print STDERR $msg;
- undef $msg;
- }
- print STDERR " $elem->[1]\n";
- }
- }
- ' "$topic" "$not_in_next" "$not_in_master"
- exit 1
-fi
-
-exit 0
-
-################################################################
-
-This sample hook safeguards topic branches that have been
-published from being rewound.
-
-The workflow assumed here is:
-
- * Once a topic branch forks from "master", "master" is never
- merged into it again (either directly or indirectly).
-
- * Once a topic branch is fully cooked and merged into "master",
- it is deleted. If you need to build on top of it to correct
- earlier mistakes, a new topic branch is created by forking at
- the tip of the "master". This is not strictly necessary, but
- it makes it easier to keep your history simple.
-
- * Whenever you need to test or publish your changes to topic
- branches, merge them into "next" branch.
-
-The script, being an example, hardcodes the publish branch name
-to be "next", but it is trivial to make it configurable via
-$GIT_DIR/config mechanism.
-
-With this workflow, you would want to know:
-
-(1) ... if a topic branch has ever been merged to "next". Young
- topic branches can have stupid mistakes you would rather
- clean up before publishing, and things that have not been
- merged into other branches can be easily rebased without
- affecting other people. But once it is published, you would
- not want to rewind it.
-
-(2) ... if a topic branch has been fully merged to "master".
- Then you can delete it. More importantly, you should not
- build on top of it -- other people may already want to
- change things related to the topic as patches against your
- "master", so if you need further changes, it is better to
- fork the topic (perhaps with the same name) afresh from the
- tip of "master".
-
-Let's look at this example:
-
- o---o---o---o---o---o---o---o---o---o "next"
- / / / /
- / a---a---b A / /
- / / / /
- / / c---c---c---c B /
- / / / \ /
- / / / b---b C \ /
- / / / / \ /
- ---o---o---o---o---o---o---o---o---o---o---o "master"
-
-
-A, B and C are topic branches.
-
- * A has one fix since it was merged up to "next".
-
- * B has finished. It has been fully merged up to "master" and "next",
- and is ready to be deleted.
-
- * C has not merged to "next" at all.
-
-We would want to allow C to be rebased, refuse A, and encourage
-B to be deleted.
-
-To compute (1):
-
- git rev-list ^master ^topic next
- git rev-list ^master next
-
- if these match, topic has not merged in next at all.
-
-To compute (2):
-
- git rev-list master..topic
-
- if this is empty, it is fully merged to "master".

Deleted: nginx_upstream_check_module-master.tar.gz/.git/hooks/prepare-commit-msg.sample
@@ -1,36 +0,0 @@
-#!/bin/sh
-#
-# An example hook script to prepare the commit log message.
-# Called by "git commit" with the name of the file that has the
-# commit message, followed by the description of the commit
-# message's source. The hook's purpose is to edit the commit
-# message file. If the hook fails with a non-zero status,
-# the commit is aborted.
-#
-# To enable this hook, rename this file to "prepare-commit-msg".
-
-# This hook includes three examples. The first comments out the
-# "Conflicts:" part of a merge commit.
-#
-# The second includes the output of "git diff --name-status -r"
-# into the message, just before the "git status" output. It is
-# commented because it doesn't cope with --amend or with squashed
-# commits.
-#
-# The third example adds a Signed-off-by line to the message, that can
-# still be edited. This is rarely a good idea.
-
-case "$2,$3" in
- merge,)
- /usr/bin/perl -i.bak -ne 's/^/# /, s/^# #/#/ if /^Conflicts/ .. /#/; print' "$1" ;;
-
-# ,|template,)
-# /usr/bin/perl -i.bak -pe '
-# print "\n" . `git diff --cached --name-status -r`
-# if /^#/ && $first++ == 0' "$1" ;;
-
- *) ;;
-esac
-
-# SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p')
-# grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1"

Deleted: nginx_upstream_check_module-master.tar.gz/.git/hooks/update.sample
@@ -1,128 +0,0 @@
-#!/bin/sh
-#
-# An example hook script to blocks unannotated tags from entering.
-# Called by "git receive-pack" with arguments: refname sha1-old sha1-new
-#
-# To enable this hook, rename this file to "update".
-#
-# Config
-# ------
-# hooks.allowunannotated
-# This boolean sets whether unannotated tags will be allowed into the
-# repository. By default they won't be.
-# hooks.allowdeletetag
-# This boolean sets whether deleting tags will be allowed in the
-# repository. By default they won't be.
-# hooks.allowmodifytag
-# This boolean sets whether a tag may be modified after creation. By default
-# it won't be.
-# hooks.allowdeletebranch
-# This boolean sets whether deleting branches will be allowed in the
-# repository. By default they won't be.
-# hooks.denycreatebranch
-# This boolean sets whether remotely creating branches will be denied
-# in the repository. By default this is allowed.
-#
-
-# --- Command line
-refname="$1"
-oldrev="$2"
-newrev="$3"
-
-# --- Safety check
-if [ -z "$GIT_DIR" ]; then
- echo "Don't run this script from the command line." >&2
- echo " (if you want, you could supply GIT_DIR then run" >&2
- echo " $0 <ref> <oldrev> <newrev>)" >&2
- exit 1
-fi
-
-if [ -z "$refname" -o -z "$oldrev" -o -z "$newrev" ]; then
- echo "Usage: $0 <ref> <oldrev> <newrev>" >&2
- exit 1
-fi
-
-# --- Config
-allowunannotated=$(git config --bool hooks.allowunannotated)
-allowdeletebranch=$(git config --bool hooks.allowdeletebranch)
-denycreatebranch=$(git config --bool hooks.denycreatebranch)
-allowdeletetag=$(git config --bool hooks.allowdeletetag)
-allowmodifytag=$(git config --bool hooks.allowmodifytag)
-
-# check for no description
-projectdesc=$(sed -e '1q' "$GIT_DIR/description")
-case "$projectdesc" in
-"Unnamed repository"* | "")
- echo "*** Project description file hasn't been set" >&2
- exit 1
- ;;
-esac
-
-# --- Check types
-# if $newrev is 0000...0000, it's a commit to delete a ref.
-zero="0000000000000000000000000000000000000000"
-if [ "$newrev" = "$zero" ]; then
- newrev_type=delete
-else
- newrev_type=$(git cat-file -t $newrev)
-fi
-
-case "$refname","$newrev_type" in
- refs/tags/*,commit)
- # un-annotated tag
- short_refname=${refname##refs/tags/}
- if [ "$allowunannotated" != "true" ]; then
- echo "*** The un-annotated tag, $short_refname, is not allowed in this repository" >&2
- echo "*** Use 'git tag [ -a | -s ]' for tags you want to propagate." >&2
- exit 1
- fi
- ;;
- refs/tags/*,delete)
- # delete tag
- if [ "$allowdeletetag" != "true" ]; then
- echo "*** Deleting a tag is not allowed in this repository" >&2
- exit 1
- fi
- ;;
- refs/tags/*,tag)
- # annotated tag
- if [ "$allowmodifytag" != "true" ] && git rev-parse $refname > /dev/null 2>&1
- then
- echo "*** Tag '$refname' already exists." >&2
- echo "*** Modifying a tag is not allowed in this repository." >&2
- exit 1
- fi
- ;;
- refs/heads/*,commit)
- # branch
- if [ "$oldrev" = "$zero" -a "$denycreatebranch" = "true" ]; then
- echo "*** Creating a branch is not allowed in this repository" >&2
- exit 1
- fi
- ;;
- refs/heads/*,delete)
- # delete branch
- if [ "$allowdeletebranch" != "true" ]; then
- echo "*** Deleting a branch is not allowed in this repository" >&2
- exit 1
- fi
- ;;
- refs/remotes/*,commit)
- # tracking branch
- ;;
- refs/remotes/*,delete)
- # delete tracking branch
- if [ "$allowdeletebranch" != "true" ]; then
- echo "*** Deleting a tracking branch is not allowed in this repository" >&2
- exit 1
- fi
- ;;
- *)
- # Anything else (is there anything else?)
- echo "*** Update hook: unknown type of update to ref $refname of type $newrev_type" >&2
- exit 1
- ;;
-esac
-
-# --- Finished
-exit 0

Deleted: nginx_upstream_check_module-master.tar.gz/.git/index

Deleted: nginx_upstream_check_module-master.tar.gz/.git/info
-(directory)

Deleted: nginx_upstream_check_module-master.tar.gz/.git/info/exclude
@@ -1,6 +0,0 @@
-# git ls-files --others --exclude-from=.git/info/exclude
-# Lines that start with '#' are comments.
-# For a project mostly in C, the following would be a good set of
-# exclude patterns (uncomment them if you want to use them):
-# *.[oa]
-# *~

Deleted: nginx_upstream_check_module-master.tar.gz/.git/logs
-(directory)

Deleted: nginx_upstream_check_module-master.tar.gz/.git/logs/HEAD
@@ -1 +0,0 @@
-0000000000000000000000000000000000000000 fdda58751149473cc32e4adc95dd2892c8a1bc49 U-arrakis\jg <jg@arrakis.(none)> 1420042220 +0100 clone: from https://github.com/yaoweibin/nginx_upstream_check_module.git

Deleted: nginx_upstream_check_module-master.tar.gz/.git/logs/refs
-(directory)

Deleted: nginx_upstream_check_module-master.tar.gz/.git/logs/refs/heads
-(directory)

Deleted: nginx_upstream_check_module-master.tar.gz/.git/logs/refs/heads/master
@@ -1 +0,0 @@
-0000000000000000000000000000000000000000 fdda58751149473cc32e4adc95dd2892c8a1bc49 U-arrakis\jg <jg@arrakis.(none)> 1420042220 +0100 clone: from https://github.com/yaoweibin/nginx_upstream_check_module.git

Deleted: nginx_upstream_check_module-master.tar.gz/.git/objects
-(directory)

Deleted: nginx_upstream_check_module-master.tar.gz/.git/objects/info
-(directory)

Deleted: nginx_upstream_check_module-master.tar.gz/.git/objects/pack
-(directory)

Deleted: nginx_upstream_check_module-master.tar.gz/.git/objects/pack/pack-94943f89e97721f966da20d991fa6f18b4d73510.idx
Deleted: nginx_upstream_check_module-master.tar.gz/.git/objects/pack/pack-94943f89e97721f966da20d991fa6f18b4d73510.pack

Deleted: nginx_upstream_check_module-master.tar.gz/.git/packed-refs
@@ -1,20 +0,0 @@
-# pack-refs with: peeled
-7d73c60f5ec51e55ad06a2eaec6f68829d3f5996 refs/remotes/origin/development
-b1cd212fc06c3e4decc73fe2e8f388b3956271c8 refs/remotes/origin/for_adam
-669f88ddc370ad170f622cdc32882e9922be25ea refs/remotes/origin/legacy
-fdda58751149473cc32e4adc95dd2892c8a1bc49 refs/remotes/origin/master
-0f5e85efc9424b64eea3ee3f22fa1e1c3b0a7055 refs/remotes/origin/pure_version
-4f1617f7193e435af9e880d8d065fc4350ff9141 refs/tags/0.1.8
-^99524efeb68a80f789827c35af654c16110f277d
-6bb3618fb6175b5e52a152b4f78fabc2021ac21c refs/tags/v0.1.4
-^4bd5f761c5f2f63053737e7e7d041acfe64ab72a
-e384764692d77f8e34631da700112388f71eb871 refs/tags/v0.1.5
-^63ca24bc040ca02fd3e3aa71eed65342b48c2fdc
-33328bb7768497cdf21135338f44f8ff94433e91 refs/tags/v0.1.6
-^0ee0616c1a6272b7c351b43fe20f221cd1c53ebd
-47f7a667fcf4a4f5c35c99f0367485e09a42e458 refs/tags/v0.1.9
-^2662fe4f2b6238364ca5457e3b8f28c261de6106
-0f52429f7eb7650876b086e9e16baae6bdb27803 refs/tags/v0.2.0
-^669f88ddc370ad170f622cdc32882e9922be25ea
-1e1a195e4b5c728417a7e19911773a4fe2eda95e refs/tags/v0.3.0
-^3dc59023c308b2ea6198fac987efa0f407b5aa81

Deleted: nginx_upstream_check_module-master.tar.gz/.git/refs
-(directory)

Deleted: nginx_upstream_check_module-master.tar.gz/.git/refs/heads
-(directory)

Deleted: nginx_upstream_check_module-master.tar.gz/.git/refs/heads/master
@@ -1 +0,0 @@
-fdda58751149473cc32e4adc95dd2892c8a1bc49

Deleted: nginx_upstream_check_module-master.tar.gz/.git/refs/remotes
-(directory)

Deleted: nginx_upstream_check_module-master.tar.gz/.git/refs/remotes/origin
-(directory)

Deleted: nginx_upstream_check_module-master.tar.gz/.git/refs/remotes/origin/HEAD
@@ -1 +0,0 @@
-ref: refs/remotes/origin/master

Deleted: nginx_upstream_check_module-master.tar.gz/.git/refs/tags
-(directory)

Changed: nginx_upstream_check_module-master.tar.gz/README
@@ -1,6 +1,5 @@
Name
- nginx_http_upstream_check_module - support upstream health check with
- Nginx
+ nginx_http_upstream_check_module - support health check with Nginx
Synopsis
http {
@@ -11,12 +10,12 @@
server 192.168.0.1:80;
server 192.168.0.2:80;
- check interval=5000 rise=1 fall=3 timeout=4000;
+ check interval=3000 rise=2 fall=5 timeout=1000;
#check interval=3000 rise=2 fall=5 timeout=1000 type=ssl_hello;
#check interval=3000 rise=2 fall=5 timeout=1000 type=http;
- #check_http_send "HEAD / HTTP/1.0\r\n\r\n";
+ #check_http_send "GET / HTTP/1.0\r\n\r\n";
#check_http_expect_alive http_2xx http_3xx;
}
@@ -45,7 +44,7 @@
check
syntax: *check interval=milliseconds [fall=count] [rise=count]
[timeout=milliseconds] [default_down=true|false]
- [type=tcp|http|ssl_hello|mysql|ajp|fastcgi]*
+ [type=tcp|http|ssl_hello|mysql|ajp]*
default: *none, if parameters omitted, default parameters are
interval=30000 fall=5 rise=2 timeout=1000 default_down=true type=tcp*
@@ -69,10 +68,6 @@
* *default_down*: set initial state of backend server, default is
down.
- * *port*: specify the check port in the backend servers. It can be
- different with the original servers port. Default the port is 0 and
- it means the same as the original backend server.
-
* *type*: the check protocol type:
1. *tcp* is a simple tcp socket connect and peek one byte.
@@ -89,9 +84,6 @@
5. *ajp* sends a AJP Cping packet, receives and parses the AJP
Cpong response to diagnose if the upstream server is alive.
- 6. *fastcgi* send a fastcgi request, receives and parses the
- fastcgi response to diagnose if the upstream server is alive.
-
check_http_send
syntax: *check_http_send http_packet*
@@ -113,36 +105,10 @@
description: These status codes indicate the upstream server's http
response is ok, the backend is alive.
- check_keepalive_requests
- syntax: *check_keepalive_requests num*
-
- default: *check_keepalive_requests 1*
-
- context: *upstream*
-
- description: The directive specifies the number of requests sent on a
- connection, the default vaule 1 indicates that nginx will certainly
- close the connection after a request.
-
- check_fastcgi_param
- Syntax: *check_fastcgi_params parameter value*
-
- default: see below
-
- context: *upstream*
-
- description: If you set the check type is fastcgi, then the check
- function will sends this fastcgi headers to check the upstream server.
- The default directive looks like:
-
- check_fastcgi_param "REQUEST_METHOD" "GET";
- check_fastcgi_param "REQUEST_URI" "/";
- check_fastcgi_param "SCRIPT_FILENAME" "index.php";
-
check_shm_size
syntax: *check_shm_size size*
- default: *1M*
+ default: *1m*
context: *http*
@@ -151,7 +117,7 @@
enlarge it with this directive.
check_status
- syntax: *check_status [html|csv|json]*
+ syntax: *check_status*
default: *none*
@@ -160,60 +126,6 @@
description: Display the health checking servers' status by HTTP. This
directive should be set in the http block.
- You can specify the default display format. The formats can be `html`,
- `csv` or `json`. The default type is `html`. It also supports to specify
- the format by the request argument. Suppose your `check_status` location
- is '/status', the argument of `format` can change the display page's
- format. You can do like this:
-
- /status?format=html
- /status?format=csv
- /status?format=json
-
- At present, you can fetch the list of servers with the same status by
- the argument of `status`. For example:
-
- /status?format=html&status=down
- /status?format=csv&status=up
-
- Below it's the sample html page:
-
- <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
- <title>Nginx http upstream check status</title>
- <h1>Nginx http upstream check status</h1>
- <h2>Check upstream server number: 1, generation: 3</h2>
- <th>Index</th>
- <th>Upstream</th>
- <th>Name</th>
- <th>Status</th>
- <th>Rise counts</th>
- <th>Fall counts</th>
- <th>Check type</th>
- <th>Check port</th>
- <td>0</td>
- <td>backend</td>
- <td>106.187.48.116:80</td>
- <td>up</td>
- <td>39</td>
- <td>0</td>
- <td>http</td>
- <td>80</td>
-
- Below it's the sample of csv page:
-
- 0,backend,106.187.48.116:80,up,46,0,http,80
-
- Below it's the sample of json page:
-
- {"servers": {
- "total": 1,
- "generation": 3,
- "server": [
- {"index": 0, "upstream": "backend", "name": "106.187.48.116:80", "status": "up", "rise": 58, "fall": 0, "type": "http", "port": 80}
- ]
- }}
-
Installation
Download the latest version of the release tarball of this module from
github (<http://github.com/yaoweibin/nginx_upstream_check_module>)
@@ -243,12 +155,6 @@
If you use nginx-1.2.6+ or nginx-1.3.9+, It adjusted the round robin
module. You should use the patch named 'check_1.2.6+.patch'.
- If you use nginx-1.5.12+, You should use the patch named
- 'check_1.5.12+.patch'.
-
- If you use nginx-1.7.2+, You should use the patch named
- 'check_1.7.2+.patch'.
-
The patch just adds the support for the official Round-Robin, Ip_hash
and least_conn upstream module. But it's easy to expand my module to
other upstream modules. See the patch for detail.
@@ -286,13 +192,6 @@
TODO
Known Issues
Changelogs
- v0.3
- * support keepalive check requests
-
- * fastcgi check requests
-
- * json/csv check status page support
-
v0.1
* first release
@@ -310,15 +209,9 @@
This module is licensed under the BSD license.
- Copyright (C) 2014 by Weibin Yao <yaoweibin@gmail.com>
-
- Copyright (C) 2010-2014 Alibaba Group Holding Limited
-
- Copyright (C) 2014 by LiangBin Li
-
- Copyright (C) 2014 by Zhuo Yuan
+ Copyright (C) 2012 by Weibin Yao <yaoweibin@gmail.com>.
- Copyright (C) 2012 by Matthieu Tourne
+ Copyright (C) 2012 by Matthieu Tourne.
All rights reserved.
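As the README above documents, an http-type check pairs check_http_send with check_http_expect_alive, and check_status exposes the results over HTTP. A minimal sketch of that combination, assuming the module is compiled in (addresses and the /status path are placeholders):

    http {
        upstream backend {
            server 192.168.0.1:80;
            server 192.168.0.2:80;
            check interval=3000 rise=2 fall=5 timeout=1000 type=http;
            check_http_send "GET / HTTP/1.0\r\n\r\n";
            check_http_expect_alive http_2xx http_3xx;
        }

        server {
            listen 80;
            location /status {
                check_status;
            }
        }
    }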

Changed: nginx_upstream_check_module-master.tar.gz/check.patch
@@ -6,8 +6,8 @@
#include <ngx_core.h>
#include <ngx_http.h>
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
++#if (NGX_UPSTREAM_CHECK_MODULE)
++#include "ngx_http_upstream_check_handler.h"
+#endif
+
@@ -17,11 +17,11 @@
if (!peer->down) {
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
+ "get ip_hash peer, check_index: %ui",
+ peer->check_index);
-+ if (!ngx_http_upstream_check_peer_down(peer->check_index)) {
++ if (!ngx_http_check_peer_down(peer->check_index)) {
+#endif
if (peer->max_fails == 0 || peer->fails < peer->max_fails) {
break;
@@ -30,7 +30,7 @@
peer->fails = 0;
break;
}
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ }
+#endif
}
@@ -44,8 +44,8 @@
#include <ngx_core.h>
#include <ngx_http.h>
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
++#if (NGX_UPSTREAM_CHECK_MODULE)
++#include "ngx_http_upstream_check_handler.h"
+#endif
static ngx_int_t ngx_http_upstream_cmp_servers(const void *one,
@@ -55,10 +55,10 @@
peers->peer[n].weight = server[i].down ? 0 : server[i].weight;
peers->peer[n].current_weight = peers->peer[n].weight;
+
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ if (!server[i].down) {
+ peers->peer[n].check_index =
-+ ngx_http_upstream_check_add_peer(cf, us, &server[i].addrs[j]);
++ ngx_http_check_add_peer(cf, us, &server[i].addrs[j]);
+ }
+ else {
+ peers->peer[n].check_index = (ngx_uint_t) NGX_ERROR;
@@ -73,10 +73,10 @@
backup->peer[n].fail_timeout = server[i].fail_timeout;
backup->peer[n].down = server[i].down;
+
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ if (!server[i].down) {
+ backup->peer[n].check_index =
-+ ngx_http_upstream_check_add_peer(cf, us, &server[i].addrs[j]);
++ ngx_http_check_add_peer(cf, us, &server[i].addrs[j]);
+ }
+ else {
+ backup->peer[n].check_index = (ngx_uint_t) NGX_ERROR;
@@ -90,7 +90,7 @@
peers->peer[i].current_weight = 1;
peers->peer[i].max_fails = 1;
peers->peer[i].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ peers->peer[i].check_index = (ngx_uint_t) NGX_ERROR;
+#endif
}
@@ -100,7 +100,7 @@
peers->peer[0].current_weight = 1;
peers->peer[0].max_fails = 1;
peers->peer[0].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ peers->peer[0].check_index = (ngx_uint_t) NGX_ERROR;
+#endif
@@ -110,7 +110,7 @@
peers->peer[i].current_weight = 1;
peers->peer[i].max_fails = 1;
peers->peer[i].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ peers->peer[i].check_index = (ngx_uint_t) NGX_ERROR;
+#endif
}
@@ -121,8 +121,8 @@
if (rrp->peers->single) {
peer = &rrp->peers->peer[0];
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
++#if (NGX_UPSTREAM_CHECK_MODULE)
++ if (ngx_http_check_peer_down(peer->check_index)) {
+ return NGX_BUSY;
+ }
+#endif
@@ -133,11 +133,11 @@
if (!peer->down) {
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
+ "get rr peer, check_index: %ui",
+ peer->check_index);
-+ if (!ngx_http_upstream_check_peer_down(peer->check_index)) {
++ if (!ngx_http_check_peer_down(peer->check_index)) {
+#endif
if (peer->max_fails == 0
|| peer->fails < peer->max_fails)
@@ -146,7 +146,7 @@
peer->fails = 0;
break;
}
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ }
+#endif
@@ -156,11 +156,11 @@
if (!peer->down) {
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
+ "get rr peer2, check_index: %ui",
+ peer->check_index);
-+ if (!ngx_http_upstream_check_peer_down(peer->check_index)) {
++ if (!ngx_http_check_peer_down(peer->check_index)) {
+#endif
if (peer->max_fails == 0
|| peer->fails < peer->max_fails)
@@ -169,7 +169,7 @@
peer->fails = 0;
break;
}
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ }
+#endif
@@ -183,7 +183,7 @@
ngx_uint_t max_fails;
time_t fail_timeout;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ ngx_uint_t check_index;
+#endif
+

Changed: nginx_upstream_check_module-master.tar.gz/check_1.2.1.patch
@@ -6,8 +6,8 @@
#include <ngx_core.h>
#include <ngx_http.h>
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
++#if (NGX_UPSTREAM_CHECK_MODULE)
++#include "ngx_http_upstream_check_handler.h"
+#endif
+
@@ -17,11 +17,11 @@
if (!peer->down) {
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
+ "get ip_hash peer, check_index: %ui",
+ peer->check_index);
-+ if (!ngx_http_upstream_check_peer_down(peer->check_index)) {
++ if (!ngx_http_check_peer_down(peer->check_index)) {
+#endif
if (peer->max_fails == 0 || peer->fails < peer->max_fails) {
break;
@@ -30,7 +30,7 @@
peer->checked = now;
break;
}
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ }
+#endif
}
@@ -44,8 +44,8 @@
#include <ngx_core.h>
#include <ngx_http.h>
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
++#if (NGX_UPSTREAM_CHECK_MODULE)
++#include "ngx_http_upstream_check_handler.h"
+#endif
static ngx_int_t ngx_http_upstream_cmp_servers(const void *one,
@@ -56,10 +56,10 @@
peers->peer[n].current_weight = 0;
- n++;
+
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ if (!server[i].down) {
+ peers->peer[n].check_index =
-+ ngx_http_upstream_check_add_peer(cf, us, &server[i].addrs[j]);
++ ngx_http_check_add_peer(cf, us, &server[i].addrs[j]);
+ }
+ else {
+ peers->peer[n].check_index = (ngx_uint_t) NGX_ERROR;
@@ -74,10 +74,10 @@
backup->peer[n].fail_timeout = server[i].fail_timeout;
backup->peer[n].down = server[i].down;
+
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ if (!server[i].down) {
+ backup->peer[n].check_index =
-+ ngx_http_upstream_check_add_peer(cf, us, &server[i].addrs[j]);
++ ngx_http_check_add_peer(cf, us, &server[i].addrs[j]);
+ }
+ else {
+ backup->peer[n].check_index = (ngx_uint_t) NGX_ERROR;
@@ -91,7 +91,7 @@
peers->peer[i].current_weight = 0;
peers->peer[i].max_fails = 1;
peers->peer[i].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ peers->peer[i].check_index = (ngx_uint_t) NGX_ERROR;
+#endif
}
@@ -101,7 +101,7 @@
peers->peer[0].current_weight = 0;
peers->peer[0].max_fails = 1;
peers->peer[0].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ peers->peer[0].check_index = (ngx_uint_t) NGX_ERROR;
+#endif
@@ -111,7 +111,7 @@
peers->peer[i].current_weight = 0;
peers->peer[i].max_fails = 1;
peers->peer[i].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ peers->peer[i].check_index = (ngx_uint_t) NGX_ERROR;
+#endif
}
@@ -122,8 +122,8 @@
if (rrp->peers->single) {
peer = &rrp->peers->peer[0];
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
++#if (NGX_UPSTREAM_CHECK_MODULE)
++ if (ngx_http_check_peer_down(peer->check_index)) {
+ return NGX_BUSY;
+ }
+#endif
@@ -134,8 +134,8 @@
continue;
}
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
++#if (NGX_UPSTREAM_CHECK_MODULE)
++ if (ngx_http_check_peer_down(peer->check_index)) {
+ continue;
+ }
+#endif
@@ -151,7 +151,7 @@
ngx_uint_t max_fails;
time_t fail_timeout;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ ngx_uint_t check_index;
+#endif
+

Changed: nginx_upstream_check_module-master.tar.gz/check_1.2.2+.patch
@@ -6,8 +6,8 @@
#include <ngx_core.h>
#include <ngx_http.h>
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
++#if (NGX_UPSTREAM_CHECK_MODULE)
++#include "ngx_http_upstream_check_handler.h"
+#endif
+
@@ -17,11 +17,11 @@
if (!peer->down) {
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
+ "get ip_hash peer, check_index: %ui",
+ peer->check_index);
-+ if (!ngx_http_upstream_check_peer_down(peer->check_index)) {
++ if (!ngx_http_check_peer_down(peer->check_index)) {
+#endif
if (peer->max_fails == 0 || peer->fails < peer->max_fails) {
break;
@@ -30,7 +30,7 @@
peer->checked = now;
break;
}
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ }
+#endif
}
@@ -44,8 +44,8 @@
#include <ngx_core.h>
#include <ngx_http.h>
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
++#if (NGX_UPSTREAM_CHECK_MODULE)
++#include "ngx_http_upstream_check_handler.h"
+#endif
+
@@ -55,12 +55,12 @@
continue;
}
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
+ "get least_conn peer, check_index: %ui",
+ peer->check_index);
+
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
++ if (ngx_http_check_peer_down(peer->check_index)) {
+ continue;
+ }
+#endif
@@ -72,12 +72,12 @@
continue;
}
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
+ "get least_conn peer, check_index: %ui",
+ peer->check_index);
+
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
++ if (ngx_http_check_peer_down(peer->check_index)) {
+ continue;
+ }
+#endif
@@ -93,8 +93,8 @@
#include <ngx_core.h>
#include <ngx_http.h>
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
++#if (NGX_UPSTREAM_CHECK_MODULE)
++#include "ngx_http_upstream_check_handler.h"
+#endif
static ngx_int_t ngx_http_upstream_cmp_servers(const void *one,
@@ -105,10 +105,10 @@
peers->peer[n].current_weight = 0;
- n++;
+
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ if (!server[i].down) {
+ peers->peer[n].check_index =
-+ ngx_http_upstream_check_add_peer(cf, us, &server[i].addrs[j]);
++ ngx_http_check_add_peer(cf, us, &server[i].addrs[j]);
+ }
+ else {
+ peers->peer[n].check_index = (ngx_uint_t) NGX_ERROR;
@@ -123,10 +123,10 @@
backup->peer[n].fail_timeout = server[i].fail_timeout;
backup->peer[n].down = server[i].down;
+
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ if (!server[i].down) {
+ backup->peer[n].check_index =
-+ ngx_http_upstream_check_add_peer(cf, us, &server[i].addrs[j]);
++ ngx_http_check_add_peer(cf, us, &server[i].addrs[j]);
+ }
+ else {
+ backup->peer[n].check_index = (ngx_uint_t) NGX_ERROR;
@@ -140,7 +140,7 @@
peers->peer[i].current_weight = 0;
peers->peer[i].max_fails = 1;
peers->peer[i].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ peers->peer[i].check_index = (ngx_uint_t) NGX_ERROR;
+#endif
}
@@ -150,7 +150,7 @@
peers->peer[0].current_weight = 0;
peers->peer[0].max_fails = 1;
peers->peer[0].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ peers->peer[0].check_index = (ngx_uint_t) NGX_ERROR;
+#endif
@@ -160,7 +160,7 @@
peers->peer[i].current_weight = 0;
peers->peer[i].max_fails = 1;
peers->peer[i].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ peers->peer[i].check_index = (ngx_uint_t) NGX_ERROR;
+#endif
}
@@ -171,8 +171,8 @@
if (rrp->peers->single) {
peer = &rrp->peers->peer[0];
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
++#if (NGX_UPSTREAM_CHECK_MODULE)
++ if (ngx_http_check_peer_down(peer->check_index)) {
+ return NGX_BUSY;
+ }
+#endif
@@ -183,8 +183,8 @@
continue;
}
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
++#if (NGX_UPSTREAM_CHECK_MODULE)
++ if (ngx_http_check_peer_down(peer->check_index)) {
+ continue;
+ }
+#endif
@@ -200,7 +200,7 @@
ngx_uint_t max_fails;
time_t fail_timeout;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ ngx_uint_t check_index;
+#endif
+

Changed: nginx_upstream_check_module-master.tar.gz/check_1.2.6+.patch
@@ -6,8 +6,8 @@
#include <ngx_core.h>
#include <ngx_http.h>
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
++#if (NGX_UPSTREAM_CHECK_MODULE)
++#include "ngx_http_upstream_check_handler.h"
+#endif
+
@@ -17,11 +17,11 @@
if (!peer->down) {
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
+ "get ip_hash peer, check_index: %ui",
+ peer->check_index);
-+ if (!ngx_http_upstream_check_peer_down(peer->check_index)) {
++ if (!ngx_http_check_peer_down(peer->check_index)) {
+#endif
if (peer->max_fails == 0 || peer->fails < peer->max_fails) {
break;
@@ -30,7 +30,7 @@
peer->checked = now;
break;
}
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ }
+#endif
}
@@ -44,8 +44,8 @@
#include <ngx_core.h>
#include <ngx_http.h>
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
++#if (NGX_UPSTREAM_CHECK_MODULE)
++#include "ngx_http_upstream_check_handler.h"
+#endif
+
@@ -55,12 +55,12 @@
continue;
}
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
+ "get least_conn peer, check_index: %ui",
+ peer->check_index);
+
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
++ if (ngx_http_check_peer_down(peer->check_index)) {
+ continue;
+ }
+#endif
@@ -72,12 +72,12 @@
continue;
}
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
+ "get least_conn peer, check_index: %ui",
+ peer->check_index);
+
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
++ if (ngx_http_check_peer_down(peer->check_index)) {
+ continue;
+ }
+#endif
@@ -93,8 +93,8 @@
#include <ngx_core.h>
#include <ngx_http.h>
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
++#if (NGX_UPSTREAM_CHECK_MODULE)
++#include "ngx_http_upstream_check_handler.h"
+#endif
static ngx_int_t ngx_http_upstream_cmp_servers(const void *one,
@@ -105,10 +105,10 @@
peers->peer[n].current_weight = 0;
- n++;
+
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ if (!server[i].down) {
+ peers->peer[n].check_index =
-+ ngx_http_upstream_check_add_peer(cf, us, &server[i].addrs[j]);
++ ngx_http_check_add_peer(cf, us, &server[i].addrs[j]);
+ }
+ else {
+ peers->peer[n].check_index = (ngx_uint_t) NGX_ERROR;
@@ -123,10 +123,10 @@
backup->peer[n].fail_timeout = server[i].fail_timeout;
backup->peer[n].down = server[i].down;
+
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ if (!server[i].down) {
+ backup->peer[n].check_index =
-+ ngx_http_upstream_check_add_peer(cf, us, &server[i].addrs[j]);
++ ngx_http_check_add_peer(cf, us, &server[i].addrs[j]);
+ }
+ else {
+ backup->peer[n].check_index = (ngx_uint_t) NGX_ERROR;
@@ -140,7 +140,7 @@
peers->peer[i].current_weight = 0;
peers->peer[i].max_fails = 1;
peers->peer[i].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ peers->peer[i].check_index = (ngx_uint_t) NGX_ERROR;
+#endif
}
@@ -150,7 +150,7 @@
peers->peer[0].current_weight = 0;
peers->peer[0].max_fails = 1;
peers->peer[0].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ peers->peer[0].check_index = (ngx_uint_t) NGX_ERROR;
+#endif
@@ -160,7 +160,7 @@
peers->peer[i].current_weight = 0;
peers->peer[i].max_fails = 1;
peers->peer[i].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ peers->peer[i].check_index = (ngx_uint_t) NGX_ERROR;
+#endif
}
@@ -170,8 +170,8 @@
goto failed;
}
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
++#if (NGX_UPSTREAM_CHECK_MODULE)
++ if (ngx_http_check_peer_down(peer->check_index)) {
+ goto failed;
+ }
+#endif
@@ -183,8 +183,8 @@
continue;
}
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
++#if (NGX_UPSTREAM_CHECK_MODULE)
++ if (ngx_http_check_peer_down(peer->check_index)) {
+ continue;
+ }
+#endif
@@ -200,7 +200,7 @@
ngx_uint_t max_fails;
time_t fail_timeout;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ ngx_uint_t check_index;
+#endif
+

Deleted: nginx_upstream_check_module-master.tar.gz/check_1.5.12+.patch
@@ -1,198 +0,0 @@
-diff --git a/src/http/modules/ngx_http_upstream_ip_hash_module.c b/src/http/modules/ngx_http_upstream_ip_hash_module.c
-index 041883f..b1bc7d0 100644
---- a/src/http/modules/ngx_http_upstream_ip_hash_module.c
-+++ b/src/http/modules/ngx_http_upstream_ip_hash_module.c
-@@ -9,6 +9,10 @@
- #include <ngx_core.h>
- #include <ngx_http.h>
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
-+#endif
-+
-
- typedef struct {
- /* the round robin data must be first */
-@@ -212,6 +216,15 @@ ngx_http_upstream_get_ip_hash_peer(ngx_peer_connection_t *pc, void *data)
- goto next_try;
- }
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
-+ "get ip_hash peer, check_index: %ui",
-+ peer->check_index);
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
-+ goto next_try;
-+ }
-+#endif
-+
- if (peer->max_fails
- && peer->fails >= peer->max_fails
- && now - peer->checked <= peer->fail_timeout)
-diff --git a/src/http/modules/ngx_http_upstream_least_conn_module.c b/src/http/modules/ngx_http_upstream_least_conn_module.c
-index dbef95d..dc9b518 100644
---- a/src/http/modules/ngx_http_upstream_least_conn_module.c
-+++ b/src/http/modules/ngx_http_upstream_least_conn_module.c
-@@ -9,6 +9,10 @@
- #include <ngx_core.h>
- #include <ngx_http.h>
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
-+#endif
-+
-
- typedef struct {
- ngx_uint_t *conns;
-@@ -203,6 +207,16 @@ ngx_http_upstream_get_least_conn_peer(ngx_peer_connection_t *pc, void *data)
- continue;
- }
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
-+ "get least_conn peer, check_index: %ui",
-+ peer->check_index);
-+
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
-+ continue;
-+ }
-+#endif
-+
- if (peer->max_fails
- && peer->fails >= peer->max_fails
- && now - peer->checked <= peer->fail_timeout)
-@@ -256,6 +270,16 @@ ngx_http_upstream_get_least_conn_peer(ngx_peer_connection_t *pc, void *data)
- continue;
- }
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
-+ "get least_conn peer, check_index: %ui",
-+ peer->check_index);
-+
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
-+ continue;
-+ }
-+#endif
-+
- if (lcp->conns[i] * best->weight != lcp->conns[p] * peer->weight) {
- continue;
- }
-diff --git a/src/http/ngx_http_upstream_round_robin.c b/src/http/ngx_http_upstream_round_robin.c
-index 85ff558..2fe9bb6 100644
---- a/src/http/ngx_http_upstream_round_robin.c
-+++ b/src/http/ngx_http_upstream_round_robin.c
-@@ -9,6 +9,9 @@
- #include <ngx_core.h>
- #include <ngx_http.h>
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
-+#endif
-
- static ngx_http_upstream_rr_peer_t *ngx_http_upstream_get_peer(
- ngx_http_upstream_rr_peer_data_t *rrp);
-@@ -85,6 +88,14 @@ ngx_http_upstream_init_round_robin(ngx_conf_t *cf,
- peers->peer[n].max_fails = server[i].max_fails;
- peers->peer[n].fail_timeout = server[i].fail_timeout;
- peers->peer[n].down = server[i].down;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ if (!server[i].down) {
-+ peers->peer[n].check_index =
-+ ngx_http_upstream_check_add_peer(cf, us, &server[i].addrs[j]);
-+ } else {
-+ peers->peer[n].check_index = (ngx_uint_t) NGX_ERROR;
-+ }
-+#endif
- n++;
- }
- }
-@@ -139,6 +150,17 @@ ngx_http_upstream_init_round_robin(ngx_conf_t *cf,
- backup->peer[n].max_fails = server[i].max_fails;
- backup->peer[n].fail_timeout = server[i].fail_timeout;
- backup->peer[n].down = server[i].down;
-+
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ if (!server[i].down) {
-+ backup->peer[n].check_index =
-+ ngx_http_upstream_check_add_peer(cf, us, &server[i].addrs[j]);
-+ }
-+ else {
-+ backup->peer[n].check_index = (ngx_uint_t) NGX_ERROR;
-+ }
-+#endif
-+
- n++;
- }
- }
-@@ -196,6 +218,9 @@ ngx_http_upstream_init_round_robin(ngx_conf_t *cf,
- peers->peer[i].current_weight = 0;
- peers->peer[i].max_fails = 1;
- peers->peer[i].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ peers->peer[i].check_index = (ngx_uint_t) NGX_ERROR;
-+#endif
- }
-
- us->peer.data = peers;
-@@ -302,6 +327,9 @@ ngx_http_upstream_create_round_robin_peer(ngx_http_request_t *r,
- peers->peer[0].current_weight = 0;
- peers->peer[0].max_fails = 1;
- peers->peer[0].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ peers->peer[0].check_index = (ngx_uint_t) NGX_ERROR;
-+#endif
-
- } else {
-
-@@ -342,6 +370,9 @@ ngx_http_upstream_create_round_robin_peer(ngx_http_request_t *r,
- peers->peer[i].current_weight = 0;
- peers->peer[i].max_fails = 1;
- peers->peer[i].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ peers->peer[i].check_index = (ngx_uint_t) NGX_ERROR;
-+#endif
- }
- }
-
-@@ -399,6 +430,12 @@ ngx_http_upstream_get_round_robin_peer(ngx_peer_connection_t *pc, void *data)
- goto failed;
- }
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
-+ goto failed;
-+ }
-+#endif
-+
- } else {
-
- /* there are several peers */
-@@ -498,6 +535,12 @@ ngx_http_upstream_get_peer(ngx_http_upstream_rr_peer_data_t *rrp)
- continue;
- }
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
-+ continue;
-+ }
-+#endif
-+
- if (peer->max_fails
- && peer->fails >= peer->max_fails
- && now - peer->checked <= peer->fail_timeout)
-diff --git a/src/http/ngx_http_upstream_round_robin.h b/src/http/ngx_http_upstream_round_robin.h
-index ea90ab9..a6fb33f 100644
---- a/src/http/ngx_http_upstream_round_robin.h
-+++ b/src/http/ngx_http_upstream_round_robin.h
-@@ -30,6 +30,10 @@ typedef struct {
- ngx_uint_t max_fails;
- time_t fail_timeout;
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ ngx_uint_t check_index;
-+#endif
-+
- ngx_uint_t down; /* unsigned down:1; */
-
- #if (NGX_HTTP_SSL)
Deleted | nginx_upstream_check_module-master.tar.gz/check_1.7.2+.patch
@@ -1,195 +0,0 @@
-diff --git a/src/http/modules/ngx_http_upstream_ip_hash_module.c b/src/http/modules/ngx_http_upstream_ip_hash_module.c
-index 148d73a..913e395 100644
---- a/src/http/modules/ngx_http_upstream_ip_hash_module.c
-+++ b/src/http/modules/ngx_http_upstream_ip_hash_module.c
-@@ -9,6 +9,9 @@
- #include <ngx_core.h>
- #include <ngx_http.h>
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
-+#endif
-
- typedef struct {
- /* the round robin data must be first */
-@@ -212,6 +215,15 @@ ngx_http_upstream_get_ip_hash_peer(ngx_peer_connection_t *pc, void *data)
- goto next_try;
- }
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
-+ "get ip_hash peer, check_index: %ui",
-+ peer->check_index);
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
-+ goto next_try;
-+ }
-+#endif
-+
- if (peer->max_fails
- && peer->fails >= peer->max_fails
- && now - peer->checked <= peer->fail_timeout)
-diff --git a/src/http/modules/ngx_http_upstream_least_conn_module.c b/src/http/modules/ngx_http_upstream_least_conn_module.c
-index dbef95d..bbabb68 100644
---- a/src/http/modules/ngx_http_upstream_least_conn_module.c
-+++ b/src/http/modules/ngx_http_upstream_least_conn_module.c
-@@ -9,6 +9,9 @@
- #include <ngx_core.h>
- #include <ngx_http.h>
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
-+#endif
-
- typedef struct {
- ngx_uint_t *conns;
-@@ -203,6 +206,16 @@ ngx_http_upstream_get_least_conn_peer(ngx_peer_connection_t *pc, void *data)
- continue;
- }
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
-+ "get least_conn peer, check_index: %ui",
-+ peer->check_index);
-+
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
-+ continue;
-+ }
-+#endif
-+
- if (peer->max_fails
- && peer->fails >= peer->max_fails
- && now - peer->checked <= peer->fail_timeout)
-@@ -256,6 +269,16 @@ ngx_http_upstream_get_least_conn_peer(ngx_peer_connection_t *pc, void *data)
- continue;
- }
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
-+ "get least_conn peer, check_index: %ui",
-+ peer->check_index);
-+
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
-+ continue;
-+ }
-+#endif
-+
- if (lcp->conns[i] * best->weight != lcp->conns[p] * peer->weight) {
- continue;
- }
-diff --git a/src/http/ngx_http_upstream_round_robin.c b/src/http/ngx_http_upstream_round_robin.c
-index 37c835c..54aa44d 100644
---- a/src/http/ngx_http_upstream_round_robin.c
-+++ b/src/http/ngx_http_upstream_round_robin.c
-@@ -9,6 +9,9 @@
- #include <ngx_core.h>
- #include <ngx_http.h>
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
-+#endif
-
- static ngx_http_upstream_rr_peer_t *ngx_http_upstream_get_peer(
- ngx_http_upstream_rr_peer_data_t *rrp);
-@@ -88,6 +91,14 @@ ngx_http_upstream_init_round_robin(ngx_conf_t *cf,
- peer[n].fail_timeout = server[i].fail_timeout;
- peer[n].down = server[i].down;
- peer[n].server = server[i].name;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ if (!server[i].down) {
-+ peers->peer[n].check_index =
-+ ngx_http_upstream_check_add_peer(cf, us, &server[i].addrs[j]);
-+ } else {
-+ peers->peer[n].check_index = (ngx_uint_t) NGX_ERROR;
-+ }
-+#endif
- n++;
- }
- }
-@@ -144,6 +155,15 @@ ngx_http_upstream_init_round_robin(ngx_conf_t *cf,
- peer[n].fail_timeout = server[i].fail_timeout;
- peer[n].down = server[i].down;
- peer[n].server = server[i].name;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ if (!server[i].down) {
-+ backup->peer[n].check_index =
-+ ngx_http_upstream_check_add_peer(cf, us, &server[i].addrs[j]);
-+ }
-+ else {
-+ backup->peer[n].check_index = (ngx_uint_t) NGX_ERROR;
-+ }
-+#endif
- n++;
- }
- }
-@@ -203,6 +223,9 @@ ngx_http_upstream_init_round_robin(ngx_conf_t *cf,
- peer[i].current_weight = 0;
- peer[i].max_fails = 1;
- peer[i].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ peers->peer[i].check_index = (ngx_uint_t) NGX_ERROR;
-+#endif
- }
-
- us->peer.data = peers;
-@@ -312,7 +335,9 @@ ngx_http_upstream_create_round_robin_peer(ngx_http_request_t *r,
- peer[0].current_weight = 0;
- peer[0].max_fails = 1;
- peer[0].fail_timeout = 10;
--
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ peers->peer[0].check_index = (ngx_uint_t) NGX_ERROR;
-+#endif
- } else {
-
- for (i = 0; i < ur->naddrs; i++) {
-@@ -352,6 +377,9 @@ ngx_http_upstream_create_round_robin_peer(ngx_http_request_t *r,
- peer[i].current_weight = 0;
- peer[i].max_fails = 1;
- peer[i].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ peers->peer[i].check_index = (ngx_uint_t) NGX_ERROR;
-+#endif
- }
- }
-
-@@ -411,6 +439,12 @@ ngx_http_upstream_get_round_robin_peer(ngx_peer_connection_t *pc, void *data)
- goto failed;
- }
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
-+ goto failed;
-+ }
-+#endif
-+
- } else {
-
- /* there are several peers */
-@@ -508,6 +542,12 @@ ngx_http_upstream_get_peer(ngx_http_upstream_rr_peer_data_t *rrp)
- continue;
- }
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
-+ continue;
-+ }
-+#endif
-+
- if (peer->max_fails
- && peer->fails >= peer->max_fails
- && now - peer->checked <= peer->fail_timeout)
-diff --git a/src/http/ngx_http_upstream_round_robin.h b/src/http/ngx_http_upstream_round_robin.h
-index 9db82a6..6e19a65 100644
---- a/src/http/ngx_http_upstream_round_robin.h
-+++ b/src/http/ngx_http_upstream_round_robin.h
-@@ -31,6 +31,10 @@ typedef struct {
- ngx_uint_t max_fails;
- time_t fail_timeout;
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ ngx_uint_t check_index;
-+#endif
-+
- ngx_uint_t down; /* unsigned down:1; */
-
- #if (NGX_HTTP_SSL)
Deleted | nginx_upstream_check_module-master.tar.gz/check_1.7.5+.patch
@@ -1,195 +0,0 @@
-diff --git a/src/http/modules/ngx_http_upstream_ip_hash_module.c b/src/http/modules/ngx_http_upstream_ip_hash_module.c
-index 148d73a..913e395 100644
---- a/src/http/modules/ngx_http_upstream_ip_hash_module.c
-+++ b/src/http/modules/ngx_http_upstream_ip_hash_module.c
-@@ -9,6 +9,9 @@
- #include <ngx_core.h>
- #include <ngx_http.h>
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
-+#endif
-
- typedef struct {
- /* the round robin data must be first */
-@@ -212,6 +215,15 @@ ngx_http_upstream_get_ip_hash_peer(ngx_peer_connection_t *pc, void *data)
- goto next_try;
- }
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
-+ "get ip_hash peer, check_index: %ui",
-+ peer->check_index);
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
-+ goto next_try;
-+ }
-+#endif
-+
- if (peer->max_fails
- && peer->fails >= peer->max_fails
- && now - peer->checked <= peer->fail_timeout)
-diff --git a/src/http/modules/ngx_http_upstream_least_conn_module.c b/src/http/modules/ngx_http_upstream_least_conn_module.c
-index 623bc9b..a223839 100644
---- a/src/http/modules/ngx_http_upstream_least_conn_module.c
-+++ b/src/http/modules/ngx_http_upstream_least_conn_module.c
-@@ -9,6 +9,9 @@
- #include <ngx_core.h>
- #include <ngx_http.h>
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
-+#endif
-
- typedef struct {
- ngx_uint_t *conns;
-@@ -203,6 +206,16 @@ ngx_http_upstream_get_least_conn_peer(ngx_peer_connection_t *pc, void *data)
- continue;
- }
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
-+ "get least_conn peer, check_index: %ui",
-+ peer->check_index);
-+
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
-+ continue;
-+ }
-+#endif
-+
- if (peer->max_fails
- && peer->fails >= peer->max_fails
- && now - peer->checked <= peer->fail_timeout)
-@@ -256,6 +269,16 @@ ngx_http_upstream_get_least_conn_peer(ngx_peer_connection_t *pc, void *data)
- continue;
- }
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
-+ "get least_conn peer, check_index: %ui",
-+ peer->check_index);
-+
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
-+ continue;
-+ }
-+#endif
-+
- if (lcp->conns[i] * best->weight != lcp->conns[p] * peer->weight) {
- continue;
- }
-diff --git a/src/http/ngx_http_upstream_round_robin.c b/src/http/ngx_http_upstream_round_robin.c
-index 2d0649b..b9789eb 100644
---- a/src/http/ngx_http_upstream_round_robin.c
-+++ b/src/http/ngx_http_upstream_round_robin.c
-@@ -9,6 +9,9 @@
- #include <ngx_core.h>
- #include <ngx_http.h>
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
-+#endif
-
- #define ngx_http_upstream_tries(p) ((p)->number \
- + ((p)->next ? (p)->next->number : 0))
-@@ -92,6 +95,14 @@ ngx_http_upstream_init_round_robin(ngx_conf_t *cf,
- peer[n].fail_timeout = server[i].fail_timeout;
- peer[n].down = server[i].down;
- peer[n].server = server[i].name;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ if (!server[i].down) {
-+ peers->peer[n].check_index =
-+ ngx_http_upstream_check_add_peer(cf, us, &server[i].addrs[j]);
-+ } else {
-+ peers->peer[n].check_index = (ngx_uint_t) NGX_ERROR;
-+ }
-+#endif
- n++;
- }
- }
-@@ -148,6 +159,15 @@ ngx_http_upstream_init_round_robin(ngx_conf_t *cf,
- peer[n].fail_timeout = server[i].fail_timeout;
- peer[n].down = server[i].down;
- peer[n].server = server[i].name;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ if (!server[i].down) {
-+ backup->peer[n].check_index =
-+ ngx_http_upstream_check_add_peer(cf, us, &server[i].addrs[j]);
-+ }
-+ else {
-+ backup->peer[n].check_index = (ngx_uint_t) NGX_ERROR;
-+ }
-+#endif
- n++;
- }
- }
-@@ -207,6 +227,9 @@ ngx_http_upstream_init_round_robin(ngx_conf_t *cf,
- peer[i].current_weight = 0;
- peer[i].max_fails = 1;
- peer[i].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ peers->peer[i].check_index = (ngx_uint_t) NGX_ERROR;
-+#endif
- }
-
- us->peer.data = peers;
-@@ -316,7 +339,9 @@ ngx_http_upstream_create_round_robin_peer(ngx_http_request_t *r,
- peer[0].current_weight = 0;
- peer[0].max_fails = 1;
- peer[0].fail_timeout = 10;
--
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ peers->peer[0].check_index = (ngx_uint_t) NGX_ERROR;
-+#endif
- } else {
-
- for (i = 0; i < ur->naddrs; i++) {
-@@ -356,6 +381,9 @@ ngx_http_upstream_create_round_robin_peer(ngx_http_request_t *r,
- peer[i].current_weight = 0;
- peer[i].max_fails = 1;
- peer[i].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ peers->peer[i].check_index = (ngx_uint_t) NGX_ERROR;
-+#endif
- }
- }
-
-@@ -415,6 +443,12 @@ ngx_http_upstream_get_round_robin_peer(ngx_peer_connection_t *pc, void *data)
- goto failed;
- }
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
-+ goto failed;
-+ }
-+#endif
-+
- } else {
-
- /* there are several peers */
-@@ -507,6 +541,12 @@ ngx_http_upstream_get_peer(ngx_http_upstream_rr_peer_data_t *rrp)
- continue;
- }
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
-+ continue;
-+ }
-+#endif
-+
- if (peer->max_fails
- && peer->fails >= peer->max_fails
- && now - peer->checked <= peer->fail_timeout)
-diff --git a/src/http/ngx_http_upstream_round_robin.h b/src/http/ngx_http_upstream_round_robin.h
-index 9db82a6..6e19a65 100644
---- a/src/http/ngx_http_upstream_round_robin.h
-+++ b/src/http/ngx_http_upstream_round_robin.h
-@@ -31,6 +31,10 @@ typedef struct {
- ngx_uint_t max_fails;
- time_t fail_timeout;
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ ngx_uint_t check_index;
-+#endif
-+
- ngx_uint_t down; /* unsigned down:1; */
-
- #if (NGX_HTTP_SSL)
Changed | nginx_upstream_check_module-master.tar.gz/config
@@ -3,13 +3,13 @@
ngx_feature_run=no
ngx_feature_incs=
ngx_feature_path="$ngx_addon_dir"
-ngx_feature_deps="$ngx_addon_dir/ngx_http_upstream_check_module.h"
-ngx_check_src="$ngx_addon_dir/ngx_http_upstream_check_module.c"
+ngx_feature_deps="$ngx_addon_dir/ngx_http_upstream_check_module.h $ngx_addon_dir/ngx_http_upstream_check_handler.h"
+ngx_check_src="$ngx_addon_dir/ngx_http_upstream_check_module.c $ngx_addon_dir/ngx_http_upstream_check_handler.c"
ngx_feature_test="int a;"
. auto/feature
if [ $ngx_found = yes ]; then
- have=NGX_HTTP_UPSTREAM_CHECK . auto/have
+ have=NGX_UPSTREAM_CHECK_MODULE . auto/have
CORE_INCS="$CORE_INCS $ngx_feature_path"
ngx_addon_name=ngx_http_upstream_check_module
HTTP_MODULES="$HTTP_MODULES ngx_http_upstream_check_module"
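
Note (editor): the "have=NGX_UPSTREAM_CHECK_MODULE . auto/have" line above makes nginx's auto/have script append a define to ngx_auto_config.h (under the build's objs/ directory) at configure time; that define is what every #if (NGX_UPSTREAM_CHECK_MODULE) guard in the patches tests. The generated snippet is roughly the following (a sketch of auto/have's output, not a file shipped in this package):

/* appended to objs/ngx_auto_config.h by auto/have */
#ifndef NGX_UPSTREAM_CHECK_MODULE
#define NGX_UPSTREAM_CHECK_MODULE  1
#endif
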
Changed | nginx_upstream_check_module-master.tar.gz/doc/README.txt
@@ -1,6 +1,5 @@
Name
- nginx_http_upstream_check_module - support upstream health check with
- Nginx
+ nginx_http_upstream_check_module - support health check with Nginx
Synopsis
http {
@@ -11,12 +10,12 @@
server 192.168.0.1:80;
server 192.168.0.2:80;
- check interval=5000 rise=1 fall=3 timeout=4000;
+ check interval=3000 rise=2 fall=5 timeout=1000;
#check interval=3000 rise=2 fall=5 timeout=1000 type=ssl_hello;
#check interval=3000 rise=2 fall=5 timeout=1000 type=http;
- #check_http_send "HEAD / HTTP/1.0\r\n\r\n";
+ #check_http_send "GET / HTTP/1.0\r\n\r\n";
#check_http_expect_alive http_2xx http_3xx;
}
@@ -45,7 +44,7 @@
check
syntax: *check interval=milliseconds [fall=count] [rise=count]
[timeout=milliseconds] [default_down=true|false]
- [type=tcp|http|ssl_hello|mysql|ajp|fastcgi]*
+ [type=tcp|http|ssl_hello|mysql|ajp]*
default: *none, if parameters omitted, default parameters are
interval=30000 fall=5 rise=2 timeout=1000 default_down=true type=tcp*
@@ -69,10 +68,6 @@
* *default_down*: set initial state of backend server, default is
down.
- * *port*: specify the check port in the backend servers. It can be
- different with the original servers port. Default the port is 0 and
- it means the same as the original backend server.
-
* *type*: the check protocol type:
1. *tcp* is a simple tcp socket connect and peek one byte.
@@ -89,9 +84,6 @@
5. *ajp* sends a AJP Cping packet, receives and parses the AJP
Cpong response to diagnose if the upstream server is alive.
- 6. *fastcgi* send a fastcgi request, receives and parses the
- fastcgi response to diagnose if the upstream server is alive.
-
check_http_send
syntax: *check_http_send http_packet*
@@ -113,36 +105,10 @@
description: These status codes indicate the upstream server's http
response is ok, the backend is alive.
- check_keepalive_requests
- syntax: *check_keepalive_requests num*
-
- default: *check_keepalive_requests 1*
-
- context: *upstream*
-
- description: The directive specifies the number of requests sent on a
- connection, the default vaule 1 indicates that nginx will certainly
- close the connection after a request.
-
- check_fastcgi_param
- Syntax: *check_fastcgi_params parameter value*
-
- default: see below
-
- context: *upstream*
-
- description: If you set the check type is fastcgi, then the check
- function will sends this fastcgi headers to check the upstream server.
- The default directive looks like:
-
- check_fastcgi_param "REQUEST_METHOD" "GET";
- check_fastcgi_param "REQUEST_URI" "/";
- check_fastcgi_param "SCRIPT_FILENAME" "index.php";
-
check_shm_size
syntax: *check_shm_size size*
- default: *1M*
+ default: *1m*
context: *http*
@@ -151,7 +117,7 @@
enlarge it with this directive.
check_status
- syntax: *check_status [html|csv|json]*
+ syntax: *check_status*
default: *none*
@@ -160,60 +126,6 @@
description: Display the health checking servers' status by HTTP. This
directive should be set in the http block.
- You can specify the default display format. The formats can be `html`,
- `csv` or `json`. The default type is `html`. It also supports to specify
- the format by the request argument. Suppose your `check_status` location
- is '/status', the argument of `format` can change the display page's
- format. You can do like this:
-
- /status?format=html
- /status?format=csv
- /status?format=json
-
- At present, you can fetch the list of servers with the same status by
- the argument of `status`. For example:
-
- /status?format=html&status=down
- /status?format=csv&status=up
-
- Below it's the sample html page:
-
- <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
- <title>Nginx http upstream check status</title>
- <h1>Nginx http upstream check status</h1>
- <h2>Check upstream server number: 1, generation: 3</h2>
- <th>Index</th>
- <th>Upstream</th>
- <th>Name</th>
- <th>Status</th>
- <th>Rise counts</th>
- <th>Fall counts</th>
- <th>Check type</th>
- <th>Check port</th>
- <td>0</td>
- <td>backend</td>
- <td>106.187.48.116:80</td>
- <td>up</td>
- <td>39</td>
- <td>0</td>
- <td>http</td>
- <td>80</td>
-
- Below it's the sample of csv page:
-
- 0,backend,106.187.48.116:80,up,46,0,http,80
-
- Below it's the sample of json page:
-
- {"servers": {
- "total": 1,
- "generation": 3,
- "server": [
- {"index": 0, "upstream": "backend", "name": "106.187.48.116:80", "status": "up", "rise": 58, "fall": 0, "type": "http", "port": 80}
- ]
- }}
-
Installation
Download the latest version of the release tarball of this module from
github (<http://github.com/yaoweibin/nginx_upstream_check_module>)
@@ -243,12 +155,6 @@
If you use nginx-1.2.6+ or nginx-1.3.9+, It adjusted the round robin
module. You should use the patch named 'check_1.2.6+.patch'.
- If you use nginx-1.5.12+, You should use the patch named
- 'check_1.5.12+.patch'.
-
- If you use nginx-1.7.2+, You should use the patch named
- 'check_1.7.2+.patch'.
-
The patch just adds the support for the official Round-Robin, Ip_hash
and least_conn upstream module. But it's easy to expand my module to
other upstream modules. See the patch for detail.
@@ -286,13 +192,6 @@
TODO
Known Issues
Changelogs
- v0.3
- * support keepalive check requests
-
- * fastcgi check requests
-
- * json/csv check status page support
-
v0.1
* first release
@@ -310,15 +209,9 @@
This module is licensed under the BSD license.
- Copyright (C) 2014 by Weibin Yao <yaoweibin@gmail.com>
-
- Copyright (C) 2010-2014 Alibaba Group Holding Limited
-
- Copyright (C) 2014 by LiangBin Li
-
- Copyright (C) 2014 by Zhuo Yuan
+ Copyright (C) 2012 by Weibin Yao <yaoweibin@gmail.com>.
- Copyright (C) 2012 by Matthieu Tourne
+ Copyright (C) 2012 by Matthieu Tourne.
All rights reserved.
Changed | nginx_upstream_check_module-master.tar.gz/doc/README.wiki
@@ -1,6 +1,6 @@
= Name =
-'''nginx_http_upstream_check_module''' - support upstream health check with Nginx
+'''nginx_http_upstream_check_module''' - support health check with Nginx
= Synopsis =
@@ -14,12 +14,12 @@
server 192.168.0.1:80;
server 192.168.0.2:80;
- check interval=5000 rise=1 fall=3 timeout=4000;
+ check interval=3000 rise=2 fall=5 timeout=1000;
#check interval=3000 rise=2 fall=5 timeout=1000 type=ssl_hello;
#check interval=3000 rise=2 fall=5 timeout=1000 type=http;
- #check_http_send "HEAD / HTTP/1.0\r\n\r\n";
+ #check_http_send "GET / HTTP/1.0\r\n\r\n";
#check_http_expect_alive http_2xx http_3xx;
}
@@ -50,29 +50,27 @@
== check ==
-'''syntax:''' ''check interval=milliseconds [fall=count] [rise=count] [timeout=milliseconds] [default_down=true|false] [type=tcp|http|ssl_hello|mysql|ajp|fastcgi]''
+'''syntax:''' ''check interval=milliseconds [fall=count] [rise=count] [timeout=milliseconds] [default_down=true|false] [type=tcp|http|ssl_hello|mysql|ajp]''
'''default:''' ''none, if parameters omitted, default parameters are interval=30000 fall=5 rise=2 timeout=1000 default_down=true type=tcp''
'''context:''' ''upstream''
-'''description:''' Add the health check for the upstream servers.
+'''description:''' Add the health check for the upstream servers.
The parameters' meanings are:
* ''interval'': the check request's interval time.
-* ''fall''(fall_count): After fall_count check failures, the server is marked down.
-* ''rise''(rise_count): After rise_count check success, the server is marked up.
+* ''fall''(fall_count): After fall_count check failures, the server is marked down.
+* ''rise''(rise_count): After rise_count check success, the server is marked up.
* ''timeout'': the check request's timeout.
* ''default_down'': set initial state of backend server, default is down.
-* ''port'': specify the check port in the backend servers. It can be different with the original servers port. Default the port is 0 and it means the same as the original backend server.
* ''type'': the check protocol type:
-# ''tcp'' is a simple tcp socket connect and peek one byte.
+# ''tcp'' is a simple tcp socket connect and peek one byte.
# ''ssl_hello'' sends a client ssl hello packet and receives the server ssl hello packet.
-# ''http'' sends a http request packet, receives and parses the http response to diagnose if the upstream server is alive.
-# ''mysql'' connects to the mysql server, receives the greeting response to diagnose if the upstream server is alive.
-# ''ajp'' sends a AJP Cping packet, receives and parses the AJP Cpong response to diagnose if the upstream server is alive.
-# ''fastcgi'' send a fastcgi request, receives and parses the fastcgi response to diagnose if the upstream server is alive.
+# ''http'' sends a http request packet, receives and parses the http response to diagnose if the upstream server is alive.
+# ''mysql'' connects to the mysql server, receives the greeting response to diagnose if the upstream server is alive.
+# ''ajp'' sends a AJP Cping packet, receives and parses the AJP Cpong response to diagnose if the upstream server is alive.
== check_http_send ==
@@ -94,37 +92,11 @@
'''description:''' These status codes indicate the upstream server's http response is ok, the backend is alive.
-== check_keepalive_requests ==
-
-'''syntax:''' ''check_keepalive_requests num''
-
-'''default:''' ''check_keepalive_requests 1''
-
-'''context:''' ''upstream''
-
-'''description:''' The directive specifies the number of requests sent on a connection, the default vaule 1 indicates that nginx will certainly close the connection after a request.
-
-== check_fastcgi_param ==
-
-'''Syntax:''' ''check_fastcgi_params parameter value''
-
-'''default:''' see below
-
-'''context:''' ''upstream''
-
-'''description:''' If you set the check type is fastcgi, then the check function will sends this fastcgi headers to check the upstream server. The default directive looks like:
-
-<geshi lang="nginx">
- check_fastcgi_param "REQUEST_METHOD" "GET";
- check_fastcgi_param "REQUEST_URI" "/";
- check_fastcgi_param "SCRIPT_FILENAME" "index.php";
-</geshi>
-
== check_shm_size ==
'''syntax:''' ''check_shm_size size''
-'''default:''' ''1M''
+'''default:''' ''1m''
'''context:''' ''http''
@@ -132,7 +104,7 @@
== check_status ==
-'''syntax:''' ''check_status [html|csv|json]''
+'''syntax:''' ''check_status''
'''default:''' ''none''
@@ -140,77 +112,6 @@
'''description:''' Display the health checking servers' status by HTTP. This directive should be set in the http block.
-You can specify the default display format. The formats can be `html`, `csv` or `json`. The default type is `html`. It also supports to specify the format by the request argument. Suppose your `check_status` location is '/status', the argument of `format` can change the display page's format. You can do like this:
-
-<geshi lang="bash">
- /status?format=html
- /status?format=csv
- /status?format=json
-</geshi>
-
-At present, you can fetch the list of servers with the same status by the argument of `status`. For example:
-
-<geshi lang="bash">
- /status?format=html&status=down
- /status?format=csv&status=up
-</geshi>
-
-Below it's the sample html page:
-
-<geshi lang="bash">
- <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
- <html xmlns="http://www.w3.org/1999/xhtml">
- <head>
- <title>Nginx http upstream check status</title>
- </head>
- <body>
- <h1>Nginx http upstream check status</h1>
- <h2>Check upstream server number: 1, generation: 3</h2>
- <table style="background-color:white" cellspacing="0" cellpadding="3" border="1">
- <tr bgcolor="#C0C0C0">
- <th>Index</th>
- <th>Upstream</th>
- <th>Name</th>
- <th>Status</th>
- <th>Rise counts</th>
- <th>Fall counts</th>
- <th>Check type</th>
- <th>Check port</th>
- </tr>
- <tr>
- <td>0</td>
- <td>backend</td>
- <td>106.187.48.116:80</td>
- <td>up</td>
- <td>39</td>
- <td>0</td>
- <td>http</td>
- <td>80</td>
- </tr>
- </table>
- </body>
- </html>
-
-Below it's the sample of csv page:
-
-<geshi lang="bash">
- 0,backend,106.187.48.116:80,up,46,0,http,80
-</geshi>
-
-Below it's the sample of json page:
-
-<geshi lang="bash">
- {"servers": {
- "total": 1,
- "generation": 3,
- "server": [
- {"index": 0, "upstream": "backend", "name": "106.187.48.116:80", "status": "up", "rise": 58, "fall": 0, "type": "http", "port": 80}
- ]
- }}
-</geshi>
-
-
= Installation =
Download the latest version of the release tarball of this module from [http://github.com/yaoweibin/nginx_upstream_check_module github]
@@ -237,10 +138,6 @@
If you use nginx-1.2.6+ or nginx-1.3.9+, It adjusted the round robin module. You should use the patch named 'check_1.2.6+.patch'.
-If you use nginx-1.5.12+, You should use the patch named 'check_1.5.12+.patch'.
-
-If you use nginx-1.7.2+, You should use the patch named 'check_1.7.2+.patch'.
-
The patch just adds the support for the official Round-Robin, Ip_hash and least_conn upstream module. But it's easy to expand my module to other upstream modules. See the patch for detail.
If you want to add the support for upstream fair module, you can do it like this:
@@ -268,8 +165,8 @@
</geshi>
Note that, the nginx-sticky-module also needs the original check.patch.
-
-
+
+
= Compatibility =
* The module version 0.1.5 should be compatibility with 0.7.67+
@@ -283,11 +180,6 @@
= Changelogs =
-== v0.3 ==
-* support keepalive check requests
-* fastcgi check requests
-* json/csv check status page support
-
== v0.1 ==
* first release
@@ -295,7 +187,7 @@
Weibin Yao(姚伟斌) ''yaoweibin at gmail dot com''
-Matthieu Tourne
+Matthieu Tourne
= Copyright & License =
@@ -306,15 +198,9 @@
This module is licensed under the BSD license.
-Copyright (C) 2014 by Weibin Yao <yaoweibin@gmail.com>
-
-Copyright (C) 2010-2014 Alibaba Group Holding Limited
-
-Copyright (C) 2014 by LiangBin Li
-
-Copyright (C) 2014 by Zhuo Yuan
+Copyright (C) 2012 by Weibin Yao <yaoweibin@gmail.com>.
-Copyright (C) 2012 by Matthieu Tourne
+Copyright (C) 2012 by Matthieu Tourne.
All rights reserved.
Changed | nginx_upstream_check_module-master.tar.gz/nginx-sticky-module.patch
@@ -6,8 +6,8 @@
#include "ngx_http_sticky_misc.h"
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
++#if (NGX_UPSTREAM_CHECK_MODULE)
++#include "ngx_http_upstream_check_handler.h"
+#endif
+
+
@@ -18,12 +18,12 @@
return NGX_BUSY;
}
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
+ "get sticky peer, check_index: %ui",
+ peer->check_index);
+
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
++ if (ngx_http_check_peer_down(peer->check_index)) {
+ return NGX_BUSY;
+ }
+#endif
@@ -35,12 +35,12 @@
/* ensure the peer is not marked as down */
if (!peer->down) {
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
+ "get sticky peer, check_index: %ui",
+ peer->check_index);
+
-+ if (!ngx_http_upstream_check_peer_down(peer->check_index)) {
++ if (!ngx_http_check_peer_down(peer->check_index)) {
+#endif
+
/* if it's not failedi, use it */
@@ -50,7 +50,7 @@
/* mark the peer as tried */
iphp->rrp.tried[n] |= m;
}
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ }
+#endif
}
Deleted | nginx_upstream_check_module-master.tar.gz/nginx-tests (directory)
Deleted | nginx_upstream_check_module-master.tar.gz/nginx-tests/fastcgi_check.t
@@ -1,370 +0,0 @@
-#!/usr/bin/perl
-
-use warnings;
-use strict;
-
-use Test::More;
-
-BEGIN { use FindBin; chdir($FindBin::Bin); }
-
-use lib 'lib';
-use Test::Nginx;
-
-###############################################################################
-
-select STDERR; $| = 1;
-select STDOUT; $| = 1;
-
-eval { require FCGI; };
-plan(skip_all => 'FCGI not installed') if $@;
-plan(skip_all => 'win32') if $^O eq 'MSWin32';
-
-my $t = Test::Nginx->new()->has(qw/http fastcgi/)->plan(30)
- ->write_file_expand('nginx.conf', <<'EOF');
-
-%%TEST_GLOBALS%%
-
-daemon off;
-
-events {
-}
-
-http {
- %%TEST_GLOBALS_HTTP%%
-
- server {
- listen 127.0.0.1:8080;
- server_name localhost;
-
- location / {
- fastcgi_pass 127.0.0.1:8081;
- fastcgi_param REQUEST_URI $request_uri;
- }
- }
-}
-
-EOF
-
-$t->run_daemon(\&fastcgi_daemon);
-$t->run();
-
-###############################################################################
-
-like(http_get('/'), qr/SEE-THIS/, 'fastcgi request');
-like(http_get('/redir'), qr/302/, 'fastcgi redirect');
-like(http_get('/'), qr/^3$/m, 'fastcgi third request');
-
-unlike(http_head('/'), qr/SEE-THIS/, 'no data in HEAD');
-
-like(http_get('/stderr'), qr/SEE-THIS/, 'large stderr handled');
-
-$t->stop();
-$t->stop_daemons();
-
-###############################################################################
-
-$t->write_file_expand('nginx.conf', <<'EOF');
-
-%%TEST_GLOBALS%%
-
-daemon off;
-
-worker_processes auto;
-
-events {
- accept_mutex off;
-}
-
-http {
- %%TEST_GLOBALS_HTTP%%
-
- upstream fastcgi {
- server 127.0.0.1:8081;
- check interval=3000 rise=2 fall=3 timeout=1000 type=fastcgi default_down=false;
- check_fastcgi_param "REQUEST_METHOD" "GET";
- check_fastcgi_param "REQUEST_URI" "/redir";
- check_http_expect_alive http_3xx;
- }
-
- server {
- listen 127.0.0.1:8080;
- server_name localhost;
-
- location / {
- fastcgi_pass fastcgi;
- fastcgi_param REQUEST_URI $request_uri;
- }
- }
-}
-
-EOF
-
-$t->run();
-$t->run_daemon(\&fastcgi_daemon);
-
-###############################################################################
-
-like(http_get('/'), qr/SEE-THIS/, 'fastcgi request default_down=false');
-like(http_get('/redir'), qr/302/, 'fastcgi redirect default_down=false');
-like(http_get('/'), qr/^3$/m, 'fastcgi third request default_down=false');
-
-unlike(http_head('/'), qr/SEE-THIS/, 'no data in HEAD default_down=false');
-
-like(http_get('/stderr'), qr/SEE-THIS/, 'large stderr handled default_down=false');
-
-$t->stop();
-$t->stop_daemons();
-
-###############################################################################
-
-$t->write_file_expand('nginx.conf', <<'EOF');
-
-%%TEST_GLOBALS%%
-
-daemon off;
-
-worker_processes auto;
-
-events {
- accept_mutex off;
-}
-
-http {
- %%TEST_GLOBALS_HTTP%%
-
- upstream fastcgi {
- server 127.0.0.1:8081;
- check interval=3000 rise=2 fall=3 timeout=1000 type=fastcgi;
- check_fastcgi_param "REQUEST_METHOD" "GET";
- check_fastcgi_param "REQUEST_URI" "/redir";
- check_http_expect_alive http_3xx;
- }
-
- server {
- listen 127.0.0.1:8080;
- server_name localhost;
-
- location / {
- fastcgi_pass fastcgi;
- fastcgi_param REQUEST_URI $request_uri;
- }
- }
-}
-
-EOF
-
-$t->run();
-$t->run_daemon(\&fastcgi_daemon);
-
-###############################################################################
-
-like(http_get('/'), qr/502/m, 'fastcgi request default_down=true');
-like(http_get('/redir'), qr/502/m, 'fastcgi redirect default_down=true');
-like(http_get('/'), qr/502/m, 'fastcgi third request default_down=true');
-like(http_head('/'), qr/502/m, 'no data in HEAD default_down=true');
-like(http_get('/stderr'), qr/502/m, 'large stderr handled default_down=true');
-
-$t->stop();
-$t->stop_daemons();
-
-###############################################################################
-
-$t->write_file_expand('nginx.conf', <<'EOF');
-
-%%TEST_GLOBALS%%
-
-daemon off;
-
-worker_processes auto;
-
-events {
- accept_mutex off;
-}
-
-http {
- %%TEST_GLOBALS_HTTP%%
-
- upstream fastcgi {
- server 127.0.0.1:8081;
- check interval=3000 rise=2 fall=3 timeout=1000 type=fastcgi;
- check_fastcgi_param "REQUEST_METHOD" "GET";
- check_fastcgi_param "REQUEST_URI" "/redir";
- check_http_expect_alive http_3xx;
- }
-
- server {
- listen 127.0.0.1:8080;
- server_name localhost;
-
- location / {
- fastcgi_pass fastcgi;
- fastcgi_param REQUEST_URI $request_uri;
- }
- }
-}
-
-EOF
-
-$t->run();
-$t->run_daemon(\&fastcgi_daemon);
-
-###############################################################################
-
-sleep(5);
-
-like(http_get('/'), qr/SEE-THIS/, 'fastcgi request default_down=false check 302');
-like(http_get('/redir'), qr/302/, 'fastcgi redirect default_down=false check 302');
-like(http_get('/'), qr/^\d$/m, 'fastcgi third request default_down=false check 302');
-
-unlike(http_head('/'), qr/SEE-THIS/, 'no data in HEAD default_down=false check 302');
-
-like(http_get('/stderr'), qr/SEE-THIS/, 'large stderr handled default_down=false check 302');
-
-$t->stop();
-$t->stop_daemons();
-
-
-###############################################################################
-
-$t->write_file_expand('nginx.conf', <<'EOF');
-
-%%TEST_GLOBALS%%
-
-daemon off;
-
-worker_processes auto;
-
-events {
- accept_mutex off;
-}
-
-http {
- %%TEST_GLOBALS_HTTP%%
-
- upstream fastcgi {
- server 127.0.0.1:8081;
- check interval=1000 rise=1 fall=1 timeout=1000 type=fastcgi;
- check_fastcgi_param "REQUEST_METHOD" "GET";
- check_fastcgi_param "REQUEST_URI" "/404";
- check_http_expect_alive http_2xx;
- }
-
- server {
- listen 127.0.0.1:8080;
- server_name localhost;
-
- location / {
- fastcgi_pass fastcgi;
- fastcgi_param REQUEST_URI $request_uri;
- }
- }
-}
-
-EOF
-
-$t->run();
-$t->run_daemon(\&fastcgi_daemon);
-
-###############################################################################
-
-sleep(5);
-
-like(http_get('/'), qr/502/m, 'fastcgi request default_down=true check status heaer');
-like(http_get('/redir'), qr/502/m, 'fastcgi redirect default_down=true check status heaer');
-like(http_get('/'), qr/502/m, 'fastcgi third request default_down=true check status heaer');
-like(http_head('/'), qr/502/m, 'no data in HEAD default_down=true check status heaer');
-like(http_get('/stderr'), qr/502/m, 'large stderr handled default_down=true check status heaer');
-
-$t->stop();
-$t->stop_daemons();
-
-
-###############################################################################
-
-$t->write_file_expand('nginx.conf', <<'EOF');
-
-%%TEST_GLOBALS%%
-
-daemon off;
-
-worker_processes auto;
-
-events {
- accept_mutex off;
-}
-
-http {
- %%TEST_GLOBALS_HTTP%%
-
- upstream fastcgi {
- server 127.0.0.1:8081;
- check interval=1000 rise=1 fall=1 timeout=1000 type=fastcgi;
- check_fastcgi_param "REQUEST_METHOD" "GET";
- check_fastcgi_param "REQUEST_URI" "/";
- check_http_expect_alive http_4xx;
- }
-
- server {
- listen 127.0.0.1:8080;
- server_name localhost;
-
- location / {
- fastcgi_pass fastcgi;
- fastcgi_param REQUEST_URI $request_uri;
- }
- }
-}
-
-EOF
-
-$t->run();
-$t->run_daemon(\&fastcgi_daemon);
-
-###############################################################################
-
-sleep(5);
-
-like(http_get('/'), qr/SEE-THIS/, 'fastcgi request default_down=false without status header');
-like(http_get('/redir'), qr/302/, 'fastcgi redirect default_down=false without status header');
-like(http_get('/'), qr/^\d$/m, 'fastcgi third request default_down=false without status header');
-
-unlike(http_head('/'), qr/SEE-THIS/, 'no data in HEAD default_down=false without status header');
-
-like(http_get('/stderr'), qr/SEE-THIS/, 'large stderr handled default_down=false without status header');
-
-$t->stop();
-$t->stop_daemons();
-
-
-###############################################################################
-
-sub fastcgi_daemon {
- my $socket = FCGI::OpenSocket('127.0.0.1:8081', 5);
- my $request = FCGI::Request(\*STDIN, \*STDOUT, \*STDERR, \%ENV,
- $socket);
-
- my $count;
- while ( $request->Accept() >= 0 ) {
- $count++;
-
- if ($ENV{REQUEST_URI} eq '/stderr') {
- warn "sample stderr text" x 512;
- }
-
- if ($ENV{REQUEST_URI} eq '/404') {
- print <<EOF;
-Status: 404
-EOF
- }
-
- print <<EOF;
-Location: http://127.0.0.1:8080/redirect
-Content-Type: text/html
-
-SEE-THIS
-$count
-EOF
- }
-
- FCGI::CloseSocket($socket);
-}
Changed | nginx_upstream_check_module-master.tar.gz/ngx_http_upstream_check_handler.c
@@ -55,9 +55,9 @@
static ngx_shm_zone_t * ngx_shared_memory_find(ngx_cycle_t *cycle,
ngx_str_t *name, void *tag);
static ngx_http_check_peer_shm_t * ngx_http_check_find_shm_peer(
- ngx_http_check_peers_shm_t *peers_shm, ngx_addr_t *addr,ngx_str_t *upstream_name);
+ ngx_http_check_peers_shm_t *peers_shm, ngx_addr_t *addr);
static void ngx_http_check_set_shm_peer(ngx_http_check_peer_shm_t *peer_shm,
- ngx_http_check_peer_shm_t *opeer_shm, ngx_uint_t init_down,ngx_str_t *upstream_name);
+ ngx_http_check_peer_shm_t *opeer_shm, ngx_uint_t init_down);
static ngx_int_t ngx_http_upstream_check_init_shm_zone(
ngx_shm_zone_t *shm_zone, void *data);
@@ -1443,19 +1443,21 @@
if (opeers_shm) {
- opeer_shm = ngx_http_check_find_shm_peer(opeers_shm,peer[i].peer_addr,peer[i].upstream_name);
+ opeer_shm = ngx_http_check_find_shm_peer(opeers_shm,
+ peer[i].peer_addr);
if (opeer_shm) {
ngx_log_debug1(NGX_LOG_DEBUG_HTTP, shm_zone->shm.log, 0,
"http upstream check: inherit opeer:%V",
&peer[i].peer_addr->name);
- ngx_http_check_set_shm_peer(peer_shm, opeer_shm, 0,peer[i].upstream_name);
+ ngx_http_check_set_shm_peer(peer_shm, opeer_shm, 0);
+
continue;
}
}
ucscf = peer[i].conf;
- ngx_http_check_set_shm_peer(peer_shm, NULL, ucscf->default_down,peer[i].upstream_name);
+ ngx_http_check_set_shm_peer(peer_shm, NULL, ucscf->default_down);
}
peers->peers_shm = peers_shm;
@@ -1473,8 +1475,7 @@
static ngx_http_check_peer_shm_t *
ngx_http_check_find_shm_peer(ngx_http_check_peers_shm_t *peers_shm,
- ngx_addr_t *addr,
- ngx_str_t *upstream_name)
+ ngx_addr_t *addr)
{
ngx_uint_t i;
ngx_http_check_peer_shm_t *peer_shm;
@@ -1483,12 +1484,11 @@
peer_shm = &peers_shm->peers[i];
- if (addr->socklen != peer_shm->socklen) {
- continue;
+ if (addr->socklen != peer_shm->socklen) {
+ continue;
}
-
- if (ngx_memcmp(addr->sockaddr, peer_shm->sockaddr, addr->socklen) == 0 && ngx_strcmp(upstream_name->data,peer_shm->upstream_name->data) == 0)
- {
+
+ if (ngx_memcmp(addr->sockaddr, peer_shm->sockaddr, addr->socklen) == 0) {
return peer_shm;
}
}
@@ -1500,8 +1500,7 @@
static void
ngx_http_check_set_shm_peer(ngx_http_check_peer_shm_t *peer_shm,
ngx_http_check_peer_shm_t *opeer_shm,
- ngx_uint_t init_down,
- ngx_str_t *upstream_name)
+ ngx_uint_t init_down)
{
if (opeer_shm) {
@@ -1524,7 +1523,6 @@
peer_shm->down = init_down;
}
- peer_shm->upstream_name = upstream_name;
}
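
Note (editor): with the upstream_name argument removed, the packaged handler re-attaches inherited shared-memory peers after a reload by socket address alone, so two upstreams pointing at the same backend address would share one check-state slot. A minimal sketch of that matcher is shown below; the peer/peers shared-memory types come from ngx_http_upstream_check_handler.h in this package, the example_* name is a placeholder, and the loop bound (number) is an assumption based on the peers_shm structure shown elsewhere in this changeset.

#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_http.h>
#include "ngx_http_upstream_check_handler.h"

/* Find an old shared-memory slot for the same socket address, if any, so its
 * rise/fall/down state can be inherited across a reload. */
static ngx_http_check_peer_shm_t *
example_find_shm_peer(ngx_http_check_peers_shm_t *peers_shm, ngx_addr_t *addr)
{
    ngx_uint_t                  i;
    ngx_http_check_peer_shm_t  *peer_shm;

    for (i = 0; i < peers_shm->number; i++) {         /* "number" assumed */
        peer_shm = &peers_shm->peers[i];

        if (addr->socklen != peer_shm->socklen) {
            continue;                  /* different address length: no match */
        }

        if (ngx_memcmp(addr->sockaddr, peer_shm->sockaddr,
                       addr->socklen) == 0)
        {
            return peer_shm;           /* same address: reuse this slot */
        }
    }

    return NULL;                       /* new peer: caller initialises fresh */
}
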
Changed | nginx_upstream_check_module-master.tar.gz/ngx_http_upstream_check_handler.h
@@ -72,7 +72,6 @@
ngx_atomic_t down;
ngx_uint_t access_count;
- ngx_str_t *upstream_name;
struct sockaddr *sockaddr;
socklen_t socklen;
Changed | nginx_upstream_check_module-master.tar.gz/ngx_http_upstream_check_module.c
@@ -1,3067 +1,175 @@
-/*
- * Copyright (C) 2010-2014 Weibin Yao (yaoweibin@gmail.com)
- * Copyright (C) 2010-2014 Alibaba Group Holding Limited
- */
+#include <ngx_core.h>
+#include <ngx_http.h>
+#include <ngx_config.h>
+#include <ngx_murmurhash.h>
+#include <ngx_http_upstream.h>
+#include "ngx_http_upstream_check_module.h"
+#include "ngx_http_upstream_check_handler.h"
-#include <nginx.h>
-#include "ngx_http_upstream_check_module.h"
-
-
-typedef struct ngx_http_upstream_check_peer_s ngx_http_upstream_check_peer_t;
-typedef struct ngx_http_upstream_check_srv_conf_s
- ngx_http_upstream_check_srv_conf_t;
-
-
-#pragma pack(push, 1)
-
-typedef struct {
- u_char major;
- u_char minor;
-} ngx_ssl_protocol_version_t;
-
-
-typedef struct {
- u_char msg_type;
- ngx_ssl_protocol_version_t version;
- uint16_t length;
-
- u_char handshake_type;
- u_char handshake_length[3];
- ngx_ssl_protocol_version_t hello_version;
-
- time_t time;
- u_char random[28];
-
- u_char others[0];
-} ngx_ssl_server_hello_t;
-
-
-typedef struct {
- u_char packet_length[3];
- u_char packet_number;
-
- u_char protocol_version;
- u_char others[0];
-} ngx_mysql_handshake_init_t;
-
-
-typedef struct {
- uint16_t preamble;
- uint16_t length;
- u_char type;
-} ngx_ajp_raw_packet_t;
-
-#pragma pack()
-
-
-typedef struct {
- ngx_buf_t send;
- ngx_buf_t recv;
-
- ngx_uint_t state;
- ngx_http_status_t status;
-
- size_t padding;
- size_t length;
-} ngx_http_upstream_check_ctx_t;
-
-
-typedef struct {
- ngx_shmtx_t mutex;
-#if (nginx_version >= 1002000)
- ngx_shmtx_sh_t lock;
-#else
- ngx_atomic_t lock;
-#endif
-
- ngx_pid_t owner;
-
- ngx_msec_t access_time;
-
- ngx_uint_t fall_count;
- ngx_uint_t rise_count;
-
- ngx_uint_t busyness;
- ngx_uint_t access_count;
- ngx_str_t *upstream_name;
-
- struct sockaddr *sockaddr;
- socklen_t socklen;
-
- ngx_atomic_t down;
-
- u_char padding[64];
-} ngx_http_upstream_check_peer_shm_t;
-
-
-typedef struct {
- ngx_uint_t generation;
- ngx_uint_t checksum;
- ngx_uint_t number;
-
- /* ngx_http_upstream_check_status_peer_t */
- ngx_http_upstream_check_peer_shm_t peers[1];
-} ngx_http_upstream_check_peers_shm_t;
-
-
-#define NGX_HTTP_CHECK_CONNECT_DONE 0x0001
-#define NGX_HTTP_CHECK_SEND_DONE 0x0002
-#define NGX_HTTP_CHECK_RECV_DONE 0x0004
-#define NGX_HTTP_CHECK_ALL_DONE 0x0008
-
-
-typedef ngx_int_t (*ngx_http_upstream_check_packet_init_pt)
- (ngx_http_upstream_check_peer_t *peer);
-typedef ngx_int_t (*ngx_http_upstream_check_packet_parse_pt)
- (ngx_http_upstream_check_peer_t *peer);
-typedef void (*ngx_http_upstream_check_packet_clean_pt)
- (ngx_http_upstream_check_peer_t *peer);
-
-struct ngx_http_upstream_check_peer_s {
- ngx_flag_t state;
- ngx_pool_t *pool;
- ngx_uint_t index;
- ngx_uint_t max_busy;
- ngx_str_t *upstream_name;
- ngx_addr_t *check_peer_addr;
- ngx_addr_t *peer_addr;
- ngx_event_t check_ev;
- ngx_event_t check_timeout_ev;
- ngx_peer_connection_t pc;
-
- void *check_data;
- ngx_event_handler_pt send_handler;
- ngx_event_handler_pt recv_handler;
-
- ngx_http_upstream_check_packet_init_pt init;
- ngx_http_upstream_check_packet_parse_pt parse;
- ngx_http_upstream_check_packet_clean_pt reinit;
-
- ngx_http_upstream_check_peer_shm_t *shm;
- ngx_http_upstream_check_srv_conf_t *conf;
-};
-
-
-typedef struct {
- ngx_str_t check_shm_name;
- ngx_uint_t checksum;
- ngx_array_t peers;
-
- ngx_http_upstream_check_peers_shm_t *peers_shm;
-} ngx_http_upstream_check_peers_t;
-
-
-#define NGX_HTTP_CHECK_TCP 0x0001
-#define NGX_HTTP_CHECK_HTTP 0x0002
-#define NGX_HTTP_CHECK_SSL_HELLO 0x0004
-#define NGX_HTTP_CHECK_MYSQL 0x0008
-#define NGX_HTTP_CHECK_AJP 0x0010
-
-#define NGX_CHECK_HTTP_2XX 0x0002
-#define NGX_CHECK_HTTP_3XX 0x0004
-#define NGX_CHECK_HTTP_4XX 0x0008
-#define NGX_CHECK_HTTP_5XX 0x0010
-#define NGX_CHECK_HTTP_ERR 0x8000
-
-typedef struct {
- ngx_uint_t type;
-
- ngx_str_t name;
-
- ngx_str_t default_send;
-
- /* HTTP */
- ngx_uint_t default_status_alive;
-
- ngx_event_handler_pt send_handler;
- ngx_event_handler_pt recv_handler;
-
- ngx_http_upstream_check_packet_init_pt init;
- ngx_http_upstream_check_packet_parse_pt parse;
- ngx_http_upstream_check_packet_clean_pt reinit;
-
- unsigned need_pool;
- unsigned need_keepalive;
-} ngx_check_conf_t;
-
-
-typedef void (*ngx_http_upstream_check_status_format_pt) (ngx_buf_t *b,
- ngx_http_upstream_check_peers_t *peers, ngx_uint_t flag);
-
-typedef struct {
- ngx_str_t format;
- ngx_str_t content_type;
-
- ngx_http_upstream_check_status_format_pt output;
-} ngx_check_status_conf_t;
-
-
-#define NGX_CHECK_STATUS_DOWN 0x0001
-#define NGX_CHECK_STATUS_UP 0x0002
-
-typedef struct {
- ngx_check_status_conf_t *format;
- ngx_flag_t flag;
-} ngx_http_upstream_check_status_ctx_t;
-
-
-typedef ngx_int_t (*ngx_http_upstream_check_status_command_pt)
- (ngx_http_upstream_check_status_ctx_t *ctx, ngx_str_t *value);
-
-typedef struct {
- ngx_str_t name;
- ngx_http_upstream_check_status_command_pt handler;
-} ngx_check_status_command_t;
-
-
-typedef struct {
- ngx_uint_t check_shm_size;
- ngx_http_upstream_check_peers_t *peers;
-} ngx_http_upstream_check_main_conf_t;
-
-
-struct ngx_http_upstream_check_srv_conf_s {
- ngx_uint_t port;
- ngx_uint_t fall_count;
- ngx_uint_t rise_count;
- ngx_msec_t check_interval;
- ngx_msec_t check_timeout;
- ngx_uint_t check_keepalive_requests;
-
- ngx_check_conf_t *check_type_conf;
- ngx_str_t send;
-
- union {
- ngx_uint_t return_code;
- ngx_uint_t status_alive;
- } code;
-
- ngx_array_t *fastcgi_params;
-
- ngx_uint_t default_down;
-};
-
-
-typedef struct {
- ngx_check_status_conf_t *format;
-} ngx_http_upstream_check_loc_conf_t;
-
-
-typedef struct {
- u_char version;
- u_char type;
- u_char request_id_hi;
- u_char request_id_lo;
- u_char content_length_hi;
- u_char content_length_lo;
- u_char padding_length;
- u_char reserved;
-} ngx_http_fastcgi_header_t;
-
-
-typedef struct {
- u_char role_hi;
- u_char role_lo;
- u_char flags;
- u_char reserved[5];
-} ngx_http_fastcgi_begin_request_t;
-
-
-typedef struct {
- u_char version;
- u_char type;
- u_char request_id_hi;
- u_char request_id_lo;
-} ngx_http_fastcgi_header_small_t;
-
-
-typedef struct {
- ngx_http_fastcgi_header_t h0;
- ngx_http_fastcgi_begin_request_t br;
- ngx_http_fastcgi_header_small_t h1;
-} ngx_http_fastcgi_request_start_t;
-
-
-#define NGX_HTTP_FASTCGI_RESPONDER 1
-
-#define NGX_HTTP_FASTCGI_KEEP_CONN 1
-
-#define NGX_HTTP_FASTCGI_BEGIN_REQUEST 1
-#define NGX_HTTP_FASTCGI_ABORT_REQUEST 2
-#define NGX_HTTP_FASTCGI_END_REQUEST 3
-#define NGX_HTTP_FASTCGI_PARAMS 4
-#define NGX_HTTP_FASTCGI_STDIN 5
-#define NGX_HTTP_FASTCGI_STDOUT 6
-#define NGX_HTTP_FASTCGI_STDERR 7
-#define NGX_HTTP_FASTCGI_DATA 8
-
-
-typedef enum {
- ngx_http_fastcgi_st_version = 0,
- ngx_http_fastcgi_st_type,
- ngx_http_fastcgi_st_request_id_hi,
- ngx_http_fastcgi_st_request_id_lo,
- ngx_http_fastcgi_st_content_length_hi,
- ngx_http_fastcgi_st_content_length_lo,
- ngx_http_fastcgi_st_padding_length,
- ngx_http_fastcgi_st_reserved,
- ngx_http_fastcgi_st_data,
- ngx_http_fastcgi_st_padding
-} ngx_http_fastcgi_state_e;
-
-
-static ngx_http_fastcgi_request_start_t ngx_http_fastcgi_request_start = {
- { 1, /* version */
- NGX_HTTP_FASTCGI_BEGIN_REQUEST, /* type */
- 0, /* request_id_hi */
- 1, /* request_id_lo */
- 0, /* content_length_hi */
- sizeof(ngx_http_fastcgi_begin_request_t), /* content_length_lo */
- 0, /* padding_length */
- 0 }, /* reserved */
-
- { 0, /* role_hi */
- NGX_HTTP_FASTCGI_RESPONDER, /* role_lo */
- 0, /* NGX_HTTP_FASTCGI_KEEP_CONN */ /* flags */
- { 0, 0, 0, 0, 0 } }, /* reserved[5] */
-
- { 1, /* version */
- NGX_HTTP_FASTCGI_PARAMS, /* type */
- 0, /* request_id_hi */
- 1 }, /* request_id_lo */
-
-};
-
-
-static ngx_int_t ngx_http_upstream_check_add_timers(ngx_cycle_t *cycle);
-
-static ngx_int_t ngx_http_upstream_check_peek_one_byte(ngx_connection_t *c);
-
-static void ngx_http_upstream_check_begin_handler(ngx_event_t *event);
-static void ngx_http_upstream_check_connect_handler(ngx_event_t *event);
-
-static void ngx_http_upstream_check_peek_handler(ngx_event_t *event);
-
-static void ngx_http_upstream_check_send_handler(ngx_event_t *event);
-static void ngx_http_upstream_check_recv_handler(ngx_event_t *event);
-
-static void ngx_http_upstream_check_discard_handler(ngx_event_t *event);
-static void ngx_http_upstream_check_dummy_handler(ngx_event_t *event);
-
-static ngx_int_t ngx_http_upstream_check_http_init(
- ngx_http_upstream_check_peer_t *peer);
-static ngx_int_t ngx_http_upstream_check_http_parse(
- ngx_http_upstream_check_peer_t *peer);
-static ngx_int_t ngx_http_upstream_check_parse_status_line(
- ngx_http_upstream_check_ctx_t *ctx, ngx_buf_t *b,
- ngx_http_status_t *status);
-static void ngx_http_upstream_check_http_reinit(
- ngx_http_upstream_check_peer_t *peer);
-
-static ngx_buf_t *ngx_http_upstream_check_create_fastcgi_request(
- ngx_pool_t *pool, ngx_str_t *params, ngx_uint_t num);
-
-static ngx_int_t ngx_http_upstream_check_fastcgi_parse(
- ngx_http_upstream_check_peer_t *peer);
-static ngx_int_t ngx_http_upstream_check_fastcgi_process_record(
- ngx_http_upstream_check_ctx_t *ctx, ngx_buf_t *b,
- ngx_http_status_t *status);
-static ngx_int_t ngx_http_upstream_check_parse_fastcgi_status(
- ngx_http_upstream_check_ctx_t *ctx, ngx_buf_t *b,
- ngx_http_status_t *status);
-
-static ngx_int_t ngx_http_upstream_check_ssl_hello_init(
- ngx_http_upstream_check_peer_t *peer);
-static ngx_int_t ngx_http_upstream_check_ssl_hello_parse(
- ngx_http_upstream_check_peer_t *peer);
-static void ngx_http_upstream_check_ssl_hello_reinit(
- ngx_http_upstream_check_peer_t *peer);
-
-static ngx_int_t ngx_http_upstream_check_mysql_init(
- ngx_http_upstream_check_peer_t *peer);
-static ngx_int_t ngx_http_upstream_check_mysql_parse(
- ngx_http_upstream_check_peer_t *peer);
-static void ngx_http_upstream_check_mysql_reinit(
- ngx_http_upstream_check_peer_t *peer);
-
-static ngx_int_t ngx_http_upstream_check_ajp_init(
- ngx_http_upstream_check_peer_t *peer);
-static ngx_int_t ngx_http_upstream_check_ajp_parse(
- ngx_http_upstream_check_peer_t *peer);
-static void ngx_http_upstream_check_ajp_reinit(
- ngx_http_upstream_check_peer_t *peer);
-
-static void ngx_http_upstream_check_status_update(
- ngx_http_upstream_check_peer_t *peer,
- ngx_int_t result);
-
-static void ngx_http_upstream_check_clean_event(
- ngx_http_upstream_check_peer_t *peer);
-
-static void ngx_http_upstream_check_timeout_handler(ngx_event_t *event);
-static void ngx_http_upstream_check_finish_handler(ngx_event_t *event);
-
-static ngx_int_t ngx_http_upstream_check_need_exit();
-static void ngx_http_upstream_check_clear_all_events();
-
-static ngx_int_t ngx_http_upstream_check_status_handler(
- ngx_http_request_t *r);
-
-static void ngx_http_upstream_check_status_parse_args(ngx_http_request_t *r,
- ngx_http_upstream_check_status_ctx_t *ctx);
-
-static ngx_int_t ngx_http_upstream_check_status_command_format(
- ngx_http_upstream_check_status_ctx_t *ctx, ngx_str_t *value);
-static ngx_int_t ngx_http_upstream_check_status_command_status(
- ngx_http_upstream_check_status_ctx_t *ctx, ngx_str_t *value);
-
-static void ngx_http_upstream_check_status_html_format(ngx_buf_t *b,
- ngx_http_upstream_check_peers_t *peers, ngx_uint_t flag);
-static void ngx_http_upstream_check_status_csv_format(ngx_buf_t *b,
- ngx_http_upstream_check_peers_t *peers, ngx_uint_t flag);
-static void ngx_http_upstream_check_status_json_format(ngx_buf_t *b,
- ngx_http_upstream_check_peers_t *peers, ngx_uint_t flag);
-
-static ngx_int_t ngx_http_upstream_check_addr_change_port(ngx_pool_t *pool,
- ngx_addr_t *dst, ngx_addr_t *src, ngx_uint_t port);
-
-static ngx_check_conf_t *ngx_http_get_check_type_conf(ngx_str_t *str);
-
-static char *ngx_http_upstream_check(ngx_conf_t *cf,
- ngx_command_t *cmd, void *conf);
-static char *ngx_http_upstream_check_keepalive_requests(ngx_conf_t *cf,
- ngx_command_t *cmd, void *conf);
-static char *ngx_http_upstream_check_http_send(ngx_conf_t *cf,
- ngx_command_t *cmd, void *conf);
-static char *ngx_http_upstream_check_http_expect_alive(ngx_conf_t *cf,
- ngx_command_t *cmd, void *conf);
-
-static char *ngx_http_upstream_check_fastcgi_params(ngx_conf_t *cf,
- ngx_command_t *cmd, void *conf);
-
-static char *ngx_http_upstream_check_shm_size(ngx_conf_t *cf,
- ngx_command_t *cmd, void *conf);
-
-static ngx_check_status_conf_t *ngx_http_get_check_status_format_conf(
- ngx_str_t *str);
-static char *ngx_http_upstream_check_status(ngx_conf_t *cf,
- ngx_command_t *cmd, void *conf);
-
-static void *ngx_http_upstream_check_create_main_conf(ngx_conf_t *cf);
-static char *ngx_http_upstream_check_init_main_conf(ngx_conf_t *cf,
- void *conf);
-
-static void *ngx_http_upstream_check_create_srv_conf(ngx_conf_t *cf);
-static char *ngx_http_upstream_check_init_srv_conf(ngx_conf_t *cf, void *conf);
-
-static void *ngx_http_upstream_check_create_loc_conf(ngx_conf_t *cf);
-static char * ngx_http_upstream_check_merge_loc_conf(ngx_conf_t *cf,
- void *parent, void *child);
-
-#define SHM_NAME_LEN 256
-
-static char *ngx_http_upstream_check_init_shm(ngx_conf_t *cf, void *conf);
-
-static ngx_int_t ngx_http_upstream_check_get_shm_name(ngx_str_t *shm_name,
- ngx_pool_t *pool, ngx_uint_t generation);
-static ngx_shm_zone_t *ngx_shared_memory_find(ngx_cycle_t *cycle,
- ngx_str_t *name, void *tag);
-static ngx_http_upstream_check_peer_shm_t *
-ngx_http_upstream_check_find_shm_peer(ngx_http_upstream_check_peers_shm_t *peers_shm,
- ngx_addr_t *addr, ngx_str_t *upstream_name);
-
-static ngx_int_t ngx_http_upstream_check_init_shm_peer(
- ngx_http_upstream_check_peer_shm_t *peer_shm,
- ngx_http_upstream_check_peer_shm_t *opeer_shm,
- ngx_uint_t init_down, ngx_pool_t *pool, ngx_str_t *peer_name,
- ngx_str_t *upstream_name);
-
-static ngx_int_t ngx_http_upstream_check_init_shm_zone(
- ngx_shm_zone_t *shm_zone, void *data);
-
-
-static ngx_int_t ngx_http_upstream_check_init_process(ngx_cycle_t *cycle);
-
-
-static ngx_conf_bitmask_t ngx_check_http_expect_alive_masks[] = {
- { ngx_string("http_2xx"), NGX_CHECK_HTTP_2XX },
- { ngx_string("http_3xx"), NGX_CHECK_HTTP_3XX },
- { ngx_string("http_4xx"), NGX_CHECK_HTTP_4XX },
- { ngx_string("http_5xx"), NGX_CHECK_HTTP_5XX },
- { ngx_null_string, 0 }
-};
-
-
-static ngx_command_t ngx_http_upstream_check_commands[] = {
-
- { ngx_string("check"),
- NGX_HTTP_UPS_CONF|NGX_CONF_1MORE,
- ngx_http_upstream_check,
- 0,
- 0,
- NULL },
-
- { ngx_string("check_keepalive_requests"),
- NGX_HTTP_UPS_CONF|NGX_CONF_TAKE1,
- ngx_http_upstream_check_keepalive_requests,
- 0,
- 0,
- NULL },
-
- { ngx_string("check_http_send"),
- NGX_HTTP_UPS_CONF|NGX_CONF_TAKE1,
- ngx_http_upstream_check_http_send,
- 0,
- 0,
- NULL },
-
- { ngx_string("check_http_expect_alive"),
- NGX_HTTP_UPS_CONF|NGX_CONF_1MORE,
- ngx_http_upstream_check_http_expect_alive,
- 0,
- 0,
- NULL },
-
- { ngx_string("check_fastcgi_param"),
- NGX_HTTP_UPS_CONF|NGX_CONF_TAKE2,
- ngx_http_upstream_check_fastcgi_params,
- 0,
- 0,
- NULL },
-
- { ngx_string("check_shm_size"),
- NGX_HTTP_MAIN_CONF|NGX_CONF_TAKE1,
- ngx_http_upstream_check_shm_size,
- 0,
- 0,
- NULL },
-
- { ngx_string("check_status"),
- NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1|NGX_CONF_NOARGS,
- ngx_http_upstream_check_status,
- 0,
- 0,
- NULL },
-
- ngx_null_command
-};
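-
-/*
- * Illustrative use of the directives above (values and parameter
- * spellings are examples only, not taken from this patch):
- *
- *     upstream backend {
- *         server 127.0.0.1:8080;
- *
- *         check interval=3000 rise=2 fall=5 timeout=1000 type=http;
- *         check_http_send "HEAD / HTTP/1.0\r\n\r\n";
- *         check_http_expect_alive http_2xx http_3xx;
- *     }
- *
- * check_shm_size belongs in the http{} block, and check_status is set
- * in a server{} or location{} block, optionally with a format argument
- * such as "check_status html;".
- */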
-
-
-static ngx_http_module_t ngx_http_upstream_check_module_ctx = {
- NULL, /* preconfiguration */
- NULL, /* postconfiguration */
-
- ngx_http_upstream_check_create_main_conf,/* create main configuration */
- ngx_http_upstream_check_init_main_conf, /* init main configuration */
-
- ngx_http_upstream_check_create_srv_conf, /* create server configuration */
- NULL, /* merge server configuration */
-
- ngx_http_upstream_check_create_loc_conf, /* create location configuration */
- ngx_http_upstream_check_merge_loc_conf /* merge location configuration */
-};
-
-
-ngx_module_t ngx_http_upstream_check_module = {
- NGX_MODULE_V1,
- &ngx_http_upstream_check_module_ctx, /* module context */
- ngx_http_upstream_check_commands, /* module directives */
- NGX_HTTP_MODULE, /* module type */
- NULL, /* init master */
- NULL, /* init module */
- ngx_http_upstream_check_init_process, /* init process */
- NULL, /* init thread */
- NULL, /* exit thread */
- NULL, /* exit process */
- NULL, /* exit master */
- NGX_MODULE_V1_PADDING
-};
-
-
-static ngx_str_t fastcgi_default_request;
-static ngx_str_t fastcgi_default_params[] = {
- ngx_string("REQUEST_METHOD"), ngx_string("GET"),
- ngx_string("REQUEST_URI"), ngx_string("/"),
- ngx_string("SCRIPT_FILENAME"), ngx_string("index.php"),
-};
-
-
-#define NGX_SSL_RANDOM "NGX_HTTP_CHECK_SSL_HELLO\n\n\n\n"
-
-/*
- * This is the SSLv3 CLIENT HELLO packet used in conjunction with the
- * check type of ssl_hello to ensure that the remote server speaks SSL.
- *
- * Check RFC 2246 (TLSv1.0) sections A.3 and A.4 for details.
- */
-static char sslv3_client_hello_pkt[] = {
- "\x16" /* ContentType : 0x16 = Hanshake */
- "\x03\x00" /* ProtocolVersion : 0x0300 = SSLv3 */
- "\x00\x79" /* ContentLength : 0x79 bytes after this one */
- "\x01" /* HanshakeType : 0x01 = CLIENT HELLO */
- "\x00\x00\x75" /* HandshakeLength : 0x75 bytes after this one */
- "\x03\x00" /* Hello Version : 0x0300 = v3 */
- "\x00\x00\x00\x00" /* Unix GMT Time (s) : filled with <now> (@0x0B) */
- NGX_SSL_RANDOM /* Random : must be exactly 28 bytes */
- "\x00" /* Session ID length : empty (no session ID) */
- "\x00\x4E" /* Cipher Suite Length : 78 bytes after this one */
- "\x00\x01" "\x00\x02" "\x00\x03" "\x00\x04" /* 39 most common ciphers : */
- "\x00\x05" "\x00\x06" "\x00\x07" "\x00\x08" /* 0x01...0x1B, 0x2F...0x3A */
- "\x00\x09" "\x00\x0A" "\x00\x0B" "\x00\x0C" /* This covers RSA/DH, */
- "\x00\x0D" "\x00\x0E" "\x00\x0F" "\x00\x10" /* various bit lengths, */
- "\x00\x11" "\x00\x12" "\x00\x13" "\x00\x14" /* SHA1/MD5, DES/3DES/AES... */
- "\x00\x15" "\x00\x16" "\x00\x17" "\x00\x18"
- "\x00\x19" "\x00\x1A" "\x00\x1B" "\x00\x2F"
- "\x00\x30" "\x00\x31" "\x00\x32" "\x00\x33"
- "\x00\x34" "\x00\x35" "\x00\x36" "\x00\x37"
- "\x00\x38" "\x00\x39" "\x00\x3A"
- "\x01" /* Compression Length : 0x01 = 1 byte for types */
- "\x00" /* Compression Type : 0x00 = NULL compression */
-};
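-
-/*
- * Length check for the packet above: after the 5-byte record header the
- * payload is 1 (handshake type) + 3 (handshake length) + 2 (version)
- * + 4 (time) + 28 (random) + 1 (session id length) + 2 (cipher suite
- * length) + 78 (39 cipher suites * 2 bytes) + 1 (compression length)
- * + 1 (compression type) = 121 = 0x79 bytes, and the handshake body
- * after its own 4-byte header is 117 = 0x75 bytes, matching the
- * ContentLength and HandshakeLength fields.
- */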
-
-
-#define NGX_SSL_HANDSHAKE 0x16
-#define NGX_SSL_SERVER_HELLO 0x02
-
-
-#define NGX_AJP_CPING 0x0a
-#define NGX_AJP_CPONG 0x09
-
-
-static char ngx_ajp_cping_packet[] = {
- 0x12, 0x34, 0x00, 0x01, NGX_AJP_CPING, 0x00
-};
-
-static char ngx_ajp_cpong_packet[] = {
- 0x41, 0x42, 0x00, 0x01, NGX_AJP_CPONG
-};
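-
-/*
- * For reference: an AJP packet is a two-byte magic prefix (0x12 0x34
- * from the client, "AB" = 0x41 0x42 from the server), a two-byte
- * payload length and the payload itself, so the CPing above carries the
- * single byte 0x0a and a live backend answers with the one-byte CPong
- * payload 0x09, which is what ngx_http_upstream_check_ajp_parse()
- * compares against.
- */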
-
-
-static ngx_check_conf_t ngx_check_types[] = {
-
- { NGX_HTTP_CHECK_TCP,
- ngx_string("tcp"),
- ngx_null_string,
- 0,
- ngx_http_upstream_check_peek_handler,
- ngx_http_upstream_check_peek_handler,
- NULL,
- NULL,
- NULL,
- 0,
- 1 },
-
- { NGX_HTTP_CHECK_HTTP,
- ngx_string("http"),
- ngx_string("GET / HTTP/1.0\r\n\r\n"),
- NGX_CONF_BITMASK_SET | NGX_CHECK_HTTP_2XX | NGX_CHECK_HTTP_3XX,
- ngx_http_upstream_check_send_handler,
- ngx_http_upstream_check_recv_handler,
- ngx_http_upstream_check_http_init,
- ngx_http_upstream_check_http_parse,
- ngx_http_upstream_check_http_reinit,
- 1,
- 1 },
-
- { NGX_HTTP_CHECK_HTTP,
- ngx_string("fastcgi"),
- ngx_null_string,
- 0,
- ngx_http_upstream_check_send_handler,
- ngx_http_upstream_check_recv_handler,
- ngx_http_upstream_check_http_init,
- ngx_http_upstream_check_fastcgi_parse,
- ngx_http_upstream_check_http_reinit,
- 1,
- 0 },
-
- { NGX_HTTP_CHECK_SSL_HELLO,
- ngx_string("ssl_hello"),
- ngx_string(sslv3_client_hello_pkt),
- 0,
- ngx_http_upstream_check_send_handler,
- ngx_http_upstream_check_recv_handler,
- ngx_http_upstream_check_ssl_hello_init,
- ngx_http_upstream_check_ssl_hello_parse,
- ngx_http_upstream_check_ssl_hello_reinit,
- 1,
- 0 },
-
- { NGX_HTTP_CHECK_MYSQL,
- ngx_string("mysql"),
- ngx_null_string,
- 0,
- ngx_http_upstream_check_send_handler,
- ngx_http_upstream_check_recv_handler,
- ngx_http_upstream_check_mysql_init,
- ngx_http_upstream_check_mysql_parse,
- ngx_http_upstream_check_mysql_reinit,
- 1,
- 0 },
-
- { NGX_HTTP_CHECK_AJP,
- ngx_string("ajp"),
- ngx_string(ngx_ajp_cping_packet),
- 0,
- ngx_http_upstream_check_send_handler,
- ngx_http_upstream_check_recv_handler,
- ngx_http_upstream_check_ajp_init,
- ngx_http_upstream_check_ajp_parse,
- ngx_http_upstream_check_ajp_reinit,
- 1,
- 0 },
-
- { 0,
- ngx_null_string,
- ngx_null_string,
- 0,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- 0,
- 0 }
-};
-
-
-static ngx_check_status_conf_t ngx_check_status_formats[] = {
-
- { ngx_string("html"),
- ngx_string("text/html"),
- ngx_http_upstream_check_status_html_format },
-
- { ngx_string("csv"),
- ngx_string("text/plain"),
- ngx_http_upstream_check_status_csv_format },
-
- { ngx_string("json"),
- ngx_string("application/json"), /* RFC 4627 */
- ngx_http_upstream_check_status_json_format },
-
- { ngx_null_string, ngx_null_string, NULL }
-};
-
-
-static ngx_check_status_command_t ngx_check_status_commands[] = {
-
- { ngx_string("format"),
- ngx_http_upstream_check_status_command_format },
-
- { ngx_string("status"),
- ngx_http_upstream_check_status_command_status },
-
- { ngx_null_string, NULL }
-};
-
-
-static ngx_uint_t ngx_http_upstream_check_shm_generation = 0;
-static ngx_http_upstream_check_peers_t *check_peers_ctx = NULL;
-
-
-ngx_uint_t
-ngx_http_upstream_check_add_peer(ngx_conf_t *cf,
- ngx_http_upstream_srv_conf_t *us, ngx_addr_t *peer_addr)
-{
- ngx_http_upstream_check_peer_t *peer;
- ngx_http_upstream_check_peers_t *peers;
- ngx_http_upstream_check_srv_conf_t *ucscf;
- ngx_http_upstream_check_main_conf_t *ucmcf;
-
- if (us->srv_conf == NULL) {
- return NGX_ERROR;
- }
-
- ucscf = ngx_http_conf_upstream_srv_conf(us, ngx_http_upstream_check_module);
-
-    if (ucscf->check_interval == 0) {
- return NGX_ERROR;
- }
-
- ucmcf = ngx_http_conf_get_module_main_conf(cf,
- ngx_http_upstream_check_module);
- peers = ucmcf->peers;
-
- peer = ngx_array_push(&peers->peers);
- if (peer == NULL) {
- return NGX_ERROR;
- }
-
- ngx_memzero(peer, sizeof(ngx_http_upstream_check_peer_t));
-
- peer->index = peers->peers.nelts - 1;
- peer->conf = ucscf;
- peer->upstream_name = &us->host;
- peer->peer_addr = peer_addr;
-
- if (ucscf->port) {
- peer->check_peer_addr = ngx_pcalloc(cf->pool, sizeof(ngx_addr_t));
- if (peer->check_peer_addr == NULL) {
- return NGX_ERROR;
- }
-
- if (ngx_http_upstream_check_addr_change_port(cf->pool,
- peer->check_peer_addr, peer_addr, ucscf->port)
- != NGX_OK) {
-
- return NGX_ERROR;
- }
-
- } else {
- peer->check_peer_addr = peer->peer_addr;
- }
-
- peers->checksum +=
- ngx_murmur_hash2(peer_addr->name.data, peer_addr->name.len);
-
- return peer->index;
-}
-
-
-static ngx_int_t
-ngx_http_upstream_check_addr_change_port(ngx_pool_t *pool, ngx_addr_t *dst,
- ngx_addr_t *src, ngx_uint_t port)
-{
- size_t len;
- u_char *p;
- struct sockaddr_in *sin;
-#if (NGX_HAVE_INET6)
- struct sockaddr_in6 *sin6;
-#endif
-
- dst->socklen = src->socklen;
- dst->sockaddr = ngx_palloc(pool, dst->socklen);
- if (dst->sockaddr == NULL) {
- return NGX_ERROR;
- }
-
- ngx_memcpy(dst->sockaddr, src->sockaddr, dst->socklen);
-
- switch (dst->sockaddr->sa_family) {
-
- case AF_INET:
-
- len = NGX_INET_ADDRSTRLEN + sizeof(":65535") - 1;
- sin = (struct sockaddr_in *) dst->sockaddr;
- sin->sin_port = htons(port);
-
- break;
-
-#if (NGX_HAVE_INET6)
- case AF_INET6:
-
- len = NGX_INET6_ADDRSTRLEN + sizeof(":65535") - 1;
- sin6 = (struct sockaddr_in6 *) dst->sockaddr;
- sin6->sin6_port = htons(port);
-
- break;
-#endif
-
- default:
- return NGX_ERROR;
- }
-
- p = ngx_pnalloc(pool, len);
- if (p == NULL) {
- return NGX_ERROR;
- }
-
-#if (nginx_version >= 1005012)
- len = ngx_sock_ntop(dst->sockaddr, dst->socklen, p, len, 1);
-#else
- len = ngx_sock_ntop(dst->sockaddr, p, len, 1);
-#endif
-
- dst->name.len = len;
- dst->name.data = p;
-
- return NGX_OK;
-}
-
-
-ngx_uint_t
-ngx_http_upstream_check_peer_down(ngx_uint_t index)
-{
- ngx_http_upstream_check_peer_t *peer;
-
- if (check_peers_ctx == NULL || index >= check_peers_ctx->peers.nelts) {
- return 0;
- }
-
- peer = check_peers_ctx->peers.elts;
-
- return (peer[index].shm->down);
-}
-
-
-/* TODO: this interface could be used to track each peer's busyness */
-void
-ngx_http_upstream_check_get_peer(ngx_uint_t index)
-{
- ngx_http_upstream_check_peer_t *peer;
-
- if (check_peers_ctx == NULL || index >= check_peers_ctx->peers.nelts) {
- return;
- }
-
- peer = check_peers_ctx->peers.elts;
-
- ngx_shmtx_lock(&peer[index].shm->mutex);
-
- peer[index].shm->busyness++;
- peer[index].shm->access_count++;
-
- ngx_shmtx_unlock(&peer[index].shm->mutex);
-}
-
-
-void
-ngx_http_upstream_check_free_peer(ngx_uint_t index)
-{
- ngx_http_upstream_check_peer_t *peer;
-
- if (check_peers_ctx == NULL || index >= check_peers_ctx->peers.nelts) {
- return;
- }
-
- peer = check_peers_ctx->peers.elts;
-
- ngx_shmtx_lock(&peer[index].shm->mutex);
-
- if (peer[index].shm->busyness > 0) {
- peer[index].shm->busyness--;
- }
-
- ngx_shmtx_unlock(&peer[index].shm->mutex);
-}
-
-
-static ngx_int_t
-ngx_http_upstream_check_add_timers(ngx_cycle_t *cycle)
-{
- ngx_uint_t i;
- ngx_msec_t t, delay;
- ngx_check_conf_t *cf;
- ngx_http_upstream_check_peer_t *peer;
- ngx_http_upstream_check_peers_t *peers;
- ngx_http_upstream_check_srv_conf_t *ucscf;
- ngx_http_upstream_check_peer_shm_t *peer_shm;
- ngx_http_upstream_check_peers_shm_t *peers_shm;
-
- peers = check_peers_ctx;
- if (peers == NULL) {
- return NGX_OK;
- }
-
- peers_shm = peers->peers_shm;
- if (peers_shm == NULL) {
- return NGX_OK;
- }
-
- ngx_log_debug2(NGX_LOG_DEBUG_HTTP, cycle->log, 0,
- "http check upstream init_process, shm_name: %V, "
- "peer number: %ud",
- &peers->check_shm_name,
- peers->peers.nelts);
-
- srandom(ngx_pid);
-
- peer = peers->peers.elts;
- peer_shm = peers_shm->peers;
-
- for (i = 0; i < peers->peers.nelts; i++) {
- peer[i].shm = &peer_shm[i];
-
- peer[i].check_ev.handler = ngx_http_upstream_check_begin_handler;
- peer[i].check_ev.log = cycle->log;
- peer[i].check_ev.data = &peer[i];
- peer[i].check_ev.timer_set = 0;
-
- peer[i].check_timeout_ev.handler =
- ngx_http_upstream_check_timeout_handler;
- peer[i].check_timeout_ev.log = cycle->log;
- peer[i].check_timeout_ev.data = &peer[i];
- peer[i].check_timeout_ev.timer_set = 0;
-
- ucscf = peer[i].conf;
- cf = ucscf->check_type_conf;
-
- if (cf->need_pool) {
- peer[i].pool = ngx_create_pool(ngx_pagesize, cycle->log);
- if (peer[i].pool == NULL) {
- return NGX_ERROR;
- }
- }
-
- peer[i].send_handler = cf->send_handler;
- peer[i].recv_handler = cf->recv_handler;
-
- peer[i].init = cf->init;
- peer[i].parse = cf->parse;
- peer[i].reinit = cf->reinit;
-
- /*
- * We add a random start time here, since we don't want to trigger
- * the check events too close to each other at the beginning.
- */
- delay = ucscf->check_interval > 1000 ? ucscf->check_interval : 1000;
- t = ngx_random() % delay;
-
- ngx_add_timer(&peer[i].check_ev, t);
- }
-
- return NGX_OK;
-}
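-
-/*
- * Rough example of the start-up jitter above: with check_interval=30000
- * each peer's first check fires at a random offset in [0, 30000) ms,
- * and even with a very short interval the offsets are still spread over
- * a full second, so a worker does not probe every peer at the same
- * instant after start-up.
- */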
-
-
-static void
-ngx_http_upstream_check_begin_handler(ngx_event_t *event)
-{
- ngx_msec_t interval;
- ngx_http_upstream_check_peer_t *peer;
- ngx_http_upstream_check_peers_t *peers;
- ngx_http_upstream_check_srv_conf_t *ucscf;
- ngx_http_upstream_check_peers_shm_t *peers_shm;
-
- if (ngx_http_upstream_check_need_exit()) {
- return;
- }
-
- peers = check_peers_ctx;
- if (peers == NULL) {
- return;
- }
-
- peers_shm = peers->peers_shm;
- if (peers_shm == NULL) {
- return;
- }
-
- peer = event->data;
- ucscf = peer->conf;
-
- ngx_add_timer(event, ucscf->check_interval / 2);
-
-    /* This worker is already checking this peer; skip this round. */
- if ((peer->shm->owner == ngx_pid ||
- (peer->pc.connection != NULL) ||
- peer->check_timeout_ev.timer_set)) {
- return;
- }
-
- interval = ngx_current_msec - peer->shm->access_time;
- ngx_log_debug5(NGX_LOG_DEBUG_HTTP, event->log, 0,
- "http check begin handler index: %ui, owner: %P, "
- "ngx_pid: %P, interval: %M, check_interval: %M",
- peer->index, peer->shm->owner,
- ngx_pid, interval,
- ucscf->check_interval);
-
- ngx_shmtx_lock(&peer->shm->mutex);
-
- if (peers_shm->generation != ngx_http_upstream_check_shm_generation) {
- ngx_shmtx_unlock(&peer->shm->mutex);
- return;
- }
-
- if ((interval >= ucscf->check_interval)
- && (peer->shm->owner == NGX_INVALID_PID))
- {
- peer->shm->owner = ngx_pid;
-
- } else if (interval >= (ucscf->check_interval << 4)) {
-
-        /*
-         * If the peer has gone untouched for 16 times the check
-         * interval, take over the check anyway: the worker that owned
-         * it may have died without the clean event ever being
-         * triggered.
-         */
- peer->shm->owner = ngx_pid;
- peer->shm->access_time = ngx_current_msec;
- }
-
- ngx_shmtx_unlock(&peer->shm->mutex);
-
- if (peer->shm->owner == ngx_pid) {
- ngx_http_upstream_check_connect_handler(event);
- }
-}
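-
-/*
- * Worked example of the ownership logic above, assuming
- * check_interval=3000: the begin handler re-arms itself every 1500 ms;
- * a worker starts a new check only when at least 3000 ms have passed
- * since the last recorded access and no other worker owns the peer, and
- * if the peer has gone untouched for 48000 ms (16 * interval) it takes
- * the peer over even though a stale owner pid is still recorded.
- */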
-
-
-static void
-ngx_http_upstream_check_connect_handler(ngx_event_t *event)
-{
- ngx_int_t rc;
- ngx_connection_t *c;
- ngx_http_upstream_check_peer_t *peer;
- ngx_http_upstream_check_srv_conf_t *ucscf;
-
- if (ngx_http_upstream_check_need_exit()) {
- return;
- }
-
- peer = event->data;
- ucscf = peer->conf;
-
- if (peer->pc.connection != NULL) {
- c = peer->pc.connection;
- if ((rc = ngx_http_upstream_check_peek_one_byte(c)) == NGX_OK) {
- goto upstream_check_connect_done;
- } else {
- ngx_close_connection(c);
- peer->pc.connection = NULL;
- }
- }
- ngx_memzero(&peer->pc, sizeof(ngx_peer_connection_t));
-
- peer->pc.sockaddr = peer->check_peer_addr->sockaddr;
- peer->pc.socklen = peer->check_peer_addr->socklen;
- peer->pc.name = &peer->check_peer_addr->name;
-
- peer->pc.get = ngx_event_get_peer;
- peer->pc.log = event->log;
- peer->pc.log_error = NGX_ERROR_ERR;
-
- peer->pc.cached = 0;
- peer->pc.connection = NULL;
-
- rc = ngx_event_connect_peer(&peer->pc);
-
- if (rc == NGX_ERROR || rc == NGX_DECLINED) {
- ngx_http_upstream_check_status_update(peer, 0);
- return;
- }
-
- /* NGX_OK or NGX_AGAIN */
- c = peer->pc.connection;
- c->data = peer;
- c->log = peer->pc.log;
- c->sendfile = 0;
- c->read->log = c->log;
- c->write->log = c->log;
- c->pool = peer->pool;
-
-upstream_check_connect_done:
- peer->state = NGX_HTTP_CHECK_CONNECT_DONE;
-
- c->write->handler = peer->send_handler;
- c->read->handler = peer->recv_handler;
-
- ngx_add_timer(&peer->check_timeout_ev, ucscf->check_timeout);
-
- /* The kqueue's loop interface needs it. */
- if (rc == NGX_OK) {
- c->write->handler(c->write);
- }
-}
-
-static ngx_int_t
-ngx_http_upstream_check_peek_one_byte(ngx_connection_t *c)
-{
- char buf[1];
- ngx_int_t n;
- ngx_err_t err;
-
- n = recv(c->fd, buf, 1, MSG_PEEK);
- err = ngx_socket_errno;
-
- ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, err,
- "http check upstream recv(): %i, fd: %d",
- n, c->fd);
-
- if (n == 1 || (n == -1 && err == NGX_EAGAIN)) {
- return NGX_OK;
- } else {
- return NGX_ERROR;
- }
-}
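-
-/*
- * The MSG_PEEK recv() above does not consume any data: a return of 1
- * means a byte is readable and -1 with EAGAIN means the connection is
- * open but idle, both of which count as the peer being reachable, while
- * 0 (orderly close) or any other error marks the check as failed.
- */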
-
-static void
-ngx_http_upstream_check_peek_handler(ngx_event_t *event)
-{
- ngx_connection_t *c;
- ngx_http_upstream_check_peer_t *peer;
-
- if (ngx_http_upstream_check_need_exit()) {
- return;
- }
-
- c = event->data;
- peer = c->data;
-
- if (ngx_http_upstream_check_peek_one_byte(c) == NGX_OK) {
- ngx_http_upstream_check_status_update(peer, 1);
-
- } else {
- c->error = 1;
- ngx_http_upstream_check_status_update(peer, 0);
- }
-
- ngx_http_upstream_check_clean_event(peer);
-
- ngx_http_upstream_check_finish_handler(event);
-}
-
-
-static void
-ngx_http_upstream_check_discard_handler(ngx_event_t *event)
-{
- u_char buf[4096];
- ssize_t size;
- ngx_connection_t *c;
- ngx_http_upstream_check_peer_t *peer;
-
- c = event->data;
-
- ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
- "upstream check discard handler");
-
- if (ngx_http_upstream_check_need_exit()) {
- return;
- }
-
- peer = c->data;
-
- while (1) {
- size = c->recv(c, buf, 4096);
-
- if (size > 0) {
- continue;
-
- } else if (size == NGX_AGAIN) {
- break;
-
- } else {
- if (size == 0) {
- ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
- "peer closed its half side of the connection");
- }
-
- goto check_discard_fail;
- }
- }
-
- if (ngx_handle_read_event(c->read, 0) != NGX_OK) {
- goto check_discard_fail;
- }
-
- return;
-
- check_discard_fail:
- c->error = 1;
- ngx_http_upstream_check_clean_event(peer);
-}
-
-
-static void
-ngx_http_upstream_check_dummy_handler(ngx_event_t *event)
-{
- return;
-}
-
-
-static void
-ngx_http_upstream_check_send_handler(ngx_event_t *event)
-{
- ssize_t size;
- ngx_connection_t *c;
- ngx_http_upstream_check_ctx_t *ctx;
- ngx_http_upstream_check_peer_t *peer;
-
- if (ngx_http_upstream_check_need_exit()) {
- return;
- }
-
- c = event->data;
- peer = c->data;
-
- ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http check send.");
-
- if (c->pool == NULL) {
- ngx_log_error(NGX_LOG_ERR, event->log, 0,
- "check pool NULL with peer: %V ",
- &peer->check_peer_addr->name);
-
- goto check_send_fail;
- }
-
- if (peer->state != NGX_HTTP_CHECK_CONNECT_DONE) {
- if (ngx_handle_write_event(c->write, 0) != NGX_OK) {
-
- ngx_log_error(NGX_LOG_ERR, event->log, 0,
- "check handle write event error with peer: %V ",
- &peer->check_peer_addr->name);
-
- goto check_send_fail;
- }
-
- return;
- }
-
- if (peer->check_data == NULL) {
-
- peer->check_data = ngx_pcalloc(peer->pool,
- sizeof(ngx_http_upstream_check_ctx_t));
- if (peer->check_data == NULL) {
- goto check_send_fail;
- }
-
- if (peer->init == NULL || peer->init(peer) != NGX_OK) {
-
- ngx_log_error(NGX_LOG_ERR, event->log, 0,
- "check init error with peer: %V ",
- &peer->check_peer_addr->name);
-
- goto check_send_fail;
- }
- }
-
- ctx = peer->check_data;
-
- while (ctx->send.pos < ctx->send.last) {
-
- size = c->send(c, ctx->send.pos, ctx->send.last - ctx->send.pos);
-
-#if (NGX_DEBUG)
- {
- ngx_err_t err;
-
- err = (size >=0) ? 0 : ngx_socket_errno;
- ngx_log_error(NGX_LOG_DEBUG, ngx_cycle->log, err,
- "http check send size: %z, total: %z",
- size, ctx->send.last - ctx->send.pos);
- }
-#endif
-
- if (size > 0) {
- ctx->send.pos += size;
- } else if (size == 0 || size == NGX_AGAIN) {
- return;
- } else {
- c->error = 1;
- goto check_send_fail;
- }
- }
-
- if (ctx->send.pos == ctx->send.last) {
- ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http check send done.");
- peer->state = NGX_HTTP_CHECK_SEND_DONE;
- c->requests++;
- }
-
- return;
-
-check_send_fail:
- ngx_http_upstream_check_status_update(peer, 0);
- ngx_http_upstream_check_clean_event(peer);
-}
-
-
-static void
-ngx_http_upstream_check_recv_handler(ngx_event_t *event)
-{
- u_char *new_buf;
- ssize_t size, n;
- ngx_int_t rc;
- ngx_connection_t *c;
- ngx_http_upstream_check_ctx_t *ctx;
- ngx_http_upstream_check_peer_t *peer;
-
- if (ngx_http_upstream_check_need_exit()) {
- return;
- }
-
- c = event->data;
- peer = c->data;
-
- if (peer->state != NGX_HTTP_CHECK_SEND_DONE) {
-
- if (ngx_handle_read_event(c->read, 0) != NGX_OK) {
- goto check_recv_fail;
- }
-
- return;
- }
-
- ctx = peer->check_data;
-
- if (ctx->recv.start == NULL) {
-        /* start with half a page; the buffer is doubled below if it fills up */
- ctx->recv.start = ngx_palloc(c->pool, ngx_pagesize / 2);
- if (ctx->recv.start == NULL) {
- goto check_recv_fail;
- }
-
- ctx->recv.last = ctx->recv.pos = ctx->recv.start;
- ctx->recv.end = ctx->recv.start + ngx_pagesize / 2;
- }
-
- while (1) {
- n = ctx->recv.end - ctx->recv.last;
-
-        /* buffer is full; double its size */
- if (n == 0) {
- size = ctx->recv.end - ctx->recv.start;
- new_buf = ngx_palloc(c->pool, size * 2);
- if (new_buf == NULL) {
- goto check_recv_fail;
- }
-
- ngx_memcpy(new_buf, ctx->recv.start, size);
-
- ctx->recv.pos = ctx->recv.start = new_buf;
- ctx->recv.last = new_buf + size;
- ctx->recv.end = new_buf + size * 2;
-
- n = ctx->recv.end - ctx->recv.last;
- }
-
- size = c->recv(c, ctx->recv.last, n);
-
-#if (NGX_DEBUG)
- {
- ngx_err_t err;
-
- err = (size >= 0) ? 0 : ngx_socket_errno;
- ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, err,
- "http check recv size: %z, peer: %V ",
- size, &peer->check_peer_addr->name);
- }
-#endif
-
- if (size > 0) {
- ctx->recv.last += size;
- continue;
- } else if (size == 0 || size == NGX_AGAIN) {
- break;
- } else {
- c->error = 1;
- goto check_recv_fail;
- }
- }
-
- rc = peer->parse(peer);
-
- ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
- "http check parse rc: %i, peer: %V ",
- rc, &peer->check_peer_addr->name);
-
- switch (rc) {
-
- case NGX_AGAIN:
- /* The peer has closed its half side of the connection. */
- if (size == 0) {
- ngx_http_upstream_check_status_update(peer, 0);
- c->error = 1;
- break;
- }
-
- return;
-
- case NGX_ERROR:
- ngx_log_error(NGX_LOG_ERR, event->log, 0,
- "check protocol %V error with peer: %V ",
- &peer->conf->check_type_conf->name,
- &peer->check_peer_addr->name);
-
- ngx_http_upstream_check_status_update(peer, 0);
- break;
-
- case NGX_OK:
- /* fall through */
-
- default:
- ngx_http_upstream_check_status_update(peer, 1);
- break;
- }
-
- peer->state = NGX_HTTP_CHECK_RECV_DONE;
- ngx_http_upstream_check_clean_event(peer);
- return;
-
-check_recv_fail:
- ngx_http_upstream_check_status_update(peer, 0);
- ngx_http_upstream_check_clean_event(peer);
-}
-
-
-static ngx_int_t
-ngx_http_upstream_check_http_init(ngx_http_upstream_check_peer_t *peer)
-{
- ngx_http_upstream_check_ctx_t *ctx;
- ngx_http_upstream_check_srv_conf_t *ucscf;
-
- ctx = peer->check_data;
- ucscf = peer->conf;
-
- ctx->send.start = ctx->send.pos = (u_char *)ucscf->send.data;
- ctx->send.end = ctx->send.last = ctx->send.start + ucscf->send.len;
-
- ctx->recv.start = ctx->recv.pos = NULL;
- ctx->recv.end = ctx->recv.last = NULL;
-
- ctx->state = 0;
-
- ngx_memzero(&ctx->status, sizeof(ngx_http_status_t));
-
- return NGX_OK;
-}
-
-
-static ngx_int_t
-ngx_http_upstream_check_http_parse(ngx_http_upstream_check_peer_t *peer)
-{
- ngx_int_t rc;
- ngx_uint_t code, code_n;
- ngx_http_upstream_check_ctx_t *ctx;
- ngx_http_upstream_check_srv_conf_t *ucscf;
-
- ucscf = peer->conf;
- ctx = peer->check_data;
-
- if ((ctx->recv.last - ctx->recv.pos) > 0) {
-
- rc = ngx_http_upstream_check_parse_status_line(ctx,
- &ctx->recv,
- &ctx->status);
- if (rc == NGX_AGAIN) {
- return rc;
- }
-
- if (rc == NGX_ERROR) {
- ngx_log_error(NGX_LOG_ERR, ngx_cycle->log, 0,
- "http parse status line error with peer: %V ",
- &peer->check_peer_addr->name);
- return rc;
- }
-
- code = ctx->status.code;
-
- if (code >= 200 && code < 300) {
- code_n = NGX_CHECK_HTTP_2XX;
- } else if (code >= 300 && code < 400) {
- code_n = NGX_CHECK_HTTP_3XX;
- } else if (code >= 400 && code < 500) {
- peer->pc.connection->error = 1;
- code_n = NGX_CHECK_HTTP_4XX;
- } else if (code >= 500 && code < 600) {
- peer->pc.connection->error = 1;
- code_n = NGX_CHECK_HTTP_5XX;
- } else {
- peer->pc.connection->error = 1;
- code_n = NGX_CHECK_HTTP_ERR;
- }
-
- ngx_log_debug2(NGX_LOG_DEBUG_HTTP, ngx_cycle->log, 0,
- "http_parse: code_n: %ui, conf: %ui",
- code_n, ucscf->code.status_alive);
-
- if (code_n & ucscf->code.status_alive) {
- return NGX_OK;
- } else {
- return NGX_ERROR;
- }
- } else {
- return NGX_AGAIN;
- }
-
- return NGX_OK;
-}
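-
-/*
- * Example of the mapping above (configuration values are illustrative):
- * with "check_http_expect_alive http_2xx http_3xx" the status_alive
- * bitmask is NGX_CHECK_HTTP_2XX|NGX_CHECK_HTTP_3XX, so a
- * "HTTP/1.1 200 OK" status line yields code_n = NGX_CHECK_HTTP_2XX and
- * NGX_OK, while a 502 yields NGX_CHECK_HTTP_5XX and NGX_ERROR, failing
- * the check.
- */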
-
-
-static ngx_int_t
-ngx_http_upstream_check_fastcgi_process_record(
- ngx_http_upstream_check_ctx_t *ctx, ngx_buf_t *b, ngx_http_status_t *status)
-{
- u_char ch, *p;
- ngx_http_fastcgi_state_e state;
-
- state = ctx->state;
-
- for (p = b->pos; p < b->last; p++) {
-
- ch = *p;
-
- ngx_log_debug1(NGX_LOG_DEBUG_HTTP, ngx_cycle->log, 0,
- "http fastcgi record byte: %02Xd", ch);
-
- switch (state) {
-
- case ngx_http_fastcgi_st_version:
- if (ch != 1) {
- ngx_log_error(NGX_LOG_ERR, ngx_cycle->log, 0,
- "upstream sent unsupported FastCGI "
- "protocol version: %d", ch);
- return NGX_ERROR;
- }
- state = ngx_http_fastcgi_st_type;
- break;
-
- case ngx_http_fastcgi_st_type:
- switch (ch) {
- case NGX_HTTP_FASTCGI_STDOUT:
- case NGX_HTTP_FASTCGI_STDERR:
- case NGX_HTTP_FASTCGI_END_REQUEST:
- status->code = (ngx_uint_t) ch;
- break;
- default:
- ngx_log_error(NGX_LOG_ERR, ngx_cycle->log, 0,
- "upstream sent invalid FastCGI "
- "record type: %d", ch);
- return NGX_ERROR;
-
- }
- state = ngx_http_fastcgi_st_request_id_hi;
- break;
-
-        /* we support a single request per connection */
-
- case ngx_http_fastcgi_st_request_id_hi:
- if (ch != 0) {
- ngx_log_error(NGX_LOG_ERR, ngx_cycle->log, 0,
- "upstream sent unexpected FastCGI "
- "request id high byte: %d", ch);
- return NGX_ERROR;
- }
- state = ngx_http_fastcgi_st_request_id_lo;
- break;
-
- case ngx_http_fastcgi_st_request_id_lo:
- if (ch != 1) {
- ngx_log_error(NGX_LOG_ERR, ngx_cycle->log, 0,
- "upstream sent unexpected FastCGI "
- "request id low byte: %d", ch);
- return NGX_ERROR;
- }
- state = ngx_http_fastcgi_st_content_length_hi;
- break;
-
- case ngx_http_fastcgi_st_content_length_hi:
- ctx->length = ch << 8;
- state = ngx_http_fastcgi_st_content_length_lo;
- break;
-
- case ngx_http_fastcgi_st_content_length_lo:
- ctx->length |= (size_t) ch;
- state = ngx_http_fastcgi_st_padding_length;
- break;
-
- case ngx_http_fastcgi_st_padding_length:
- ctx->padding = (size_t) ch;
- state = ngx_http_fastcgi_st_reserved;
- break;
-
- case ngx_http_fastcgi_st_reserved:
- state = ngx_http_fastcgi_st_data;
-
- b->pos = p + 1;
- ctx->state = state;
-
- return NGX_OK;
-
- /* suppress warning */
- case ngx_http_fastcgi_st_data:
- case ngx_http_fastcgi_st_padding:
- break;
- }
- }
-
- ctx->state = state;
-
- return NGX_AGAIN;
-}
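-
-/*
- * For reference, the state machine above walks the standard 8-byte
- * FastCGI record header: version (always 1), type, a two-byte request
- * id (this module always expects request id 1), a two-byte content
- * length, a one-byte padding length and a reserved byte, after which it
- * switches to the data state and leaves b->pos at the record body.
- */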
-
-
-static ngx_int_t
-ngx_http_upstream_check_fastcgi_parse(ngx_http_upstream_check_peer_t *peer)
-{
- ngx_int_t rc;
- ngx_flag_t done;
- ngx_uint_t type, code, code_n;
- ngx_http_upstream_check_ctx_t *ctx;
- ngx_http_upstream_check_srv_conf_t *ucscf;
-
- ucscf = peer->conf;
- ctx = peer->check_data;
-
- if ((ctx->recv.last - ctx->recv.pos) <= 0) {
- return NGX_AGAIN;
- }
-
- done = 0;
-
- for ( ;; ) {
-
- if (ctx->state < ngx_http_fastcgi_st_data) {
- rc = ngx_http_upstream_check_fastcgi_process_record(ctx,
- &ctx->recv, &ctx->status);
-
- type = ctx->status.code;
-
- ngx_log_debug2(NGX_LOG_DEBUG_HTTP, ngx_cycle->log, 0,
- "fastcgi_parse rc: [%i], type: [%ui]", rc, type);
-
- if (rc == NGX_AGAIN) {
- return rc;
- }
-
- if (rc == NGX_ERROR) {
- ngx_log_error(NGX_LOG_ERR, ngx_cycle->log, 0,
- "check fastcgi parse status line error with peer: %V",
- &peer->check_peer_addr->name);
-
- return rc;
- }
-
- if (type != NGX_HTTP_FASTCGI_STDOUT
- && type != NGX_HTTP_FASTCGI_STDERR)
- {
- ngx_log_error(NGX_LOG_ERR, ngx_cycle->log, 0,
- "check fastcgi sent unexpected FastCGI record: %d", type);
-
- return NGX_ERROR;
- }
-
- if (type == NGX_HTTP_FASTCGI_STDOUT && ctx->length == 0) {
- ngx_log_error(NGX_LOG_ERR, ngx_cycle->log, 0,
- "check fastcgi prematurely closed FastCGI stdout");
-
- return NGX_ERROR;
- }
- }
-
- if (ctx->state == ngx_http_fastcgi_st_padding) {
-
- if (ctx->recv.pos + ctx->padding < ctx->recv.last) {
- ctx->status.code = ngx_http_fastcgi_st_version;
- ctx->recv.pos += ctx->padding;
-
- continue;
- }
-
- if (ctx->recv.pos + ctx->padding == ctx->recv.last) {
- ctx->status.code = ngx_http_fastcgi_st_version;
- ctx->recv.pos = ctx->recv.last;
-
- return NGX_AGAIN;
- }
-
- ctx->padding -= ctx->recv.last - ctx->recv.pos;
- ctx->recv.pos = ctx->recv.last;
-
- return NGX_AGAIN;
- }
-
- if (ctx->status.code == NGX_HTTP_FASTCGI_STDERR) {
-
- ngx_log_error(NGX_LOG_WARN, ngx_cycle->log, 0,
- "fastcgi check error");
-
- return NGX_ERROR;
- }
-
- /* ctx->status.code == NGX_HTTP_FASTCGI_STDOUT */
-
- if (ctx->recv.pos + ctx->length < ctx->recv.last) {
- ctx->recv.last = ctx->recv.pos + ctx->length;
- } else {
- return NGX_ERROR;
- }
-
- ctx->status.code = 0;
-
- for ( ;; ) {
- rc = ngx_http_upstream_check_parse_fastcgi_status(ctx,
- &ctx->recv,
- &ctx->status);
- ngx_log_error(NGX_LOG_INFO, ngx_cycle->log, 0,
- "fastcgi http parse status line rc: %i ", rc);
-
- if (rc == NGX_ERROR) {
- ngx_log_error(NGX_LOG_ERR, ngx_cycle->log, 0,
- "fastcgi http parse status line error with peer: %V ",
- &peer->check_peer_addr->name);
- return NGX_ERROR;
- }
-
- if (rc == NGX_AGAIN) {
- break;
- }
-
- if (rc == NGX_DONE) {
- done = 1;
- ngx_log_error(NGX_LOG_DEBUG, ngx_cycle->log, 0,
- "fastcgi http parse status: %i",
- ctx->status.code);
- break;
- }
-
- /* rc = NGX_OK */
- }
-
- if (ucscf->code.status_alive == 0 || done == 0) {
- return NGX_OK;
- }
-
- code = ctx->status.code;
-
- if (code >= 200 && code < 300) {
- code_n = NGX_CHECK_HTTP_2XX;
- } else if (code >= 300 && code < 400) {
- code_n = NGX_CHECK_HTTP_3XX;
- } else if (code >= 400 && code < 500) {
- code_n = NGX_CHECK_HTTP_4XX;
- } else if (code >= 500 && code < 600) {
- code_n = NGX_CHECK_HTTP_5XX;
- } else {
- code_n = NGX_CHECK_HTTP_ERR;
- }
-
- ngx_log_debug2(NGX_LOG_DEBUG_HTTP, ngx_cycle->log, 0,
- "fastcgi http_parse: code_n: %ui, conf: %ui",
- code_n, ucscf->code.status_alive);
-
- if (code_n & ucscf->code.status_alive) {
- return NGX_OK;
- } else {
- return NGX_ERROR;
- }
-
- }
-
- return NGX_OK;
-}
-
-
-static ngx_int_t
-ngx_http_upstream_check_parse_fastcgi_status(ngx_http_upstream_check_ctx_t *ctx,
- ngx_buf_t *b, ngx_http_status_t *status)
-{
- u_char c, ch, *p, *name_s, *name_e;
- ngx_flag_t find;
-
- enum {
- sw_start = 0,
- sw_name,
- sw_space_before_value,
- sw_value,
- sw_space_after_value,
- sw_ignore_line,
- sw_almost_done,
- sw_header_almost_done
- } state;
-
-    /* the last '\0' is not needed because the string is zero-terminated */
-
- static u_char lowcase[] =
- "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
- "\0\0\0\0\0\0\0\0\0\0\0\0\0-\0\0" "0123456789\0\0\0\0\0\0"
- "\0abcdefghijklmnopqrstuvwxyz\0\0\0\0\0"
- "\0abcdefghijklmnopqrstuvwxyz\0\0\0\0\0"
- "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
- "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
- "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
- "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
-
- status->count = 0;
- status->code = 0;
- find = 0;
- name_s = name_e = NULL;
- state = sw_start;
-
- for (p = b->pos; p < b->last; p++) {
- ch = *p;
-
- switch (state) {
-
- /* first char */
- case sw_start:
-
- switch (ch) {
- case CR:
- state = sw_header_almost_done;
- break;
- case LF:
- goto header_done;
- default:
- state = sw_name;
-
- c = lowcase[ch];
-
- if (c) {
- name_s = p;
- break;
- }
-
- if (ch == '\0') {
- return NGX_ERROR;
- }
-
-
- break;
- }
-
- break;
-
- /* header name */
- case sw_name:
- c = lowcase[ch];
-
- if (c) {
- break;
- }
-
- if (ch == ':') {
- name_e = p;
-#if (NGX_DEBUG)
- ngx_str_t name;
- name.data = name_s;
- name.len = name_e - name_s;
- ngx_log_debug1(NGX_LOG_DEBUG_HTTP, ngx_cycle->log, 0,
- "fastcgi header: %V", &name);
-#endif
- state = sw_space_before_value;
-
- if (ngx_strncasecmp(name_s, (u_char *) "status",
- name_e - name_s)
- == 0)
- {
-
- ngx_log_debug0(NGX_LOG_DEBUG_HTTP, ngx_cycle->log, 0,
- "find status header");
-
- find = 1;
- }
-
- break;
- }
-
- if (ch == CR) {
- state = sw_almost_done;
- break;
- }
-
- if (ch == LF) {
- goto done;
- }
-
- /* IIS may send the duplicate "HTTP/1.1 ..." lines */
- if (ch == '\0') {
- return NGX_ERROR;
- }
-
- break;
-
- /* space* before header value */
- case sw_space_before_value:
- switch (ch) {
- case ' ':
- break;
- case CR:
- state = sw_almost_done;
- break;
- case LF:
- goto done;
- case '\0':
- return NGX_ERROR;
- default:
- state = sw_value;
- if (find) {
- if (ch < '1' || ch > '9') {
- return NGX_ERROR;
- }
-
- status->code = status->code * 10 + ch - '0';
- if (status->count++ != 0) {
- return NGX_ERROR;
- }
- }
-
- break;
- }
-
- break;
-
- /* header value */
- case sw_value:
-
- if (find) {
- if (ch < '0' || ch > '9') {
- return NGX_ERROR;
- }
-
- status->code = status->code * 10 + ch - '0';
-
- if (++status->count == 3) {
- return NGX_DONE;
- }
- }
-
- switch (ch) {
- case ' ':
- state = sw_space_after_value;
- break;
- case CR:
- state = sw_almost_done;
- break;
- case LF:
- goto done;
- case '\0':
- return NGX_ERROR;
- }
-
- break;
-
- /* space* before end of header line */
- case sw_space_after_value:
- switch (ch) {
- case ' ':
- break;
- case CR:
- state = sw_almost_done;
- break;
- case LF:
- state = sw_start;
- break;
- case '\0':
- return NGX_ERROR;
- default:
- state = sw_value;
- break;
- }
- break;
-
- /* ignore header line */
- case sw_ignore_line:
- switch (ch) {
- case LF:
- state = sw_start;
- break;
- default:
- break;
- }
- break;
-
- /* end of header line */
- case sw_almost_done:
- switch (ch) {
- case LF:
- goto done;
- case CR:
- break;
- default:
- return NGX_ERROR;
- }
- break;
-
- /* end of header */
- case sw_header_almost_done:
- switch (ch) {
- case LF:
- goto header_done;
- default:
- return NGX_ERROR;
- }
- }
- }
-
- b->pos = p;
- ctx->state = state;
-
- return NGX_AGAIN;
-
-done:
-
- b->pos = p + 1;
- ctx->state = sw_start;
-
- return NGX_OK;
-
-header_done:
-
- b->pos = p + 1;
- ctx->state = sw_start;
-
- return NGX_OK;
-}
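-
-/*
- * Illustration: a FastCGI responder usually reports errors with a
- * header block such as "Status: 502 Bad Gateway\r\n...", so the scan
- * above looks for a header named "status" and returns NGX_DONE once the
- * three digits of its value have been read; if no Status header appears
- * before the end of the headers, status->code is left at 0.
- */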
-
-
-static ngx_int_t
-ngx_http_upstream_check_parse_status_line(ngx_http_upstream_check_ctx_t *ctx,
- ngx_buf_t *b, ngx_http_status_t *status)
-{
- u_char ch, *p;
- enum {
- sw_start = 0,
- sw_H,
- sw_HT,
- sw_HTT,
- sw_HTTP,
- sw_first_major_digit,
- sw_major_digit,
- sw_first_minor_digit,
- sw_minor_digit,
- sw_status,
- sw_space_after_status,
- sw_status_text,
- sw_almost_done
- } state;
-
- state = ctx->state;
-
- for (p = b->pos; p < b->last; p++) {
- ch = *p;
-
- switch (state) {
-
- /* "HTTP/" */
- case sw_start:
- if (ch != 'H') {
- return NGX_ERROR;
- }
-
- state = sw_H;
- break;
-
- case sw_H:
- if (ch != 'T') {
- return NGX_ERROR;
- }
-
- state = sw_HT;
- break;
-
- case sw_HT:
- if (ch != 'T') {
- return NGX_ERROR;
- }
-
- state = sw_HTT;
- break;
-
- case sw_HTT:
- if (ch != 'P') {
- return NGX_ERROR;
- }
-
- state = sw_HTTP;
- break;
-
- case sw_HTTP:
- if (ch != '/') {
- return NGX_ERROR;
- }
-
- state = sw_first_major_digit;
- break;
-
- /* the first digit of major HTTP version */
- case sw_first_major_digit:
- if (ch < '1' || ch > '9') {
- return NGX_ERROR;
- }
-
- state = sw_major_digit;
- break;
-
- /* the major HTTP version or dot */
- case sw_major_digit:
- if (ch == '.') {
- state = sw_first_minor_digit;
- break;
- }
-
- if (ch < '0' || ch > '9') {
- return NGX_ERROR;
- }
-
- break;
-
- /* the first digit of minor HTTP version */
- case sw_first_minor_digit:
- if (ch < '0' || ch > '9') {
- return NGX_ERROR;
- }
-
- state = sw_minor_digit;
- break;
-
- /* the minor HTTP version or the end of the request line */
- case sw_minor_digit:
- if (ch == ' ') {
- state = sw_status;
- break;
- }
-
- if (ch < '0' || ch > '9') {
- return NGX_ERROR;
- }
-
- break;
-
- /* HTTP status code */
- case sw_status:
- if (ch == ' ') {
- break;
- }
-
- if (ch < '0' || ch > '9') {
- return NGX_ERROR;
- }
-
- status->code = status->code * 10 + ch - '0';
-
- if (++status->count == 3) {
- state = sw_space_after_status;
- status->start = p - 2;
- }
-
- break;
-
- /* space or end of line */
- case sw_space_after_status:
- switch (ch) {
- case ' ':
- state = sw_status_text;
- break;
- case '.': /* IIS may send 403.1, 403.2, etc */
- state = sw_status_text;
- break;
- case CR:
- state = sw_almost_done;
- break;
- case LF:
- goto done;
- default:
- return NGX_ERROR;
- }
- break;
-
- /* any text until end of line */
- case sw_status_text:
- switch (ch) {
- case CR:
- state = sw_almost_done;
-
- break;
- case LF:
- goto done;
- }
- break;
-
- /* end of status line */
- case sw_almost_done:
- status->end = p - 1;
- if (ch == LF) {
- goto done;
- } else {
- return NGX_ERROR;
- }
- }
- }
-
- b->pos = p;
- ctx->state = state;
-
- return NGX_AGAIN;
-
-done:
-
- b->pos = p + 1;
-
- if (status->end == NULL) {
- status->end = p;
- }
-
- ctx->state = sw_start;
-
- return NGX_OK;
-}
-
-
-static void
-ngx_http_upstream_check_http_reinit(ngx_http_upstream_check_peer_t *peer)
-{
- ngx_http_upstream_check_ctx_t *ctx;
-
- ctx = peer->check_data;
-
- ctx->send.pos = ctx->send.start;
- ctx->send.last = ctx->send.end;
-
- ctx->recv.pos = ctx->recv.last = ctx->recv.start;
-
- ctx->state = 0;
-
- ngx_memzero(&ctx->status, sizeof(ngx_http_status_t));
-}
-
-
-static ngx_int_t
-ngx_http_upstream_check_ssl_hello_init(ngx_http_upstream_check_peer_t *peer)
-{
- ngx_http_upstream_check_ctx_t *ctx;
- ngx_http_upstream_check_srv_conf_t *ucscf;
-
- ctx = peer->check_data;
- ucscf = peer->conf;
-
- ctx->send.start = ctx->send.pos = (u_char *)ucscf->send.data;
- ctx->send.end = ctx->send.last = ctx->send.start + ucscf->send.len;
-
- ctx->recv.start = ctx->recv.pos = NULL;
- ctx->recv.end = ctx->recv.last = NULL;
-
- return NGX_OK;
-}
-
-
-/* a rough check of server ssl_hello responses */
-static ngx_int_t
-ngx_http_upstream_check_ssl_hello_parse(ngx_http_upstream_check_peer_t *peer)
-{
- size_t size;
- ngx_ssl_server_hello_t *resp;
- ngx_http_upstream_check_ctx_t *ctx;
-
- ctx = peer->check_data;
-
- size = ctx->recv.last - ctx->recv.pos;
- if (size < sizeof(ngx_ssl_server_hello_t)) {
- return NGX_AGAIN;
- }
-
- resp = (ngx_ssl_server_hello_t *) ctx->recv.pos;
-
- ngx_log_debug7(NGX_LOG_DEBUG_HTTP, ngx_cycle->log, 0,
- "http check ssl_parse, type: %ud, version: %ud.%ud, "
-                   "length: %ud, handshake_type: %ud, hello_version: %ud.%ud",
- resp->msg_type, resp->version.major, resp->version.minor,
- ntohs(resp->length), resp->handshake_type,
- resp->hello_version.major, resp->hello_version.minor);
-
- if (resp->msg_type != NGX_SSL_HANDSHAKE) {
- return NGX_ERROR;
- }
-
- if (resp->handshake_type != NGX_SSL_SERVER_HELLO) {
- return NGX_ERROR;
- }
-
- return NGX_OK;
-}
-
-
-static void
-ngx_http_upstream_check_ssl_hello_reinit(ngx_http_upstream_check_peer_t *peer)
-{
- ngx_http_upstream_check_ctx_t *ctx;
-
- ctx = peer->check_data;
-
- ctx->send.pos = ctx->send.start;
- ctx->send.last = ctx->send.end;
-
- ctx->recv.pos = ctx->recv.last = ctx->recv.start;
-}
-
-
-static ngx_int_t
-ngx_http_upstream_check_mysql_init(ngx_http_upstream_check_peer_t *peer)
-{
- ngx_http_upstream_check_ctx_t *ctx;
- ngx_http_upstream_check_srv_conf_t *ucscf;
-
- ctx = peer->check_data;
- ucscf = peer->conf;
-
- ctx->send.start = ctx->send.pos = (u_char *)ucscf->send.data;
- ctx->send.end = ctx->send.last = ctx->send.start + ucscf->send.len;
-
- ctx->recv.start = ctx->recv.pos = NULL;
- ctx->recv.end = ctx->recv.last = NULL;
-
- return NGX_OK;
-}
-
-
-/* a rough check of mysql greeting responses */
-static ngx_int_t
-ngx_http_upstream_check_mysql_parse(ngx_http_upstream_check_peer_t *peer)
-{
- size_t size;
- ngx_mysql_handshake_init_t *handshake;
- ngx_http_upstream_check_ctx_t *ctx;
-
- ctx = peer->check_data;
-
- size = ctx->recv.last - ctx->recv.pos;
- if (size < sizeof(ngx_mysql_handshake_init_t)) {
- return NGX_AGAIN;
- }
-
- handshake = (ngx_mysql_handshake_init_t *) ctx->recv.pos;
-
- ngx_log_debug3(NGX_LOG_DEBUG_HTTP, ngx_cycle->log, 0,
- "mysql_parse: packet_number=%ud, protocol=%ud, server=%s",
- handshake->packet_number, handshake->protocol_version,
- handshake->others);
-
- /* The mysql greeting packet's serial number always begins with 0. */
- if (handshake->packet_number != 0x00) {
- return NGX_ERROR;
- }
-
- return NGX_OK;
-}
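-
-/*
- * For reference: the server greeting starts with a 3-byte payload
- * length, a 1-byte sequence number (always 0 for the greeting) and the
- * 1-byte protocol version, so checking the sequence number above is a
- * cheap liveness test that never has to complete a MySQL handshake.
- */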
-
-
-static void
-ngx_http_upstream_check_mysql_reinit(ngx_http_upstream_check_peer_t *peer)
-{
- ngx_http_upstream_check_ctx_t *ctx;
-
- ctx = peer->check_data;
-
- ctx->send.pos = ctx->send.start;
- ctx->send.last = ctx->send.end;
-
- ctx->recv.pos = ctx->recv.last = ctx->recv.start;
-}
-
-
-static ngx_int_t
-ngx_http_upstream_check_ajp_init(ngx_http_upstream_check_peer_t *peer)
-{
- ngx_http_upstream_check_ctx_t *ctx;
- ngx_http_upstream_check_srv_conf_t *ucscf;
-
- ctx = peer->check_data;
- ucscf = peer->conf;
-
- ctx->send.start = ctx->send.pos = (u_char *)ucscf->send.data;
- ctx->send.end = ctx->send.last = ctx->send.start + ucscf->send.len;
-
- ctx->recv.start = ctx->recv.pos = NULL;
- ctx->recv.end = ctx->recv.last = NULL;
-
- return NGX_OK;
-}
-
-
-static ngx_int_t
-ngx_http_upstream_check_ajp_parse(ngx_http_upstream_check_peer_t *peer)
-{
- size_t size;
- u_char *p;
- ngx_http_upstream_check_ctx_t *ctx;
-
- ctx = peer->check_data;
-
- size = ctx->recv.last - ctx->recv.pos;
- if (size < sizeof(ngx_ajp_cpong_packet)) {
- return NGX_AGAIN;
- }
-
- p = ctx->recv.pos;
-
-#if (NGX_DEBUG)
- {
- ngx_ajp_raw_packet_t *ajp;
-
- ajp = (ngx_ajp_raw_packet_t *) p;
- ngx_log_debug3(NGX_LOG_DEBUG_HTTP, ngx_cycle->log, 0,
- "ajp_parse: preamble=0x%uxd, length=0x%uxd, type=0x%uxd",
- ntohs(ajp->preamble), ntohs(ajp->length), ajp->type);
- }
-#endif
-
- if (ngx_memcmp(ngx_ajp_cpong_packet, p, sizeof(ngx_ajp_cpong_packet)) == 0)
- {
- return NGX_OK;
- } else {
- return NGX_ERROR;
- }
-}
-
-
-static void
-ngx_http_upstream_check_ajp_reinit(ngx_http_upstream_check_peer_t *peer)
-{
- ngx_http_upstream_check_ctx_t *ctx;
-
- ctx = peer->check_data;
-
- ctx->send.pos = ctx->send.start;
- ctx->send.last = ctx->send.end;
-
- ctx->recv.pos = ctx->recv.last = ctx->recv.start;
-}
-
-
-static void
-ngx_http_upstream_check_status_update(ngx_http_upstream_check_peer_t *peer,
- ngx_int_t result)
-{
- ngx_http_upstream_check_srv_conf_t *ucscf;
-
- ucscf = peer->conf;
-
- if (result) {
- peer->shm->rise_count++;
- peer->shm->fall_count = 0;
- if (peer->shm->down && peer->shm->rise_count >= ucscf->rise_count) {
- peer->shm->down = 0;
- ngx_log_error(NGX_LOG_ERR, ngx_cycle->log, 0,
- "enable check peer: %V ",
- &peer->check_peer_addr->name);
- }
- } else {
- peer->shm->rise_count = 0;
- peer->shm->fall_count++;
- if (!peer->shm->down && peer->shm->fall_count >= ucscf->fall_count) {
- peer->shm->down = 1;
- ngx_log_error(NGX_LOG_ERR, ngx_cycle->log, 0,
- "disable check peer: %V ",
- &peer->check_peer_addr->name);
- }
- }
-
- peer->shm->access_time = ngx_current_msec;
-}
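-
-/*
- * Example of the hysteresis above with the defaults rise=2 and fall=5:
- * a peer marked down needs 2 consecutive successful checks before it is
- * re-enabled, a healthy peer needs 5 consecutive failures before it is
- * marked down, and a single opposite result resets the other counter,
- * so one flapping probe cannot toggle the peer.
- */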
-
-
-static void
-ngx_http_upstream_check_clean_event(ngx_http_upstream_check_peer_t *peer)
-{
- ngx_connection_t *c;
- ngx_http_upstream_check_srv_conf_t *ucscf;
- ngx_check_conf_t *cf;
-
- c = peer->pc.connection;
- ucscf = peer->conf;
- cf = ucscf->check_type_conf;
-
- if (c) {
- ngx_log_debug2(NGX_LOG_DEBUG_HTTP, c->log, 0,
- "http check clean event: index:%i, fd: %d",
- peer->index, c->fd);
- if (c->error == 0 &&
- cf->need_keepalive &&
- (c->requests < ucscf->check_keepalive_requests))
- {
- c->write->handler = ngx_http_upstream_check_dummy_handler;
- c->read->handler = ngx_http_upstream_check_discard_handler;
- } else {
- ngx_close_connection(c);
- peer->pc.connection = NULL;
- }
- }
-
- if (peer->check_timeout_ev.timer_set) {
- ngx_del_timer(&peer->check_timeout_ev);
- }
-
- peer->state = NGX_HTTP_CHECK_ALL_DONE;
-
- if (peer->check_data != NULL && peer->reinit) {
- peer->reinit(peer);
- }
-
- peer->shm->owner = NGX_INVALID_PID;
-}
-
-
-static void
-ngx_http_upstream_check_timeout_handler(ngx_event_t *event)
-{
- ngx_http_upstream_check_peer_t *peer;
-
- if (ngx_http_upstream_check_need_exit()) {
- return;
- }
-
- peer = event->data;
- peer->pc.connection->error = 1;
-
- ngx_log_error(NGX_LOG_ERR, event->log, 0,
-                  "check timed out with peer: %V ",
- &peer->check_peer_addr->name);
-
- ngx_http_upstream_check_status_update(peer, 0);
- ngx_http_upstream_check_clean_event(peer);
-}
-
-
-static void
-ngx_http_upstream_check_finish_handler(ngx_event_t *event)
-{
- if (ngx_http_upstream_check_need_exit()) {
- return;
- }
-}
-
-
-static ngx_int_t
-ngx_http_upstream_check_need_exit()
-{
- if (ngx_terminate || ngx_exiting || ngx_quit) {
- ngx_http_upstream_check_clear_all_events();
- return 1;
- }
-
- return 0;
-}
-
-
-static void
-ngx_http_upstream_check_clear_all_events()
-{
- ngx_uint_t i;
- ngx_connection_t *c;
- ngx_http_upstream_check_peer_t *peer;
- ngx_http_upstream_check_peers_t *peers;
-
- static ngx_flag_t has_cleared = 0;
-
- if (has_cleared || check_peers_ctx == NULL) {
- return;
- }
-
- ngx_log_error(NGX_LOG_NOTICE, ngx_cycle->log, 0,
- "clear all the events on %P ", ngx_pid);
-
- has_cleared = 1;
-
- peers = check_peers_ctx;
-
- peer = peers->peers.elts;
- for (i = 0; i < peers->peers.nelts; i++) {
-
- if (peer[i].check_ev.timer_set) {
- ngx_del_timer(&peer[i].check_ev);
- }
-
- if (peer[i].check_timeout_ev.timer_set) {
- ngx_del_timer(&peer[i].check_timeout_ev);
- }
-
- c = peer[i].pc.connection;
- if (c) {
- ngx_close_connection(c);
- peer[i].pc.connection = NULL;
- }
-
- if (peer[i].pool != NULL) {
- ngx_destroy_pool(peer[i].pool);
- peer[i].pool = NULL;
- }
- }
-}
-
-
-static ngx_int_t
-ngx_http_upstream_check_status_handler(ngx_http_request_t *r)
-{
- size_t buffer_size;
- ngx_int_t rc;
- ngx_buf_t *b;
- ngx_chain_t out;
- ngx_http_upstream_check_peers_t *peers;
- ngx_http_upstream_check_loc_conf_t *uclcf;
- ngx_http_upstream_check_status_ctx_t *ctx;
-
- if (r->method != NGX_HTTP_GET && r->method != NGX_HTTP_HEAD) {
- return NGX_HTTP_NOT_ALLOWED;
- }
-
- rc = ngx_http_discard_request_body(r);
- if (rc != NGX_OK) {
- return rc;
- }
+static char *ngx_http_upstream_check(ngx_conf_t *cf,
+ ngx_command_t *cmd, void *conf);
+static char * ngx_http_upstream_check_http_send(ngx_conf_t *cf,
+ ngx_command_t *cmd, void *conf);
+static char * ngx_http_upstream_check_http_expect_alive(ngx_conf_t *cf,
+ ngx_command_t *cmd, void *conf);
+
+static char * ngx_http_upstream_check_shm_size(ngx_conf_t *cf,
+ ngx_command_t *cmd, void *conf);
+static char * ngx_http_upstream_check_status(ngx_conf_t *cf,
+ ngx_command_t *cmd, void *conf);
- uclcf = ngx_http_get_module_loc_conf(r, ngx_http_upstream_check_module);
+static void *ngx_http_upstream_check_create_main_conf(ngx_conf_t *cf);
+static char *ngx_http_upstream_check_init_main_conf(ngx_conf_t *cf, void *conf);
- ctx = ngx_pcalloc(r->pool, sizeof(ngx_http_upstream_check_status_ctx_t));
- if (ctx == NULL) {
- return NGX_HTTP_INTERNAL_SERVER_ERROR;
- }
+static void * ngx_http_upstream_check_create_srv_conf(ngx_conf_t *cf);
+static char * ngx_http_upstream_check_init_srv_conf(ngx_conf_t *cf, void *conf);
- ngx_http_upstream_check_status_parse_args(r, ctx);
+static ngx_int_t ngx_http_check_init_process(ngx_cycle_t *cycle);
- if (ctx->format == NULL) {
- ctx->format = uclcf->format;
- }
- r->headers_out.content_type = ctx->format->content_type;
+static ngx_conf_bitmask_t ngx_check_http_expect_alive_masks[] = {
+ { ngx_string("http_2xx"), NGX_CHECK_HTTP_2XX },
+ { ngx_string("http_3xx"), NGX_CHECK_HTTP_3XX },
+ { ngx_string("http_4xx"), NGX_CHECK_HTTP_4XX },
+ { ngx_string("http_5xx"), NGX_CHECK_HTTP_5XX },
+ { ngx_null_string, 0 }
+};
- if (r->method == NGX_HTTP_HEAD) {
- r->headers_out.status = NGX_HTTP_OK;
- rc = ngx_http_send_header(r);
+static ngx_command_t ngx_http_upstream_check_commands[] = {
- if (rc == NGX_ERROR || rc > NGX_OK || r->header_only) {
- return rc;
- }
- }
+ { ngx_string("check"),
+ NGX_HTTP_UPS_CONF|NGX_CONF_1MORE,
+ ngx_http_upstream_check,
+ 0,
+ 0,
+ NULL },
- peers = check_peers_ctx;
- if (peers == NULL) {
- ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
- "http upstream check module can not find any check "
- "server, make sure you've added the check servers");
+ { ngx_string("check_http_send"),
+ NGX_HTTP_UPS_CONF|NGX_CONF_TAKE1,
+ ngx_http_upstream_check_http_send,
+ 0,
+ 0,
+ NULL },
- return NGX_HTTP_INTERNAL_SERVER_ERROR;
- }
+ { ngx_string("check_http_expect_alive"),
+ NGX_HTTP_UPS_CONF|NGX_CONF_1MORE,
+ ngx_http_upstream_check_http_expect_alive,
+ 0,
+ 0,
+ NULL },
- /* 1/4 pagesize for each record */
- buffer_size = peers->peers.nelts * ngx_pagesize / 4;
- buffer_size = ngx_align(buffer_size, ngx_pagesize) + ngx_pagesize;
+ { ngx_string("check_shm_size"),
+ NGX_HTTP_MAIN_CONF|NGX_CONF_TAKE1,
+ ngx_http_upstream_check_shm_size,
+ 0,
+ 0,
+ NULL },
- b = ngx_create_temp_buf(r->pool, buffer_size);
- if (b == NULL) {
- return NGX_HTTP_INTERNAL_SERVER_ERROR;
- }
+ { ngx_string("check_status"),
+ NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_NOARGS,
+ ngx_http_upstream_check_status,
+ 0,
+ 0,
+ NULL },
- out.buf = b;
- out.next = NULL;
+ ngx_null_command
+};
- ctx->format->output(b, peers, ctx->flag);
- r->headers_out.status = NGX_HTTP_OK;
- r->headers_out.content_length_n = b->last - b->pos;
+static ngx_http_module_t ngx_http_upstream_check_module_ctx = {
+ NULL, /* preconfiguration */
+ NULL, /* postconfiguration */
- if (r->headers_out.content_length_n == 0) {
- r->header_only = 1;
- }
+ ngx_http_upstream_check_create_main_conf, /* create main configuration */
+ ngx_http_upstream_check_init_main_conf, /* init main configuration */
- b->last_buf = 1;
+ ngx_http_upstream_check_create_srv_conf, /* create server configuration */
+ NULL, /* merge server configuration */
- rc = ngx_http_send_header(r);
+ NULL, /* create location configuration */
+ NULL /* merge location configuration */
+};
- if (rc == NGX_ERROR || rc > NGX_OK || r->header_only) {
- return rc;
- }
- return ngx_http_output_filter(r, &out);
-}
+ngx_module_t ngx_http_upstream_check_module = {
+ NGX_MODULE_V1,
+ &ngx_http_upstream_check_module_ctx, /* module context */
+ ngx_http_upstream_check_commands, /* module directives */
+ NGX_HTTP_MODULE, /* module type */
+ NULL, /* init master */
+ NULL, /* init module */
+ ngx_http_check_init_process, /* init process */
+ NULL, /* init thread */
+ NULL, /* exit thread */
+ NULL, /* exit process */
+ NULL, /* exit master */
+ NGX_MODULE_V1_PADDING
+};
-static void
-ngx_http_upstream_check_status_parse_args(ngx_http_request_t *r,
- ngx_http_upstream_check_status_ctx_t *ctx)
+check_conf_t *
+ngx_http_get_check_type_conf(ngx_str_t *str)
{
- ngx_str_t value;
- ngx_uint_t i;
- ngx_check_status_command_t *command;
-
- if (r->args.len == 0) {
- return;
- }
+ ngx_uint_t i;
- for (i = 0; /* void */ ; i++) {
+ for (i = 0; ;i++) {
- command = &ngx_check_status_commands[i];
-
- if (command->name.len == 0) {
+ if (ngx_check_types[i].type == 0) {
break;
}
- if (ngx_http_arg(r, command->name.data, command->name.len, &value)
- == NGX_OK) {
-
- if (command->handler(ctx, &value) != NGX_OK) {
- ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
- "http upstream check, bad argument: \"%V\"",
- &value);
- }
+ if (ngx_strncmp(str->data,
+ (u_char *)ngx_check_types[i].name, str->len) == 0) {
+ return &ngx_check_types[i];
}
}
- ngx_log_error(NGX_LOG_DEBUG, r->connection->log, 0,
- "http upstream check, flag: \"%ui\"", ctx->flag);
-}
-
-
-static ngx_int_t
-ngx_http_upstream_check_status_command_format(
- ngx_http_upstream_check_status_ctx_t *ctx, ngx_str_t *value)
-{
- ctx->format = ngx_http_get_check_status_format_conf(value);
- if (ctx->format == NULL) {
- return NGX_ERROR;
- }
-
- return NGX_OK;
+ return NULL;
}
-static ngx_int_t
-ngx_http_upstream_check_status_command_status(
- ngx_http_upstream_check_status_ctx_t *ctx, ngx_str_t *value)
+ngx_uint_t
+ngx_http_check_add_peer(ngx_conf_t *cf, ngx_http_upstream_srv_conf_t *us,
+ ngx_peer_addr_t *peer_addr)
{
- if (value->len == (sizeof("down") - 1)
- && ngx_strncasecmp(value->data, (u_char *) "down", value->len) == 0) {
-
- ctx->flag |= NGX_CHECK_STATUS_DOWN;
-
- } else if (value->len == (sizeof("up") - 1)
- && ngx_strncasecmp(value->data, (u_char *) "up", value->len)
- == 0) {
-
- ctx->flag |= NGX_CHECK_STATUS_UP;
+ ngx_http_check_peer_t *peer;
+ ngx_http_check_peers_t *peers;
+ ngx_http_upstream_check_srv_conf_t *ucscf;
+ ngx_http_upstream_check_main_conf_t *ucmcf;
- } else {
+ if (us->srv_conf == NULL) {
return NGX_ERROR;
}
- return NGX_OK;
-}
-
-
-static void
-ngx_http_upstream_check_status_html_format(ngx_buf_t *b,
- ngx_http_upstream_check_peers_t *peers, ngx_uint_t flag)
-{
- ngx_uint_t i, count;
- ngx_http_upstream_check_peer_t *peer;
-
- peer = peers->peers.elts;
-
- count = 0;
-
- for (i = 0; i < peers->peers.nelts; i++) {
-
- if (flag & NGX_CHECK_STATUS_DOWN) {
-
- if (!peer[i].shm->down) {
- continue;
- }
-
- } else if (flag & NGX_CHECK_STATUS_UP) {
-
- if (peer[i].shm->down) {
- continue;
- }
- }
-
- count++;
- }
-
- b->last = ngx_snprintf(b->last, b->end - b->last,
- "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\n"
- "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n"
- "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n"
- "<head>\n"
- " <title>Nginx http upstream check status</title>\n"
- "</head>\n"
- "<body>\n"
- "<h1>Nginx http upstream check status</h1>\n"
- "<h2>Check upstream server number: %ui, generation: %ui</h2>\n"
- "<table style=\"background-color:white\" cellspacing=\"0\" "
- " cellpadding=\"3\" border=\"1\">\n"
- " <tr bgcolor=\"#C0C0C0\">\n"
- " <th>Index</th>\n"
- " <th>Upstream</th>\n"
- " <th>Name</th>\n"
- " <th>Status</th>\n"
- " <th>Rise counts</th>\n"
- " <th>Fall counts</th>\n"
- " <th>Check type</th>\n"
- " <th>Check port</th>\n"
- " </tr>\n",
- count, ngx_http_upstream_check_shm_generation);
-
- for (i = 0; i < peers->peers.nelts; i++) {
-
- if (flag & NGX_CHECK_STATUS_DOWN) {
-
- if (!peer[i].shm->down) {
- continue;
- }
-
- } else if (flag & NGX_CHECK_STATUS_UP) {
-
- if (peer[i].shm->down) {
- continue;
- }
- }
-
- b->last = ngx_snprintf(b->last, b->end - b->last,
- " <tr%s>\n"
- " <td>%ui</td>\n"
- " <td>%V</td>\n"
- " <td>%V</td>\n"
- " <td>%s</td>\n"
- " <td>%ui</td>\n"
- " <td>%ui</td>\n"
- " <td>%V</td>\n"
- " <td>%ui</td>\n"
- " </tr>\n",
- peer[i].shm->down ? " bgcolor=\"#FF0000\"" : "",
- i,
- peer[i].upstream_name,
- &peer[i].peer_addr->name,
- peer[i].shm->down ? "down" : "up",
- peer[i].shm->rise_count,
- peer[i].shm->fall_count,
- &peer[i].conf->check_type_conf->name,
- peer[i].conf->port);
- }
-
- b->last = ngx_snprintf(b->last, b->end - b->last,
- "</table>\n"
- "</body>\n"
- "</html>\n");
-}
-
-
-static void
-ngx_http_upstream_check_status_csv_format(ngx_buf_t *b,
- ngx_http_upstream_check_peers_t *peers, ngx_uint_t flag)
-{
- ngx_uint_t i;
- ngx_http_upstream_check_peer_t *peer;
-
- peer = peers->peers.elts;
- for (i = 0; i < peers->peers.nelts; i++) {
-
- if (flag & NGX_CHECK_STATUS_DOWN) {
-
- if (!peer[i].shm->down) {
- continue;
- }
-
- } else if (flag & NGX_CHECK_STATUS_UP) {
-
- if (peer[i].shm->down) {
- continue;
- }
- }
-
- b->last = ngx_snprintf(b->last, b->end - b->last,
- "%ui,%V,%V,%s,%ui,%ui,%V,%ui\n",
- i,
- peer[i].upstream_name,
- &peer[i].peer_addr->name,
- peer[i].shm->down ? "down" : "up",
- peer[i].shm->rise_count,
- peer[i].shm->fall_count,
- &peer[i].conf->check_type_conf->name,
- peer[i].conf->port);
- }
-}
-
-
-static void
-ngx_http_upstream_check_status_json_format(ngx_buf_t *b,
- ngx_http_upstream_check_peers_t *peers, ngx_uint_t flag)
-{
- ngx_uint_t count, i, last;
- ngx_http_upstream_check_peer_t *peer;
-
- peer = peers->peers.elts;
-
- count = 0;
-
- for (i = 0; i < peers->peers.nelts; i++) {
-
- if (flag & NGX_CHECK_STATUS_DOWN) {
-
- if (!peer[i].shm->down) {
- continue;
- }
-
- } else if (flag & NGX_CHECK_STATUS_UP) {
-
- if (peer[i].shm->down) {
- continue;
- }
- }
+ ucscf = ngx_http_conf_upstream_srv_conf(us, ngx_http_upstream_check_module);
- count++;
+ if (ucscf->check_interval == 0) {
+ return NGX_ERROR;
}
- b->last = ngx_snprintf(b->last, b->end - b->last,
- "{\"servers\": {\n"
- " \"total\": %ui,\n"
- " \"generation\": %ui,\n"
- " \"server\": [\n",
- count,
- ngx_http_upstream_check_shm_generation);
-
- last = peers->peers.nelts - 1;
- for (i = 0; i < peers->peers.nelts; i++) {
-
- if (flag & NGX_CHECK_STATUS_DOWN) {
-
- if (!peer[i].shm->down) {
- continue;
- }
-
- } else if (flag & NGX_CHECK_STATUS_UP) {
-
- if (peer[i].shm->down) {
- continue;
- }
- }
+ ucmcf = ngx_http_conf_get_module_main_conf(cf,
+ ngx_http_upstream_check_module);
+ peers = ucmcf->peers;
- b->last = ngx_snprintf(b->last, b->end - b->last,
- " {\"index\": %ui, "
- "\"upstream\": \"%V\", "
- "\"name\": \"%V\", "
- "\"status\": \"%s\", "
- "\"rise\": %ui, "
- "\"fall\": %ui, "
- "\"type\": \"%V\", "
- "\"port\": %ui}"
- "%s\n",
- i,
- peer[i].upstream_name,
- &peer[i].peer_addr->name,
- peer[i].shm->down ? "down" : "up",
- peer[i].shm->rise_count,
- peer[i].shm->fall_count,
- &peer[i].conf->check_type_conf->name,
- peer[i].conf->port,
- (i == last) ? "" : ",");
+ peer = ngx_array_push(&peers->peers);
+ if (peer == NULL) {
+ return NGX_ERROR;
}
- b->last = ngx_snprintf(b->last, b->end - b->last,
- " ]\n");
-
- b->last = ngx_snprintf(b->last, b->end - b->last,
- "}}\n");
-}
-
-
-static ngx_check_conf_t *
-ngx_http_get_check_type_conf(ngx_str_t *str)
-{
- ngx_uint_t i;
-
- for (i = 0; /* void */ ; i++) {
-
- if (ngx_check_types[i].type == 0) {
- break;
- }
+ ngx_memzero(peer, sizeof(ngx_http_check_peer_t));
- if (str->len != ngx_check_types[i].name.len) {
- continue;
- }
+ peer->index = peers->peers.nelts - 1;
+ peer->conf = ucscf;
+ peer->upstream_name = &us->host;
+ peer->peer_addr = peer_addr;
- if (ngx_strncmp(str->data, ngx_check_types[i].name.data,
- str->len) == 0)
- {
- return &ngx_check_types[i];
- }
- }
+ peers->checksum +=
+ ngx_murmur_hash2(peer_addr->name.data, peer_addr->name.len);
- return NULL;
+ return peer->index;
}
@@ -3069,12 +177,11 @@
ngx_http_upstream_check(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
ngx_str_t *value, s;
- ngx_uint_t i, port, rise, fall, default_down;
+ ngx_uint_t i, rise, fall, default_down;
ngx_msec_t interval, timeout;
ngx_http_upstream_check_srv_conf_t *ucscf;
- /* default values */
- port = 0;
+ /* set default */
rise = 2;
fall = 5;
interval = 30000;
@@ -3104,24 +211,14 @@
continue;
}
- if (ngx_strncmp(value[i].data, "port=", 5) == 0) {
- s.len = value[i].len - 5;
- s.data = value[i].data + 5;
-
- port = ngx_atoi(s.data, s.len);
- if (port == (ngx_uint_t) NGX_ERROR || port == 0) {
- goto invalid_check_parameter;
- }
-
- continue;
- }
-
if (ngx_strncmp(value[i].data, "interval=", 9) == 0) {
s.len = value[i].len - 9;
s.data = value[i].data + 9;
interval = ngx_atoi(s.data, s.len);
- if (interval == (ngx_msec_t) NGX_ERROR || interval == 0) {
+ if (interval == (ngx_msec_t) NGX_ERROR) {
+ goto invalid_check_parameter;
+ } else if (interval == 0) {
goto invalid_check_parameter;
}
@@ -3133,7 +230,9 @@
s.data = value[i].data + 8;
timeout = ngx_atoi(s.data, s.len);
- if (timeout == (ngx_msec_t) NGX_ERROR || timeout == 0) {
+ if (timeout == (ngx_msec_t) NGX_ERROR) {
+ goto invalid_check_parameter;
+ } else if (timeout == 0) {
goto invalid_check_parameter;
}
@@ -3145,7 +244,9 @@
s.data = value[i].data + 5;
rise = ngx_atoi(s.data, s.len);
- if (rise == (ngx_uint_t) NGX_ERROR || rise == 0) {
+ if (rise == (ngx_uint_t) NGX_ERROR) {
+ goto invalid_check_parameter;
+ } else if (rise == 0) {
goto invalid_check_parameter;
}
@@ -3157,7 +258,9 @@
s.data = value[i].data + 5;
fall = ngx_atoi(s.data, s.len);
- if (fall == (ngx_uint_t) NGX_ERROR || fall == 0) {
+ if (fall == (ngx_uint_t) NGX_ERROR) {
+ goto invalid_check_parameter;
+ } else if (fall == 0) {
goto invalid_check_parameter;
}
@@ -3186,7 +289,6 @@
goto invalid_check_parameter;
}
- ucscf->port = port;
ucscf->check_interval = interval;
ucscf->check_timeout = timeout;
ucscf->fall_count = fall;
@@ -3194,87 +296,34 @@
ucscf->default_down = default_down;
if (ucscf->check_type_conf == NGX_CONF_UNSET_PTR) {
- ngx_str_set(&s, "tcp");
- ucscf->check_type_conf = ngx_http_get_check_type_conf(&s);
- }
-
- return NGX_CONF_OK;
-
-invalid_check_parameter:
-
- ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
- "invalid parameter \"%V\"", &value[i]);
-
- return NGX_CONF_ERROR;
-}
-
-
-static char *
-ngx_http_upstream_check_keepalive_requests(ngx_conf_t *cf, ngx_command_t *cmd,
- void *conf)
-{
- ngx_str_t *value;
- ngx_http_upstream_check_srv_conf_t *ucscf;
- ngx_uint_t requests;
-
- value = cf->args->elts;
-
- ucscf = ngx_http_conf_get_module_srv_conf(cf,
- ngx_http_upstream_check_module);
+ s.len = sizeof("tcp") - 1;
+ s.data = (u_char *) "tcp";
- requests = ngx_atoi(value[1].data, value[1].len);
- if (requests == (ngx_uint_t) NGX_ERROR || requests == 0) {
- return "invalid value";
+ ucscf->check_type_conf = ngx_http_get_check_type_conf(&s);
}
- ucscf->check_keepalive_requests = requests;
-
return NGX_CONF_OK;
-}
-
-
-static char *
-ngx_http_upstream_check_http_send(ngx_conf_t *cf, ngx_command_t *cmd,
- void *conf)
-{
- ngx_str_t *value;
- ngx_http_upstream_check_srv_conf_t *ucscf;
- value = cf->args->elts;
-
- ucscf = ngx_http_conf_get_module_srv_conf(cf,
- ngx_http_upstream_check_module);
+invalid_check_parameter:
- ucscf->send = value[1];
+ ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
+ "invalid parameter \"%V\"", &value[i]);
- return NGX_CONF_OK;
+ return NGX_CONF_ERROR;
}
static char *
-ngx_http_upstream_check_fastcgi_params(ngx_conf_t *cf, ngx_command_t *cmd,
- void *conf)
+ngx_http_upstream_check_http_send(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
- ngx_str_t *value, *k, *v;
+ ngx_str_t *value;
ngx_http_upstream_check_srv_conf_t *ucscf;
value = cf->args->elts;
- ucscf = ngx_http_conf_get_module_srv_conf(cf,
- ngx_http_upstream_check_module);
-
- k = ngx_array_push(ucscf->fastcgi_params);
- if (k == NULL) {
- return NGX_CONF_ERROR;
- }
-
- v = ngx_array_push(ucscf->fastcgi_params);
- if (v == NULL) {
- return NGX_CONF_ERROR;
- }
+ ucscf = ngx_http_conf_get_module_srv_conf(cf, ngx_http_upstream_check_module);
- *k = value[1];
- *v = value[2];
+ ucscf->send = value[1];
return NGX_CONF_OK;
}
@@ -3282,7 +331,7 @@
static char *
ngx_http_upstream_check_http_expect_alive(ngx_conf_t *cf, ngx_command_t *cmd,
- void *conf)
+ void *conf)
{
ngx_str_t *value;
ngx_uint_t bit, i, m;
@@ -3337,7 +386,8 @@
ngx_http_upstream_check_main_conf_t *ucmcf;
ucmcf = ngx_http_conf_get_module_main_conf(cf,
- ngx_http_upstream_check_module);
+ ngx_http_upstream_check_module);
+
if (ucmcf->check_shm_size) {
return "is duplicate";
}
@@ -3353,58 +403,16 @@
}
-static ngx_check_status_conf_t *
-ngx_http_get_check_status_format_conf(ngx_str_t *str)
-{
- ngx_uint_t i;
-
- for (i = 0; /* void */ ; i++) {
-
- if (ngx_check_status_formats[i].format.len == 0) {
- break;
- }
-
- if (str->len != ngx_check_status_formats[i].format.len) {
- continue;
- }
-
- if (ngx_strncmp(str->data, ngx_check_status_formats[i].format.data,
- str->len) == 0)
- {
- return &ngx_check_status_formats[i];
- }
- }
-
- return NULL;
-}
-
-
static char *
-ngx_http_upstream_check_status(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
+ngx_http_upstream_check_status(ngx_conf_t *cf,
+ ngx_command_t *cmd, void *conf)
{
- ngx_str_t *value;
- ngx_http_core_loc_conf_t *clcf;
- ngx_http_upstream_check_loc_conf_t *uclcf;
-
- value = cf->args->elts;
+ ngx_http_core_loc_conf_t *clcf;
clcf = ngx_http_conf_get_module_loc_conf(cf, ngx_http_core_module);
clcf->handler = ngx_http_upstream_check_status_handler;
- if (cf->args->nelts == 2) {
- uclcf = ngx_http_conf_get_module_loc_conf(cf,
- ngx_http_upstream_check_module);
-
- uclcf->format = ngx_http_get_check_status_format_conf(&value[1]);
- if (uclcf->format == NULL) {
- ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
- "invalid check format \"%V\"", &value[1]);
-
- return NGX_CONF_ERROR;
- }
- }
-
return NGX_CONF_OK;
}
@@ -3419,8 +427,7 @@
return NULL;
}
- ucmcf->peers = ngx_pcalloc(cf->pool,
- sizeof(ngx_http_upstream_check_peers_t));
+ ucmcf->peers = ngx_pcalloc(cf->pool, sizeof(ngx_http_check_peers_t));
if (ucmcf->peers == NULL) {
return NULL;
}
@@ -3428,7 +435,7 @@
ucmcf->peers->checksum = 0;
if (ngx_array_init(&ucmcf->peers->peers, cf->pool, 16,
- sizeof(ngx_http_upstream_check_peer_t)) != NGX_OK)
+ sizeof(ngx_http_check_peer_t)) != NGX_OK)
{
return NULL;
}
@@ -3437,133 +444,14 @@
}
-static ngx_buf_t *
-ngx_http_upstream_check_create_fastcgi_request(ngx_pool_t *pool,
- ngx_str_t *params, ngx_uint_t num)
-{
- size_t size, len, padding;
- ngx_buf_t *b;
- ngx_str_t *k, *v;
- ngx_uint_t i, j;
- ngx_http_fastcgi_header_t *h;
-
- len = 0;
- for (i = 0, j = 0; i < num; i++, j = i * 2) {
- k = &params[j];
- v = &params[j + 1];
-
- len += 1 + k->len + ((v->len > 127) ? 4 : 1) + v->len;
- }
-
- padding = 8 - len % 8;
- padding = (padding == 8) ? 0 : padding;
-
- size = sizeof(ngx_http_fastcgi_header_t)
- + sizeof(ngx_http_fastcgi_begin_request_t)
-
- + sizeof(ngx_http_fastcgi_header_t) /* NGX_HTTP_FASTCGI_PARAMS */
- + len + padding
- + sizeof(ngx_http_fastcgi_header_t) /* NGX_HTTP_FASTCGI_PARAMS */
-
- + sizeof(ngx_http_fastcgi_header_t); /* NGX_HTTP_FASTCGI_STDIN */
-
-
- b = ngx_create_temp_buf(pool, size);
- if (b == NULL) {
- return NULL;
- }
-
- ngx_http_fastcgi_request_start.br.flags = 0;
-
- ngx_memcpy(b->pos, &ngx_http_fastcgi_request_start,
- sizeof(ngx_http_fastcgi_request_start_t));
-
- h = (ngx_http_fastcgi_header_t *)
- (b->pos + sizeof(ngx_http_fastcgi_header_t)
- + sizeof(ngx_http_fastcgi_begin_request_t));
-
- h->content_length_hi = (u_char) ((len >> 8) & 0xff);
- h->content_length_lo = (u_char) (len & 0xff);
- h->padding_length = (u_char) padding;
- h->reserved = 0;
-
- b->last = b->pos + sizeof(ngx_http_fastcgi_header_t)
- + sizeof(ngx_http_fastcgi_begin_request_t)
- + sizeof(ngx_http_fastcgi_header_t);
-
- for (i = 0, j = 0; i < num; i++, j = i * 2) {
- k = &params[j];
- v = &params[j + 1];
-
- if (k->len > 127) {
- *b->last++ = (u_char) (((k->len >> 24) & 0x7f) | 0x80);
- *b->last++ = (u_char) ((k->len >> 16) & 0xff);
- *b->last++ = (u_char) ((k->len >> 8) & 0xff);
- *b->last++ = (u_char) (k->len & 0xff);
-
- } else {
- *b->last++ = (u_char) k->len;
- }
-
- if (v->len > 127) {
- *b->last++ = (u_char) (((v->len >> 24) & 0x7f) | 0x80);
- *b->last++ = (u_char) ((v->len >> 16) & 0xff);
- *b->last++ = (u_char) ((v->len >> 8) & 0xff);
- *b->last++ = (u_char) (v->len & 0xff);
-
- } else {
- *b->last++ = (u_char) v->len;
- }
-
- b->last = ngx_copy(b->last, k->data, k->len);
- b->last = ngx_copy(b->last, v->data, v->len);
- }
-
- if (padding) {
- ngx_memzero(b->last, padding);
- b->last += padding;
- }
-
- h = (ngx_http_fastcgi_header_t *) b->last;
- b->last += sizeof(ngx_http_fastcgi_header_t);
-
- h->version = 1;
- h->type = NGX_HTTP_FASTCGI_PARAMS;
- h->request_id_hi = 0;
- h->request_id_lo = 1;
- h->content_length_hi = 0;
- h->content_length_lo = 0;
- h->padding_length = 0;
- h->reserved = 0;
-
- h = (ngx_http_fastcgi_header_t *) b->last;
- b->last += sizeof(ngx_http_fastcgi_header_t);
-
- return b;
-}
-
-
static char *
ngx_http_upstream_check_init_main_conf(ngx_conf_t *cf, void *conf)
{
- ngx_buf_t *b;
- ngx_uint_t i;
- ngx_http_upstream_srv_conf_t **uscfp;
- ngx_http_upstream_main_conf_t *umcf;
+ ngx_uint_t i;
+ ngx_http_upstream_srv_conf_t **uscfp;
+ ngx_http_upstream_main_conf_t *umcf;
umcf = ngx_http_conf_get_module_main_conf(cf, ngx_http_upstream_module);
-
- b = ngx_http_upstream_check_create_fastcgi_request(cf->pool,
- fastcgi_default_params,
- sizeof(fastcgi_default_params) / sizeof(ngx_str_t) / 2);
-
- if (b == NULL) {
- return NGX_CONF_ERROR;
- }
-
- fastcgi_default_request.data = b->pos;
- fastcgi_default_request.len = b->last - b->pos;
-
uscfp = umcf->upstreams.elts;
for (i = 0; i < umcf->upstreams.nelts; i++) {
@@ -3587,46 +475,21 @@
return NULL;
}
- ucscf->fastcgi_params = ngx_array_create(cf->pool, 2 * 4, sizeof(ngx_str_t));
- if (ucscf->fastcgi_params == NULL) {
- return NULL;
- }
-
- ucscf->port = NGX_CONF_UNSET_UINT;
ucscf->fall_count = NGX_CONF_UNSET_UINT;
ucscf->rise_count = NGX_CONF_UNSET_UINT;
ucscf->check_timeout = NGX_CONF_UNSET_MSEC;
- ucscf->check_keepalive_requests = NGX_CONF_UNSET_UINT;
ucscf->check_type_conf = NGX_CONF_UNSET_PTR;
return ucscf;
}
-static void *
-ngx_http_upstream_check_create_loc_conf(ngx_conf_t *cf)
-{
- ngx_http_upstream_check_loc_conf_t *uclcf;
-
- uclcf = ngx_pcalloc(cf->pool, sizeof(ngx_http_upstream_check_loc_conf_t));
- if (uclcf == NULL) {
- return NULL;
- }
-
- uclcf->format = NGX_CONF_UNSET_PTR;
-
- return uclcf;
-}
-
-
static char *
ngx_http_upstream_check_init_srv_conf(ngx_conf_t *cf, void *conf)
{
- ngx_str_t s;
- ngx_buf_t *b;
- ngx_check_conf_t *check;
- ngx_http_upstream_srv_conf_t *us = conf;
- ngx_http_upstream_check_srv_conf_t *ucscf;
+ check_conf_t *check;
+ ngx_http_upstream_srv_conf_t *us = conf;
+ ngx_http_upstream_check_srv_conf_t *ucscf;
if (us->srv_conf == NULL) {
return NGX_CONF_OK;
@@ -3634,10 +497,6 @@
ucscf = ngx_http_conf_upstream_srv_conf(us, ngx_http_upstream_check_module);
- if (ucscf->port == NGX_CONF_UNSET_UINT) {
- ucscf->port = 0;
- }
-
if (ucscf->fall_count == NGX_CONF_UNSET_UINT) {
ucscf->fall_count = 2;
}
@@ -3654,44 +513,17 @@
ucscf->check_timeout = 1000;
}
- if (ucscf->check_keepalive_requests == NGX_CONF_UNSET_UINT) {
- ucscf->check_keepalive_requests = 1;
- }
-
if (ucscf->check_type_conf == NGX_CONF_UNSET_PTR) {
ucscf->check_type_conf = NULL;
}
check = ucscf->check_type_conf;
-
if (check) {
if (ucscf->send.len == 0) {
- ngx_str_set(&s, "fastcgi");
-
- if (check == ngx_http_get_check_type_conf(&s)) {
-
- if (ucscf->fastcgi_params->nelts == 0) {
- ucscf->send.data = fastcgi_default_request.data;
- ucscf->send.len = fastcgi_default_request.len;
-
- } else {
- b = ngx_http_upstream_check_create_fastcgi_request(
- cf->pool, ucscf->fastcgi_params->elts,
- ucscf->fastcgi_params->nelts / 2);
- if (b == NULL) {
- return NGX_CONF_ERROR;
- }
-
- ucscf->send.data = b->pos;
- ucscf->send.len = b->last - b->pos;
- }
- } else {
- ucscf->send.data = check->default_send.data;
- ucscf->send.len = check->default_send.len;
- }
+ ucscf->send.data = check->default_send.data;
+ ucscf->send.len = check->default_send.len;
}
-
if (ucscf->code.status_alive == 0) {
ucscf->code.status_alive = check->default_status_alive;
}
@@ -3701,366 +533,8 @@
}
-static char *
-ngx_http_upstream_check_merge_loc_conf(ngx_conf_t *cf, void *parent,
- void *child)
-{
- ngx_str_t format = ngx_string("html");
- ngx_http_upstream_check_loc_conf_t *prev = parent;
- ngx_http_upstream_check_loc_conf_t *conf = child;
-
- ngx_conf_merge_ptr_value(conf->format, prev->format,
- ngx_http_get_check_status_format_conf(&format));
-
- return NGX_CONF_OK;
-}
-
-
-static char *
-ngx_http_upstream_check_init_shm(ngx_conf_t *cf, void *conf)
-{
- ngx_str_t *shm_name;
- ngx_uint_t shm_size;
- ngx_shm_zone_t *shm_zone;
- ngx_http_upstream_check_main_conf_t *ucmcf = conf;
-
- if (ucmcf->peers->peers.nelts > 0) {
-
- ngx_http_upstream_check_shm_generation++;
-
- shm_name = &ucmcf->peers->check_shm_name;
-
- ngx_http_upstream_check_get_shm_name(shm_name, cf->pool,
- ngx_http_upstream_check_shm_generation);
-
- /* The default check shared memory size is 1M */
- shm_size = 1 * 1024 * 1024;
-
- shm_size = shm_size < ucmcf->check_shm_size ?
- ucmcf->check_shm_size : shm_size;
-
- shm_zone = ngx_shared_memory_add(cf, shm_name, shm_size,
- &ngx_http_upstream_check_module);
-
- ngx_log_debug2(NGX_LOG_DEBUG_HTTP, cf->log, 0,
- "http upstream check, upsteam:%V, shm_zone size:%ui",
- shm_name, shm_size);
-
- shm_zone->data = cf->pool;
- check_peers_ctx = ucmcf->peers;
-
- shm_zone->init = ngx_http_upstream_check_init_shm_zone;
- }
- else {
- check_peers_ctx = NULL;
- }
-
- return NGX_CONF_OK;
-}
-
-
-static ngx_int_t
-ngx_http_upstream_check_get_shm_name(ngx_str_t *shm_name, ngx_pool_t *pool,
- ngx_uint_t generation)
-{
- u_char *last;
-
- shm_name->data = ngx_palloc(pool, SHM_NAME_LEN);
- if (shm_name->data == NULL) {
- return NGX_ERROR;
- }
-
- last = ngx_snprintf(shm_name->data, SHM_NAME_LEN, "%s#%ui",
- "ngx_http_upstream_check", generation);
-
- shm_name->len = last - shm_name->data;
-
- return NGX_OK;
-}
-
-
-static ngx_int_t
-ngx_http_upstream_check_init_shm_zone(ngx_shm_zone_t *shm_zone, void *data)
-{
- size_t size;
- ngx_str_t oshm_name;
- ngx_int_t rc;
- ngx_uint_t i, same, number;
- ngx_pool_t *pool;
- ngx_shm_zone_t *oshm_zone;
- ngx_slab_pool_t *shpool;
- ngx_http_upstream_check_peer_t *peer;
- ngx_http_upstream_check_peers_t *peers;
- ngx_http_upstream_check_srv_conf_t *ucscf;
- ngx_http_upstream_check_peer_shm_t *peer_shm, *opeer_shm;
- ngx_http_upstream_check_peers_shm_t *peers_shm, *opeers_shm;
-
- opeers_shm = NULL;
- peers_shm = NULL;
- ngx_str_null(&oshm_name);
-
- same = 0;
- peers = check_peers_ctx;
- if (peers == NULL) {
- return NGX_OK;
- }
-
- number = peers->peers.nelts;
- if (number == 0) {
- return NGX_OK;
- }
-
- pool = shm_zone->data;
- if (pool == NULL) {
- pool = ngx_cycle->pool;
- }
-
- shpool = (ngx_slab_pool_t *) shm_zone->shm.addr;
-
- if (data) {
- opeers_shm = data;
-
- if ((opeers_shm->number == number)
- && (opeers_shm->checksum == peers->checksum)) {
-
- peers_shm = data;
- same = 1;
- }
- }
-
- if (!same) {
-
- if (ngx_http_upstream_check_shm_generation > 1) {
-
- ngx_http_upstream_check_get_shm_name(&oshm_name,
- pool, ngx_http_upstream_check_shm_generation - 1);
-
- /* The global variable ngx_cycle still points to the old one */
- oshm_zone = ngx_shared_memory_find((ngx_cycle_t *) ngx_cycle,
- &oshm_name,
- &ngx_http_upstream_check_module);
-
- if (oshm_zone) {
- opeers_shm = oshm_zone->data;
-
- ngx_log_debug2(NGX_LOG_DEBUG_HTTP, shm_zone->shm.log, 0,
- "http upstream check, find oshm_zone:%p, "
- "opeers_shm: %p",
- oshm_zone, opeers_shm);
- }
- }
-
- size = sizeof(*peers_shm) +
- (number - 1) * sizeof(ngx_http_upstream_check_peer_shm_t);
-
- peers_shm = ngx_slab_alloc(shpool, size);
-
- if (peers_shm == NULL) {
- goto failure;
- }
-
- ngx_memzero(peers_shm, size);
- }
-
- peers_shm->generation = ngx_http_upstream_check_shm_generation;
- peers_shm->checksum = peers->checksum;
- peers_shm->number = number;
-
- peer = peers->peers.elts;
-
- for (i = 0; i < number; i++) {
-
- peer_shm = &peers_shm->peers[i];
-
- /*
- * This function may be triggered before the old stale
- * work process exits. The owner may stick to the old
- * pid.
- */
- peer_shm->owner = NGX_INVALID_PID;
-
- if (same) {
- continue;
- }
-
- peer_shm->socklen = peer[i].peer_addr->socklen;
- peer_shm->sockaddr = ngx_slab_alloc(shpool, peer_shm->socklen);
- if (peer_shm->sockaddr == NULL) {
- goto failure;
- }
-
- ngx_memcpy(peer_shm->sockaddr, peer[i].peer_addr->sockaddr,
- peer_shm->socklen);
-
- if (opeers_shm) {
-
- opeer_shm = ngx_http_upstream_check_find_shm_peer(opeers_shm,
- peer[i].peer_addr,
- peer[i].upstream_name);
- if (opeer_shm) {
- ngx_log_debug1(NGX_LOG_DEBUG_HTTP, shm_zone->shm.log, 0,
- "http upstream check, inherit opeer: %V ",
- &peer[i].peer_addr->name);
-
- rc = ngx_http_upstream_check_init_shm_peer(peer_shm, opeer_shm,
- 0, pool, &peer[i].peer_addr->name, peer[i].upstream_name);
- if (rc != NGX_OK) {
- return NGX_ERROR;
- }
-
- continue;
- }
- }
-
- ucscf = peer[i].conf;
- rc = ngx_http_upstream_check_init_shm_peer(peer_shm, NULL,
- ucscf->default_down, pool,
- &peer[i].peer_addr->name,
- peer[i].upstream_name);
- if (rc != NGX_OK) {
- return NGX_ERROR;
- }
- }
-
- peers->peers_shm = peers_shm;
- shm_zone->data = peers_shm;
-
- return NGX_OK;
-
-failure:
- ngx_log_error(NGX_LOG_EMERG, shm_zone->shm.log, 0,
- "http upstream check_shm_size is too small, "
- "you should specify a larger size.");
- return NGX_ERROR;
-}
-
-
-static ngx_shm_zone_t *
-ngx_shared_memory_find(ngx_cycle_t *cycle, ngx_str_t *name, void *tag)
-{
- ngx_uint_t i;
- ngx_shm_zone_t *shm_zone;
- ngx_list_part_t *part;
-
- part = (ngx_list_part_t *) &(cycle->shared_memory.part);
- shm_zone = part->elts;
-
- for (i = 0; /* void */ ; i++) {
-
- if (i >= part->nelts) {
- if (part->next == NULL) {
- break;
- }
- part = part->next;
- shm_zone = part->elts;
- i = 0;
- }
-
- if (name->len != shm_zone[i].shm.name.len) {
- continue;
- }
-
- if (ngx_strncmp(name->data, shm_zone[i].shm.name.data, name->len) != 0)
- {
- continue;
- }
-
- if (tag != shm_zone[i].tag) {
- continue;
- }
-
- return &shm_zone[i];
- }
-
- return NULL;
-}
-
-
-static ngx_http_upstream_check_peer_shm_t *
-ngx_http_upstream_check_find_shm_peer(ngx_http_upstream_check_peers_shm_t *p,
- ngx_addr_t *addr, ngx_str_t *upstream_name)
-{
- ngx_uint_t i;
- ngx_http_upstream_check_peer_shm_t *peer_shm;
-
- for (i = 0; i < p->number; i++) {
-
- peer_shm = &p->peers[i];
-
- if (addr->socklen != peer_shm->socklen) {
- continue;
- }
-
- if (ngx_memcmp(addr->sockaddr, peer_shm->sockaddr, addr->socklen) == 0
- && upstream_name->len == peer_shm->upstream_name->len
- && ngx_strncmp(upstream_name->data, peer_shm->upstream_name->data, upstream_name->len) == 0) {
- return peer_shm;
- }
- }
-
- return NULL;
-}
-
-
-static ngx_int_t
-ngx_http_upstream_check_init_shm_peer(ngx_http_upstream_check_peer_shm_t *psh,
- ngx_http_upstream_check_peer_shm_t *opsh, ngx_uint_t init_down,
- ngx_pool_t *pool, ngx_str_t *name, ngx_str_t *upstream_name)
-{
- u_char *file;
-
- if (opsh) {
- psh->access_time = opsh->access_time;
- psh->access_count = opsh->access_count;
-
- psh->fall_count = opsh->fall_count;
- psh->rise_count = opsh->rise_count;
- psh->busyness = opsh->busyness;
-
- psh->down = opsh->down;
- psh->upstream_name = opsh->upstream_name;
-
- } else {
- psh->access_time = 0;
- psh->access_count = 0;
-
- psh->fall_count = 0;
- psh->rise_count = 0;
- psh->busyness = 0;
-
- psh->down = init_down;
- psh->upstream_name = upstream_name;
- }
-
-#if (NGX_HAVE_ATOMIC_OPS)
-
- file = NULL;
-
-#else
-
- file = ngx_pnalloc(pool, ngx_cycle->lock_file.len + name->len);
- if (file == NULL) {
- return NGX_ERROR;
- }
-
- (void) ngx_sprintf(file, "%V%V%Z", &ngx_cycle->lock_file, name);
-
-#endif
-
-#if (nginx_version >= 1002000)
- if (ngx_shmtx_create(&psh->mutex, &psh->lock, file) != NGX_OK) {
-#else
- if (ngx_shmtx_create(&psh->mutex, (void *) &psh->lock, file) != NGX_OK) {
-#endif
- return NGX_ERROR;
- }
-
- return NGX_OK;
-}
-
-
static ngx_int_t
-ngx_http_upstream_check_init_process(ngx_cycle_t *cycle)
+ngx_http_check_init_process(ngx_cycle_t *cycle)
{
- return ngx_http_upstream_check_add_timers(cycle);
+ return ngx_http_check_add_timers(cycle);
}
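For orientation, the check directive parameters parsed in the hunks above (interval=, timeout=, rise=, fall=, with the check type falling back to "tcp" when no type= is given) correspond to an upstream block of the following shape. This is a minimal sketch assembled from the test configs later in this changeset; the backend addresses are placeholders:

    upstream backend {
        server 127.0.0.1:1970;
        server 127.0.0.1:1971;

        # interval/timeout are in milliseconds, rise/fall are probe counts;
        # without type=, the parser above defaults the check type to "tcp".
        check interval=3000 rise=1 fall=5 timeout=1000 type=http;
        check_http_send "GET / HTTP/1.0\r\n\r\n";
        check_http_expect_alive http_2xx http_3xx;
    }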
Changed | nginx_upstream_check_module-master.tar.gz/ngx_http_upstream_check_module.h
@@ -1,19 +1,101 @@
#ifndef _NGX_HTTP_UPSTREAM_CHECK_MODELE_H_INCLUDED_
#define _NGX_HTTP_UPSTREAM_CHECK_MODELE_H_INCLUDED_
-
#include <ngx_config.h>
#include <ngx_core.h>
+#include <ngx_event.h>
+#include <ngx_event_connect.h>
+#include <ngx_event_pipe.h>
#include <ngx_http.h>
+#include <nginx.h>
+
+
+typedef struct ngx_http_check_peer_s ngx_http_check_peer_t;
+typedef struct ngx_http_check_peers_s ngx_http_check_peers_t;
+typedef struct check_conf_s check_conf_t;
+
+/* make nginx-0.8.22+ happy */
+#if defined(nginx_version) && nginx_version >= 8022
+typedef ngx_addr_t ngx_peer_addr_t;
+#endif
+
+typedef ngx_int_t (*ngx_http_check_packet_init_pt)(ngx_http_check_peer_t *peer);
+typedef ngx_int_t (*ngx_http_check_packet_parse_pt)(ngx_http_check_peer_t *peer);
+typedef void (*ngx_http_check_packet_clean_pt)(ngx_http_check_peer_t *peer);
+
+#define NGX_HTTP_CHECK_TCP 0x0001
+#define NGX_HTTP_CHECK_HTTP 0x0002
+#define NGX_HTTP_CHECK_SSL_HELLO 0x0004
+#define NGX_HTTP_CHECK_SMTP 0x0008
+#define NGX_HTTP_CHECK_MYSQL 0x0010
+#define NGX_HTTP_CHECK_POP3 0x0020
+#define NGX_HTTP_CHECK_IMAP 0x0040
+#define NGX_HTTP_CHECK_AJP 0x0080
+
+
+#define NGX_CHECK_HTTP_2XX 0x0002
+#define NGX_CHECK_HTTP_3XX 0x0004
+#define NGX_CHECK_HTTP_4XX 0x0008
+#define NGX_CHECK_HTTP_5XX 0x0010
+#define NGX_CHECK_HTTP_6XX 0x0020
+#define NGX_CHECK_HTTP_ERR 0x8000
+
+#define NGX_CHECK_SMTP_2XX 0x0002
+#define NGX_CHECK_SMTP_3XX 0x0004
+#define NGX_CHECK_SMTP_4XX 0x0008
+#define NGX_CHECK_SMTP_5XX 0x0010
+#define NGX_CHECK_SMTP_6XX 0x0020
+#define NGX_CHECK_SMTP_ERR 0x8000
+
+struct check_conf_s {
+ ngx_uint_t type;
+
+ char *name;
+
+ ngx_str_t default_send;
+
+ /* HTTP */
+ ngx_uint_t default_status_alive;
+
+ ngx_event_handler_pt send_handler;
+ ngx_event_handler_pt recv_handler;
+
+ ngx_http_check_packet_init_pt init;
+ ngx_http_check_packet_parse_pt parse;
+ ngx_http_check_packet_clean_pt reinit;
+
+ unsigned need_pool;
+};
+
+typedef struct {
+ ngx_uint_t check_shm_size;
+ ngx_http_check_peers_t *peers;
+} ngx_http_upstream_check_main_conf_t;
+
+typedef struct {
+ ngx_uint_t fall_count;
+ ngx_uint_t rise_count;
+ ngx_msec_t check_interval;
+ ngx_msec_t check_timeout;
+
+ check_conf_t *check_type_conf;
+ ngx_str_t send;
+
+ union {
+ ngx_uint_t return_code;
+ ngx_uint_t status_alive;
+ } code;
+
+ ngx_uint_t default_down;
+} ngx_http_upstream_check_srv_conf_t;
-ngx_uint_t ngx_http_upstream_check_add_peer(ngx_conf_t *cf,
- ngx_http_upstream_srv_conf_t *us, ngx_addr_t *peer);
-ngx_uint_t ngx_http_upstream_check_peer_down(ngx_uint_t index);
+ngx_uint_t ngx_http_check_add_peer(ngx_conf_t *cf,
+ ngx_http_upstream_srv_conf_t *us, ngx_peer_addr_t *peer);
-void ngx_http_upstream_check_get_peer(ngx_uint_t index);
-void ngx_http_upstream_check_free_peer(ngx_uint_t index);
+check_conf_t *ngx_http_get_check_type_conf(ngx_str_t *str);
+extern ngx_module_t ngx_http_upstream_check_module;
#endif //_NGX_HTTP_UPSTREAM_CHECK_MODELE_H_INCLUDED_
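The ngx_http_check_add_peer() declared above is the hook that the load-balancer patches below call into; the reporting side of the module is exposed through the check_status directive kept in the .c diff. A minimal sketch of such a status location, mirroring the deleted check_interface.t tests (the listen port is a placeholder):

    server {
        listen 8080;

        location /status {
            # handled by ngx_http_upstream_check_status_handler;
            # the per-location format argument was removed in this patch.
            check_status;
        }
    }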
Changed | nginx_upstream_check_module-master.tar.gz/ngx_http_upstream_jvm_route_module.patch
@@ -6,8 +6,8 @@
#include <ngx_core.h>
#include <ngx_http.h>
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
++#if (NGX_UPSTREAM_CHECK_MODULE)
++#include "ngx_http_upstream_check_handler.h"
+#endif
+
@@ -17,7 +17,7 @@
time_t fail_timeout;
ngx_uint_t down; /* unsigned down:1; */
ngx_str_t srun_id;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ ngx_uint_t check_index;
+#endif
@@ -27,10 +27,10 @@
peers->peer[n].fail_timeout = server[i].fail_timeout;
peers->peer[n].down = server[i].down;
peers->peer[n].weight = server[i].down ? 0 : server[i].weight;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ if (!server[i].down) {
+ peers->peer[n].check_index =
-+ ngx_http_upstream_check_add_peer(cf, us, &server[i].addrs[j]);
++ ngx_http_check_add_peer(cf, us, &server[i].addrs[j]);
+ }
+ else {
+ peers->peer[n].check_index = (ngx_uint_t) NGX_ERROR;
@@ -43,10 +43,10 @@
backup->peer[n].max_busy = server[i].max_busy;
backup->peer[n].fail_timeout = server[i].fail_timeout;
backup->peer[n].down = server[i].down;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ if (!server[i].down) {
+ peers->peer[n].check_index =
-+ ngx_http_upstream_check_add_peer(cf, us, &server[i].addrs[j]);
++ ngx_http_check_add_peer(cf, us, &server[i].addrs[j]);
+ }
+ else {
+ peers->peer[n].check_index = (ngx_uint_t) NGX_ERROR;
@@ -59,7 +59,7 @@
peers->peer[i].max_fails = 1;
peers->peer[i].max_busy = 0;
peers->peer[i].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ peers->peer[i].check_index = (ngx_uint_t) NGX_ERROR;
+#endif
}
@@ -69,8 +69,8 @@
return NGX_BUSY;
}
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+ if (ngx_http_upstream_check_peer_down(peer->check_index)) {
++#if (NGX_UPSTREAM_CHECK_MODULE)
++ if (ngx_http_check_peer_down(peer->check_index)) {
+ return NGX_BUSY;
+ }
+#endif
Deleted | nginx_upstream_check_module-master.tar.gz/test/t/check_interface.t
@@ -1,522 +0,0 @@
-# vi:filetype=perl
-
-use lib 'lib';
-use Test::Nginx::LWP;
-
-plan tests => repeat_each(2) * 3 * blocks();
-
-no_root_location();
-
-run_tests();
-
-__DATA__
-
-=== TEST 1: the http_check interface, default type
---- http_config
-upstream backend {
- server 127.0.0.1:1971;
- server 127.0.0.1:1972;
- server 127.0.0.1:1973;
- server 127.0.0.1:1970;
- server 127.0.0.1:1974;
- server 127.0.0.1:1975;
-
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
- check_http_send "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n";
- check_http_expect_alive http_2xx http_3xx;
-}
-
-server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
-}
-
---- config
- location / {
- proxy_pass http://backend;
- }
-
- location /status {
- check_status;
- }
-
---- request
-GET /status
---- response_headers
-Content-Type: text/html
---- response_body_like: ^.*Check upstream server number: 6.*$
-
-=== TEST 2: the http_check interface, html
---- http_config
-upstream backend {
- server 127.0.0.1:1971;
- server 127.0.0.1:1972;
- server 127.0.0.1:1973;
- server 127.0.0.1:1970;
- server 127.0.0.1:1974;
- server 127.0.0.1:1975;
-
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
- check_http_send "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n";
- check_http_expect_alive http_2xx http_3xx;
-}
-
-server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
-}
-
-
---- config
- location / {
- proxy_pass http://backend;
- }
-
- location /status {
- check_status html;
- }
-
---- request
-GET /status
---- response_headers
-Content-Type: text/html
---- response_body_like: ^.*Check upstream server number: 6.*$
-
-=== TEST 3: the http_check interface, csv
---- http_config
-upstream backend {
- server 127.0.0.1:1971;
- server 127.0.0.1:1972;
- server 127.0.0.1:1973;
- server 127.0.0.1:1970;
- server 127.0.0.1:1974;
- server 127.0.0.1:1975;
-
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
- check_http_send "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n";
- check_http_expect_alive http_2xx http_3xx;
-}
-
-server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
-}
-
-
---- config
- location / {
- proxy_pass http://backend;
- }
-
- location /status {
- check_status csv;
- }
-
---- request
-GET /status
---- response_headers
-Content-Type: text/plain
---- response_body_like: ^.*$
-
-=== TEST 4: the http_check interface, json
---- http_config
-upstream backend {
- server 127.0.0.1:1971;
- server 127.0.0.1:1972;
- server 127.0.0.1:1973;
- server 127.0.0.1:1970;
- server 127.0.0.1:1974;
- server 127.0.0.1:1975;
-
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
- check_http_send "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n";
- check_http_expect_alive http_2xx http_3xx;
-}
-
-server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
-}
-
---- config
- location / {
- proxy_pass http://backend;
- }
-
- location /status {
- check_status json;
- }
-
---- request
-GET /status
---- response_headers
-Content-Type: application/json
---- response_body_like: ^.*"total": 6,.*$
-
-=== TEST 5: the http_check interface, default html, request csv
---- http_config
-upstream backend {
- server 127.0.0.1:1971;
- server 127.0.0.1:1972;
- server 127.0.0.1:1973;
- server 127.0.0.1:1970;
- server 127.0.0.1:1974;
- server 127.0.0.1:1975;
-
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
- check_http_send "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n";
- check_http_expect_alive http_2xx http_3xx;
-}
-
-server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
-}
-
---- config
- location / {
- proxy_pass http://backend;
- }
-
- location /status {
- check_status html;
- }
-
---- request
-GET /status?format=csv
---- response_headers
-Content-Type: text/plain
---- response_body_like: ^.*$
-
-=== TEST 6: the http_check interface, default csv, request json
---- http_config
-upstream backend {
- server 127.0.0.1:1971;
- server 127.0.0.1:1972;
- server 127.0.0.1:1973;
- server 127.0.0.1:1970;
- server 127.0.0.1:1974;
- server 127.0.0.1:1975;
-
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
- check_http_send "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n";
- check_http_expect_alive http_2xx http_3xx;
-}
-
-server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
-}
-
---- config
- location / {
- proxy_pass http://backend;
- }
-
- location /status {
- check_status csv;
- }
-
---- request
-GET /status?format=json
---- response_headers
-Content-Type: application/json
---- response_body_like: ^.*"total": 6,.*$
-
-=== TEST 7: the http_check interface, default json, request html
---- http_config
-upstream backend {
- server 127.0.0.1:1971;
- server 127.0.0.1:1972;
- server 127.0.0.1:1973;
- server 127.0.0.1:1970;
- server 127.0.0.1:1974;
- server 127.0.0.1:1975;
-
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
- check_http_send "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n";
- check_http_expect_alive http_2xx http_3xx;
-}
-
-server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
-}
-
---- config
- location / {
- proxy_pass http://backend;
- }
-
- location /status {
- check_status json;
- }
-
---- request
-GET /status?format=html
---- response_headers
-Content-Type: text/html
---- response_body_like: ^.*Check upstream server number: 6.*$
-
-=== TEST 8: the http_check interface, default json, request htm, bad format
---- http_config
-upstream backend {
- server 127.0.0.1:1971;
- server 127.0.0.1:1972;
- server 127.0.0.1:1973;
- server 127.0.0.1:1970;
- server 127.0.0.1:1974;
- server 127.0.0.1:1975;
-
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
- check_http_send "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n";
- check_http_expect_alive http_2xx http_3xx;
-}
-
-server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
-}
-
---- config
- location / {
- proxy_pass http://backend;
- }
-
- location /status {
- check_status json;
- }
-
---- request
-GET /status?format=htm
---- response_headers
-Content-Type: application/json
---- response_body_like: ^.*"total": 6,.*$
-
-=== TEST 9: the http_check interface, default html, request csv and up
---- http_config
-upstream backend {
- server 127.0.0.1:1971;
- server 127.0.0.1:1972;
- server 127.0.0.1:1973;
- server 127.0.0.1:1970;
- server 127.0.0.1:1974;
- server 127.0.0.1:1975;
-
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
- check_http_send "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n";
- check_http_expect_alive http_2xx http_3xx;
-}
-
-server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
-}
-
---- config
- location / {
- proxy_pass http://backend;
- }
-
- location /status {
- check_status html;
- }
-
---- request
-GET /status?format=csv&status=up
---- response_headers
-Content-Type: text/plain
---- response_body_like: ^[:\.,\w]+\n$
-
-=== TEST 10: the http_check interface, default csv, request json and down
---- http_config
-upstream backend {
- server 127.0.0.1:1971;
- server 127.0.0.1:1972;
- server 127.0.0.1:1973;
- server 127.0.0.1:1970;
- server 127.0.0.1:1974;
- server 127.0.0.1:1975;
-
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
- check_http_send "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n";
- check_http_expect_alive http_2xx http_3xx;
-}
-
-server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
-}
-
---- config
- location / {
- proxy_pass http://backend;
- }
-
- location /status {
- check_status csv;
- }
-
---- request
-GET /status?format=json&status=down
---- response_headers
-Content-Type: application/json
---- response_body_like: ^.*"total": 5,.*$
-
-=== TEST 11: the http_check interface, default json, request html and up
---- http_config
-upstream backend {
- server 127.0.0.1:1971;
- server 127.0.0.1:1972;
- server 127.0.0.1:1973;
- server 127.0.0.1:1970;
- server 127.0.0.1:1974;
- server 127.0.0.1:1975;
-
- check interval=3000 rise=1 fall=1 timeout=2000 type=http;
- check_http_send "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n";
- check_http_expect_alive http_2xx http_3xx;
-}
-
-server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
-}
-
---- config
- location / {
- proxy_pass http://backend;
- }
-
- location /status {
- check_status json;
- }
-
---- request
-GET /status?format=html&status=up
---- response_headers
-Content-Type: text/html
---- response_body_like: ^.*Check upstream server number: 1.*$
-
-=== TEST 12: the http_check interface, default json, request html, bad status
---- http_config
-upstream backend {
- server 127.0.0.1:1971;
- server 127.0.0.1:1972;
- server 127.0.0.1:1973;
- server 127.0.0.1:1970;
- server 127.0.0.1:1974;
- server 127.0.0.1:1975;
-
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
- check_http_send "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n";
- check_http_expect_alive http_2xx http_3xx;
-}
-
-server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
-}
-
---- config
- location / {
- proxy_pass http://backend;
- }
-
- location /status {
- check_status json;
- }
-
---- request
-GET /status?format=html&status=foo
---- response_headers
-Content-Type: text/html
---- response_body_like: ^.*Check upstream server number: 6.*$
-
-=== TEST 13: the http_check interface, with check_keepalive_requests configured
---- http_config
-upstream backend {
- server 127.0.0.1:1971;
- server 127.0.0.1:1972;
- server 127.0.0.1:1973;
- server 127.0.0.1:1970;
- server 127.0.0.1:1974;
- server 127.0.0.1:1975;
-
- check_keepalive_requests 10;
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
- check_http_send "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n";
- check_http_expect_alive http_2xx http_3xx;
-}
-
-server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
-}
-
---- config
- location / {
- proxy_pass http://backend;
- }
-
- location /status {
- check_status;
- }
-
---- request
-GET /status
---- response_headers
-Content-Type: text/html
---- response_body_like: ^.*Check upstream server number: 6.*$
Changed | nginx_upstream_check_module-master.tar.gz/test/t/http_check.t
@@ -1,3 +1,21 @@
+#
+#===============================================================================
+#
+# FILE: http_check.t
+#
+# DESCRIPTION: test
+#
+# FILES: ---
+# BUGS: ---
+# NOTES: ---
+# AUTHOR: Weibin Yao (http://yaoweibin.cn/), yaoweibin@gmail.com
+# COMPANY:
+# VERSION: 1.0
+# CREATED: 03/02/2010 03:18:28 PM
+# REVISION: ---
+#===============================================================================
+
+
# vi:filetype=perl
use lib 'lib';
@@ -6,6 +24,7 @@
plan tests => repeat_each(2) * 2 * blocks();
no_root_location();
+#no_diff;
run_tests();
@@ -14,21 +33,12 @@
=== TEST 1: the http_check test-single server
--- http_config
upstream test{
- server 127.0.0.1:1970;
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
+ server blog.163.com:80;
+ check interval=3000 rise=1 fall=5 timeout=2000 type=http;
check_http_send "GET / HTTP/1.0\r\n\r\n";
check_http_expect_alive http_2xx http_3xx;
}
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
--- config
location / {
proxy_pass http://test;
@@ -41,10 +51,10 @@
=== TEST 2: the http_check test-multi_server
--- http_config
upstream test{
- server 127.0.0.1:1970;
- server 127.0.0.1:1971;
+ server blog.163.com:80;
+ server blog.163.com:81;
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
+ check interval=3000 rise=1 fall=5 timeout=2000 type=http;
check_http_send "GET / HTTP/1.0\r\n\r\n";
check_http_expect_alive http_2xx http_3xx;
}
@@ -58,15 +68,6 @@
check_http_expect_alive http_2xx http_3xx;
}
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
--- config
location / {
proxy_pass http://test;
@@ -79,23 +80,14 @@
=== TEST 3: the http_check test
--- http_config
upstream test{
- server 127.0.0.1:1970;
- server 127.0.0.1:1971;
+ server blog.163.com:80;
+ server blog.163.com:81;
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
+ check interval=3000 rise=1 fall=5 timeout=2000 type=http;
check_http_send "GET /foo HTTP/1.0\r\n\r\n";
check_http_expect_alive http_2xx http_3xx;
}
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
--- config
location / {
proxy_pass http://test;
@@ -109,17 +101,8 @@
=== TEST 4: the http_check without check directive
--- http_config
upstream test{
- server 127.0.0.1:1970;
- server 127.0.0.1:1971;
- }
-
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
+ server blog.163.com:80;
+ server blog.163.com:81;
}
--- config
@@ -134,26 +117,17 @@
=== TEST 5: the http_check which does not use the upstream
--- http_config
upstream test{
- server 127.0.0.1:1970;
- server 127.0.0.1:1971;
+ server blog.163.com:80;
+ server blog.163.com:81;
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
+ check interval=3000 rise=1 fall=5 timeout=2000 type=http;
check_http_send "GET / HTTP/1.0\r\n\r\n";
check_http_expect_alive http_2xx http_3xx;
}
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
--- config
location / {
- proxy_pass http://127.0.0.1:1970;
+ proxy_pass http://blog.163.com;
}
--- request
@@ -163,23 +137,14 @@
=== TEST 6: the http_check test-single server
--- http_config
upstream test{
- server 127.0.0.1:1970;
+ server blog.163.com:80;
ip_hash;
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
+ check interval=3000 rise=1 fall=5 timeout=2000 type=http;
check_http_send "GET / HTTP/1.0\r\n\r\n";
check_http_expect_alive http_2xx http_3xx;
}
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
--- config
location / {
proxy_pass http://test;
@@ -192,24 +157,15 @@
=== TEST 7: the http_check test-multi_server
--- http_config
upstream test{
- server 127.0.0.1:1970;
- server 127.0.0.1:1971;
+ server blog.163.com:80;
+ server blog.163.com:81;
ip_hash;
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
+ check interval=3000 rise=1 fall=5 timeout=2000 type=http;
check_http_send "GET / HTTP/1.0\r\n\r\n";
check_http_expect_alive http_2xx http_3xx;
}
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
--- config
location / {
proxy_pass http://test;
@@ -222,24 +178,15 @@
=== TEST 8: the http_check test
--- http_config
upstream test{
- server 127.0.0.1:1970;
- server 127.0.0.1:1971;
+ server blog.163.com:80;
+ server blog.163.com:81;
ip_hash;
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
+ check interval=3000 rise=1 fall=5 timeout=2000 type=http;
check_http_send "GET /foo HTTP/1.0\r\n\r\n";
check_http_expect_alive http_2xx http_3xx;
}
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
--- config
location / {
proxy_pass http://test;
@@ -253,20 +200,11 @@
=== TEST 9: the http_check without check directive
--- http_config
upstream test{
- server 127.0.0.1:1970;
- server 127.0.0.1:1971;
+ server blog.163.com:80;
+ server blog.163.com:81;
ip_hash;
}
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
--- config
location / {
proxy_pass http://test;
@@ -279,27 +217,18 @@
=== TEST 10: the http_check which does not use the upstream
--- http_config
upstream test{
- server 127.0.0.1:1970;
- server 127.0.0.1:1971;
+ server blog.163.com:80;
+ server blog.163.com:81;
ip_hash;
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
+ check interval=3000 rise=1 fall=5 timeout=2000 type=http;
check_http_send "GET / HTTP/1.0\r\n\r\n";
check_http_expect_alive http_2xx http_3xx;
}
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
--- config
location / {
- proxy_pass http://127.0.0.1:1970;
+ proxy_pass http://blog.163.com;
}
--- request
@@ -309,30 +238,21 @@
=== TEST 11: the http_check which does not use the upstream, with variable
--- http_config
upstream test{
- server 127.0.0.1:1970;
- server 127.0.0.1:1971;
+ server blog.163.com:80;
+ server blog.163.com:81;
ip_hash;
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
+ check interval=3000 rise=1 fall=5 timeout=2000 type=http;
check_http_send "GET / HTTP/1.0\r\n\r\n";
check_http_expect_alive http_2xx http_3xx;
}
resolver 8.8.8.8;
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
--- config
location / {
set $test "/";
- proxy_pass http://www.taobao.com$test;
+ proxy_pass http://blog.163.com$test;
}
--- request
@@ -340,25 +260,18 @@
--- response_body_like: ^<(.*)>$
=== TEST 12: the http_check test-single server, least conn
+--- skip_nginx
+2: < 1.2.2
--- http_config
upstream test{
- server 127.0.0.1:1970;
+ server blog.163.com:80;
least_conn;
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
+ check interval=3000 rise=1 fall=5 timeout=2000 type=http;
check_http_send "GET / HTTP/1.0\r\n\r\n";
check_http_expect_alive http_2xx http_3xx;
}
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
--- config
location / {
proxy_pass http://test;
@@ -369,26 +282,19 @@
--- response_body_like: ^<(.*)>$
=== TEST 13: the http_check test-multi_server, least conn
+--- skip_nginx
+2: < 1.2.2
--- http_config
upstream test{
- server 127.0.0.1:1970;
- server 127.0.0.1:1971;
+ server blog.163.com:80;
+ server blog.163.com:81;
least_conn;
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
+ check interval=3000 rise=1 fall=5 timeout=2000 type=http;
check_http_send "GET / HTTP/1.0\r\n\r\n";
check_http_expect_alive http_2xx http_3xx;
}
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
--- config
location / {
proxy_pass http://test;
@@ -399,26 +305,19 @@
--- response_body_like: ^<(.*)>$
=== TEST 14: the http_check test, least conn
+--- skip_nginx
+2: < 1.2.2
--- http_config
upstream test{
- server 127.0.0.1:1970;
- server 127.0.0.1:1971;
+ server blog.163.com:80;
+ server blog.163.com:81;
least_conn;
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
+ check interval=3000 rise=1 fall=5 timeout=2000 type=http;
check_http_send "GET /foo HTTP/1.0\r\n\r\n";
check_http_expect_alive http_2xx http_3xx;
}
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
--- config
location / {
proxy_pass http://test;
@@ -430,106 +329,15 @@
--- response_body_like: ^.*$
=== TEST 15: the http_check without check directive, least conn
+--- skip_nginx
+2: < 1.2.2
--- http_config
upstream test{
- server 127.0.0.1:1970;
- server 127.0.0.1:1971;
+ server blog.163.com:80;
+ server blog.163.com:81;
least_conn;
}
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
---- config
- location / {
- proxy_pass http://test;
- }
-
---- request
-GET /
---- response_body_like: ^<(.*)>$
-
-=== TEST 16: the http_check with port
---- http_config
- upstream test{
- server 127.0.0.1:1970;
- check interval=2000 rise=1 fall=1 timeout=1000 type=http port=1971;
- check_http_send "GET / HTTP/1.0\r\n\r\n";
- check_http_expect_alive http_2xx http_3xx;
- }
-
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
---- config
- location / {
- proxy_pass http://test;
- }
-
---- request
-GET /
---- error_code: 502
---- response_body_like: ^.*$
-
-=== TEST 17: the http_check with port
---- http_config
- upstream test{
- server 127.0.0.1:1971;
- check interval=3000 rise=1 fall=1 timeout=1000 type=http port=1970;
- check_http_send "GET / HTTP/1.0\r\n\r\n";
- check_http_expect_alive http_2xx http_3xx;
- }
-
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
---- config
- location / {
- proxy_pass http://test;
- }
-
---- request
-GET /
---- error_code: 502
---- response_body_like: ^.*$
-
-=== TEST 18: the http_check with check_keepalive_requests configured
---- http_config
- upstream test{
- server 127.0.0.1:1970;
- check_keepalive_requests 10;
- check interval=3000 rise=1 fall=1 timeout=1000 type=http;
- check_http_send "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n";
- check_http_expect_alive http_2xx http_3xx;
- }
-
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
--- config
location / {
proxy_pass http://test;
Changed | nginx_upstream_check_module-master.tar.gz/test/t/ssl_hello_check.t
@@ -1,3 +1,21 @@
+#
+#===============================================================================
+#
+# FILE: ssl_hello_check.t
+#
+# DESCRIPTION: test
+#
+# FILES: ---
+# BUGS: ---
+# NOTES: ---
+# AUTHOR: Weibin Yao (http://yaoweibin.cn/), yaoweibin@gmail.com
+# COMPANY:
+# VERSION: 1.0
+# CREATED: 03/02/2010 03:18:28 PM
+# REVISION: ---
+#===============================================================================
+
+
# vi:filetype=perl
use lib 'lib';
@@ -19,12 +37,11 @@
server www.alipay.com:444;
server www.alipay.com:445;
- check interval=4000 rise=1 fall=1 timeout=2000 type=ssl_hello;
+ check interval=4000 rise=1 fall=5 timeout=2000 type=ssl_hello;
}
--- config
location / {
- proxy_ssl_session_reuse off;
proxy_pass https://test;
}
@@ -40,12 +57,11 @@
server www.alipay.com:445;
ip_hash;
- check interval=4000 rise=1 fall=1 timeout=2000 type=ssl_hello;
+ check interval=4000 rise=1 fall=5 timeout=2000 type=ssl_hello;
}
--- config
location / {
- proxy_ssl_session_reuse off;
proxy_pass https://test;
}
@@ -61,12 +77,11 @@
server www.alipay.com:444;
server www.alipay.com:445;
- check interval=4000 rise=1 fall=1 timeout=2000 type=ssl_hello;
+ check interval=4000 rise=1 fall=5 timeout=2000 type=ssl_hello;
}
--- config
location / {
- proxy_ssl_session_reuse off;
proxy_pass https://test;
}
@@ -75,6 +90,8 @@
--- response_body_like: ^<(.*)>[\r\n\s\t]*$
=== TEST 4: the ssl_hello_check test with least_conn
+--- skip_nginx
+2: < 1.2.2
--- http_config
upstream test{
server www.alipay.com:443;
@@ -82,50 +99,11 @@
server www.alipay.com:445;
least_conn;
- check interval=4000 rise=1 fall=1 timeout=2000 type=ssl_hello;
- }
-
---- config
- location / {
- proxy_ssl_session_reuse off;
- proxy_pass https://test;
- }
-
---- request
-GET /
---- response_body_like: ^<(.*)>[\r\n\s\t]*$
-
-=== TEST 5: the ssl_hello_check test with port 80
---- http_config
- upstream test{
- server www.alipay.com:443;
-
- check interval=4000 rise=1 fall=1 timeout=2000 type=http port=80;
- check_http_send "GET / HTTP/1.0\r\n\r\n";
- check_http_expect_alive http_2xx http_3xx;
- }
-
---- config
- location / {
- proxy_ssl_session_reuse off;
- proxy_pass https://test;
- }
-
---- request
-GET /
---- response_body_like: ^<(.*)>[\r\n\s\t]*$
-
-=== TEST 6: the ssl_hello_check test with port 443
---- http_config
- upstream test{
- server www.alipay.com:443;
-
- check interval=4000 rise=1 fall=1 timeout=2000 type=ssl_hello port=443;
+ check interval=4000 rise=1 fall=5 timeout=2000 type=ssl_hello;
}
--- config
location / {
- proxy_ssl_session_reuse off;
proxy_pass https://test;
}
@@ -133,22 +111,3 @@
GET /
--- response_body_like: ^<(.*)>[\r\n\s\t]*$
-=== TEST 7: the ssl_hello_check test with port 444
---- http_config
- upstream test{
- server www.alipay.com:443;
-
- check interval=4000 rise=1 fall=1 timeout=2000 type=ssl_hello port=444;
- }
-
---- config
- location / {
- proxy_ssl_session_reuse off;
- proxy_pass https://test;
- }
-
---- request
-GET /
---- error_code: 502
---- response_body_like: ^.*$
-
Changed | nginx_upstream_check_module-master.tar.gz/test/t/tcp_check.t
@@ -1,3 +1,21 @@
+#
+#===============================================================================
+#
+# FILE: tcp_check.t
+#
+# DESCRIPTION: test
+#
+# FILES: ---
+# BUGS: ---
+# NOTES: ---
+# AUTHOR: Weibin Yao (http://yaoweibin.cn/), yaoweibin@gmail.com
+# COMPANY:
+# VERSION: 1.0
+# CREATED: 03/02/2010 03:18:28 PM
+# REVISION: ---
+#===============================================================================
+
+
# vi:filetype=perl
use lib 'lib';
@@ -15,24 +33,15 @@
=== TEST 1: the tcp_check test
--- http_config
upstream test{
- server 127.0.0.1:1970;
- server 127.0.0.1:1971;
- server 127.0.0.1:1972;
-
- check interval=3000 rise=1 fall=1 timeout=1000;
- }
-
- server {
- listen 1970;
+ server blog.163.com:80;
+ server blog.163.com:81;
+ server blog.163.com:82;
- location / {
- root html;
- index index.html index.htm;
- }
+ check interval=3000 rise=1 fall=5 timeout=1000;
}
--- config
- location / {
+ location / {
proxy_pass http://test;
}
@@ -43,25 +52,16 @@
=== TEST 2: the tcp_check test with ip_hash
--- http_config
upstream test{
- server 127.0.0.1:1970;
- server 127.0.0.1:1971;
- server 127.0.0.1:1972;
+ server blog.163.com:80;
+ server blog.163.com:81;
+ server blog.163.com:82;
ip_hash;
- check interval=3000 rise=1 fall=1 timeout=1000 type=tcp;
- }
-
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
+ check interval=3000 rise=1 fall=5 timeout=1000 type=tcp;
}
--- config
- location / {
+ location / {
proxy_pass http://test;
}
@@ -72,25 +72,16 @@
=== TEST 3: the tcp_check test which don't use the checked upstream
--- http_config
upstream test{
- server 127.0.0.1:1970;
- server 127.0.0.1:1971;
- server 127.0.0.1:1972;
+ server blog.163.com:80;
+ server blog.163.com:81;
+ server blog.163.com:82;
- check interval=3000 rise=1 fall=1 timeout=1000;
- }
-
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
+ check interval=3000 rise=1 fall=5 timeout=1000;
}
--- config
location / {
- proxy_pass http://127.0.0.1:1970;
+ proxy_pass http://blog.163.com;
}
--- request
@@ -98,25 +89,18 @@
--- response_body_like: ^<(.*)>$
=== TEST 3: the tcp_check test with least_conn;
+--- skip_nginx
+2: < 1.2.2
--- http_config
upstream test{
- server 127.0.0.1:1970;
- server 127.0.0.1:1971;
- server 127.0.0.1:1972;
+ server blog.163.com:80;
+ server blog.163.com:81;
+ server blog.163.com:82;
least_conn;
check interval=3000 rise=1 fall=5 timeout=1000 type=tcp;
}
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
--- config
location / {
proxy_pass http://test;
@@ -126,83 +110,3 @@
GET /
--- response_body_like: ^<(.*)>$
-=== TEST 4: the tcp_check test with port
---- http_config
- upstream test{
- server 127.0.0.1:1971;
-
- check interval=3000 rise=1 fall=1 timeout=1000 type=tcp port=1970;
- }
-
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
---- config
- location / {
- proxy_pass http://test;
- }
-
---- request
-GET /
---- error_code: 502
---- response_body_like: ^.*$
-
-=== TEST 5: the tcp_check test with port
---- http_config
- upstream test{
- server 127.0.0.1:1970;
-
- check interval=2000 rise=1 fall=1 timeout=1000 type=tcp port=1971;
- }
-
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
---- config
- location / {
- proxy_pass http://test;
- }
-
---- request
-GET /
---- error_code: 502
---- response_body_like: ^.*$
-
-=== TEST 5: the tcp_check test with check_keepalive_requests configured
---- http_config
- upstream test{
- server 127.0.0.1:1970;
-
- check_keepalive_requests 10;
- check interval=2000 rise=1 fall=1 timeout=1000 type=tcp;
- }
-
- server {
- listen 1970;
-
- location / {
- root html;
- index index.html index.htm;
- }
- }
-
---- config
- location / {
- proxy_pass http://test;
- }
-
---- request
-GET /
---- response_body_like: ^<(.*)>$
Changed | nginx_upstream_check_module-master.tar.gz/upstream_fair.patch
@@ -1,37 +1,37 @@
-diff --git a/ngx_http_upstream_fair_module.c b/ngx_http_upstream_fair_module.c
-index a4419ca..af80bba 100644
---- a/ngx_http_upstream_fair_module.c
-+++ b/ngx_http_upstream_fair_module.c
+diff --git a/nginx-upstream-fair/ngx_http_upstream_fair_module.c b/nginx-upstream-fair/ngx_http_upstream_fair_module.c
+index 722b6c3..187a23b 100644
+--- a/nginx-upstream-fair/ngx_http_upstream_fair_module.c
++++ b/nginx-upstream-fair/ngx_http_upstream_fair_module.c
@@ -9,6 +9,10 @@
#include <ngx_core.h>
#include <ngx_http.h>
-+#if (NGX_HTTP_UPSTREAM_CHECK)
-+#include "ngx_http_upstream_check_module.h"
++#if (NGX_UPSTREAM_CHECK_MODULE)
++#include "ngx_http_upstream_check_handler.h"
+#endif
+
typedef struct {
ngx_uint_t nreq;
ngx_uint_t total_req;
-@@ -42,6 +42,10 @@ typedef struct {
+@@ -42,6 +46,10 @@ typedef struct {
ngx_uint_t max_fails;
time_t fail_timeout;
-
-+#if (NGX_HTTP_UPSTREAM_CHECK)
+
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ ngx_uint_t check_index;
+#endif
+
time_t accessed;
ngx_uint_t down:1;
-
-@@ -474,6 +478,15 @@ ngx_http_upstream_init_fair_rr(ngx_conf_t *cf, ngx_http_upstream_srv_conf_t *us)
+
+@@ -474,6 +482,15 @@ ngx_http_upstream_init_fair_rr(ngx_conf_t *cf, ngx_http_upstream_srv_conf_t *us)
peers->peer[n].fail_timeout = server[i].fail_timeout;
peers->peer[n].down = server[i].down;
peers->peer[n].weight = server[i].down ? 0 : server[i].weight;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ if (!server[i].down) {
+ peers->peer[n].check_index =
-+ ngx_http_upstream_check_add_peer(cf, us, &server[i].addrs[j]);
++ ngx_http_check_add_peer(cf, us, &server[i].addrs[j]);
+ }
+ else {
+ peers->peer[n].check_index = (ngx_uint_t) NGX_ERROR;
@@ -40,14 +40,14 @@
n++;
}
}
-@@ -524,6 +537,15 @@ ngx_http_upstream_init_fair_rr(ngx_conf_t *cf, ngx_http_upstream_srv_conf_t *us)
+@@ -524,6 +541,15 @@ ngx_http_upstream_init_fair_rr(ngx_conf_t *cf, ngx_http_upstream_srv_conf_t *us)
backup->peer[n].max_fails = server[i].max_fails;
backup->peer[n].fail_timeout = server[i].fail_timeout;
backup->peer[n].down = server[i].down;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ if (!server[i].down) {
+ backup->peer[n].check_index =
-+ ngx_http_upstream_check_add_peer(cf, us, &server[i].addrs[j]);
++ ngx_http_check_add_peer(cf, us, &server[i].addrs[j]);
+ }
+ else {
+ backup->peer[n].check_index = (ngx_uint_t) NGX_ERROR;
@@ -56,36 +56,35 @@
n++;
}
}
-@@ -580,6 +602,9 @@ ngx_http_upstream_init_fair_rr(ngx_conf_t *cf, ngx_http_upstream_srv_conf_t *us)
+@@ -580,6 +606,9 @@ ngx_http_upstream_init_fair_rr(ngx_conf_t *cf, ngx_http_upstream_srv_conf_t *us)
peers->peer[i].weight = 1;
peers->peer[i].max_fails = 1;
peers->peer[i].fail_timeout = 10;
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ peers->peer[i].check_index = (ngx_uint_t) NGX_ERROR;
+#endif
}
-
+
us->peer.data = peers;
-@@ -723,6 +748,12 @@ ngx_http_upstream_fair_try_peer(ngx_peer_connection_t *pc,
+@@ -721,6 +750,11 @@ ngx_http_upstream_fair_try_peer(ngx_peer_connection_t *pc,
peer = &fp->peers->peer[peer_id];
-
+
if (!peer->down) {
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
-+ "[upstream_fair] get fair peer, check_index: %ui",
-+ peer->check_index);
-+ if (!ngx_http_upstream_check_peer_down(peer->check_index)) {
++ "[upstream_fair] get fair peer, check_index: %ui", peer->check_index);
++ if (!ngx_http_check_peer_down(peer->check_index)) {
+#endif
if (peer->max_fails == 0 || peer->shared->fails < peer->max_fails) {
return NGX_OK;
}
-@@ -733,6 +764,9 @@ ngx_http_upstream_fair_try_peer(ngx_peer_connection_t *pc,
+@@ -731,6 +765,9 @@ ngx_http_upstream_fair_try_peer(ngx_peer_connection_t *pc,
peer->shared->fails = 0;
return NGX_OK;
}
-+#if (NGX_HTTP_UPSTREAM_CHECK)
++#if (NGX_UPSTREAM_CHECK_MODULE)
+ }
+#endif
}
-
+
return NGX_BUSY;
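Every hunk in this patch applies the same guard: before a balancer hands out a peer, it consults the check module (ngx_http_check_peer_down()) and skips any peer that the active health check currently reports as down. A self-contained C sketch of that selection pattern, with a stubbed health table standing in for the real check module, is shown below; names and the simplified peer struct are illustrative only.

    #include <stdio.h>
    #include <stddef.h>

    /* simplified stand-in for the upstream peer record */
    typedef struct {
        const char   *name;
        int           down;         /* administratively down ("server ... down") */
        unsigned int  check_index;  /* slot in the health checker's table */
    } peer_t;

    /* stub for ngx_http_check_peer_down(): non-zero means the health
     * checker currently considers this peer unavailable */
    static int health_table[] = { 0, 1, 0 };

    static int check_peer_down(unsigned int idx) {
        return health_table[idx];
    }

    /* pick the first peer that is neither administratively down nor
     * failing its active health check -- the same guard the patch adds
     * to the fair, ip_hash and least_conn balancers */
    static const peer_t *get_peer(const peer_t *peers, size_t n) {
        for (size_t i = 0; i < n; i++) {
            const peer_t *p = &peers[i];

            if (p->down) {
                continue;               /* never consider disabled peers */
            }

            if (check_peer_down(p->check_index)) {
                continue;               /* skip peers the checker marked down */
            }

            return p;
        }

        return NULL;                    /* all peers unavailable */
    }

    int main(void) {
        peer_t peers[] = {
            { "10.0.0.1:80", 1, 0 },    /* marked down in the config */
            { "10.0.0.2:80", 0, 1 },    /* failing its health check */
            { "10.0.0.3:80", 0, 2 },    /* healthy */
        };

        const peer_t *p = get_peer(peers, 3);
        printf("selected: %s\n", p ? p->name : "(none)");
        return 0;
    }

The design point visible in the fair and ip_hash hunks is that this health-check guard wraps the existing max_fails/fail_timeout logic rather than replacing it, so passive failure accounting still applies to peers the checker considers up.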
Changed | ssl.conf
@@ -1,3 +1,6 @@
+#
+# HTTPS server configuration
+#
#server {
# listen 443;
@@ -7,31 +10,15 @@
# ssl_certificate cert.pem;
# ssl_certificate_key cert.key;
-# Don't use outdated SSLv3 protocol. Protects against BEAST and POODLE attacks.
-# ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
+# ssl_session_timeout 5m;
-# Use secure ciphers - courtesy Cloudflare
-# ssl_ciphers EECDH+CHACHA20:EECDH+CHACHA20-draft:EECDH+AES128:RSA+AES128:EECDH+AES256:RSA+AES256:EECDH+3DES:RSA+3DES:!MD5;
-# ssl_prefer_server_ciphers on;
-
-# ssl_session_cache shared:SSL:20m;
-# ssl_session_tickets on;
-# ssl_session_timeout 30m;
-
-# HSTS - Use HTTPS exclusively , uncomment one.
-# add_header Strict-Transport-Security "max-age=15768000; includeSubdomains";
-
-# create new one with: openssl dhparam -out dhparams.pem 2048
-# ssl_dhparam /etc/nginx/ssl/dhparams_2048.pem;
-
-# ssl_buffer_size 4k;
-
-# if you want extra security at the cost of considerable pressure on processor ..
-# ssl_ecdh_curve secp384r1;
+# ssl_protocols SSLv2 SSLv3 TLSv1;
+# ssl_ciphers ALL:!ADH:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP;
+# ssl_prefer_server_ciphers on;
# location / {
-# root html;
-# index index.html index.htm;
-# }
+# root html;
+# index index.html index.htm;
+# }
#}
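Uncommented, the "+" side of this hunk corresponds to a server block along the following lines. The ssl parameter on the listen directive and the certificate paths are assumptions added so the sketch would actually terminate TLS; the protocol and cipher lists are simply what the packaged template now carries, and by current standards SSLv2/SSLv3 and export-grade ciphers would not be considered safe to enable:

    server {
        listen 443 ssl;

        # certificate paths are placeholders, not part of the template
        ssl_certificate     /etc/nginx/ssl/cert.pem;
        ssl_certificate_key /etc/nginx/ssl/cert.key;

        # cache SSL session parameters for five minutes
        ssl_session_timeout 5m;

        # protocol and cipher values as listed in the shipped template
        ssl_protocols SSLv2 SSLv3 TLSv1;
        ssl_ciphers   ALL:!ADH:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP;
        ssl_prefer_server_ciphers on;

        location / {
            root  html;
            index index.html index.htm;
        }
    }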