diff --git a/CHANGELOG b/CHANGELOG index c9a85b4d3..4b5ea773f 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,6 +1,70 @@ ChangeLog : =========== +2020/11/28 : 2.3.2 + - BUILD: http-htx: fix build warning regarding long type in printf + - CLEANUP: cfgparse: remove duplicate registration for transparent build options + - BUG/MEDIUM: filters: Forward all filtered data at the end of http filtering + - BUG/MINOR: http-ana: Don't wait for the body of CONNECT requests + - DOC: add missing 3.10 in the summary + - BUG/MINOR: ssl: segv on startup when AKID but no keyid + - BUG/MEDIUM: http-ana: Don't eval http-after-response ruleset on empty messages + - BUG/MEDIUM: ssl/crt-list: bundle support broken in crt-list + - BUG/MEDIUM: ssl: error when no certificate are found + - BUG/MINOR: ssl/crt-list: load bundle in crt-list only if activated + - BUG/MEDIUM: ssl/crt-list: fix error when no file found + - BUILD: makefile: enable crypt(3) for OpenBSD + - DOC: clarify how to create a fallback crt + - CLEANUP: connection: do not use conn->owner when the session is known + - BUG/MAJOR: connection: reset conn->owner when detaching from session list + - BUG/MINOR: http_htx: Fix searching headers by substring + - DOC: better describes how to configure a fallback crt + - BUG/MAJOR: filters: Always keep all offsets up to date during data filtering + - MEDIUM: cache: Change caching conditions + - DOC: cache: Add new caching limitation information + - REGTESTS: Add sample_fetches/cook.vtc + - REGTESTS: converter: add url_dec test + - MINOR: http_act: Add -m flag for del-header name matching method + - BUILD: Make DEBUG part of .build_opts + - BUILD: Show the value of DEBUG= in haproxy -vv + - BUG/MEDIUM: http_act: Restore init of log-format list + - BUG/MAJOR: peers: fix partial message decoding + - DOC: better document the config file format and escaping/quoting rules + - DOC: Clarify %HP description in log-format + - BUG/MINOR: tcpcheck: Don't forget to reset tcp-check flags on new kind of 
check + - MINOR: tcpcheck: Don't handle anymore in-progress send rules in tcpcheck_main + - BUG/MAJOR: tcpcheck: Allocate input and output buffers from the buffer pool + - DOC: config: Move req.hdrs and req.hdrs_bin in L7 samples fetches section + - BUG/MINOR: http-fetch: Fix smp_fetch_body() when called from a health-check + +2020/11/13 : 2.3.1 + - BUG/MINOR: ssl: don't report 1024 bits DH param load error when it's higher + - MINOR: http-htx: Add understandable errors for the errorfiles parsing + - DOC: config: Fix a typo on ssl_c_chain_der + - BUG/MEDIUM: ssl/crt-list: correctly insert crt-list line if crt already loaded + - BUG/MINOR: pattern: a sample marked as const could be written + - BUG/MINOR: lua: set buffer size during map lookups + - BUG/MINOR: stats: free dynamically stats fields/lines on shutdown + - BUG/MINOR: peers: Do not ignore a protocol error for dictionary entries. + - BUG/MINOR: peers: Missing TX cache entries reset. + - BUG/MEDIUM: peers: fix decoding of multi-byte length in stick-table messages + - BUG/MINOR: http-fetch: Extract cookie value even when no cookie name + - BUG/MINOR: http-fetch: Fix calls w/o parentheses of the cookie sample fetches + - BUG/MEDIUM: check: reuse srv proto only if using same mode + - MINOR: check: report error on incompatible proto + - MINOR: check: report error on incompatible connect proto + - BUG/MINOR: http-htx: Handle warnings when parsing http-error and http-errors + - BUG/MAJOR: spoe: Be sure to remove all references on a released spoe applet + - MINOR: spoe: Don't close connection in sync mode on processing timeout + - BUG/MINOR: tcpcheck: Don't warn on unused rules if check option is after + - MINOR: init: Fix the prototype for per-thread free callbacks + - MINOR: config/mux-h2: Return ERR_ flags from init_h2() instead of a status + - MINOR: cfgparse: tighten the scope of newnameserver variable, free it on error. 
+ - REGTEST: ssl: test wildcard and multi-type + exclusions + - REGTEST: ssl: mark reg-tests/ssl/ssl_crt-list_filters.vtc as broken + - MINOR: peers: Add traces to peer_treat_updatemsg(). + - REGTEST: make ssl_client_samples and ssl_server_samples require to 2.2 + 2020/11/05 : 2.3.0 - CLEANUP: pattern: remove unused entry "tree" in pattern.val - BUILD: ssl: use SSL_CTRL_GET_RAW_CIPHERLIST instead of OpenSSL versions diff --git a/Makefile b/Makefile index c22626df4..0b963837b 100644 --- a/Makefile +++ b/Makefile @@ -254,7 +254,7 @@ EXTRA = # feed CPU_CFLAGS, which in turn feed CFLAGS, so it is not mandatory to use # them. You should not have to change these options. Better use CPU_CFLAGS or # even CFLAGS instead. -CPU_CFLAGS.generic = -O0 +CPU_CFLAGS.generic = -O2 CPU_CFLAGS.native = -O2 -march=native CPU_CFLAGS.i586 = -O2 -march=i586 CPU_CFLAGS.i686 = -O2 -march=i686 @@ -390,8 +390,8 @@ endif # OpenBSD 6.3 and above ifeq ($(TARGET),openbsd) set_target_defaults = $(call default_opts, \ - USE_POLL USE_TPROXY USE_THREAD USE_KQUEUE USE_ACCEPT4 USE_CLOSEFROM \ - USE_GETADDRINFO) + USE_POLL USE_TPROXY USE_LIBCRYPT USE_THREAD USE_KQUEUE USE_ACCEPT4 \ + USE_CLOSEFROM USE_GETADDRINFO) endif # NetBSD 8 and above @@ -493,10 +493,12 @@ BUILD_FEATURES := $(foreach opt,$(patsubst USE_%,%,$(use_opts)),$(if $(USE_$(opt OPTIONS_CFLAGS += $(foreach opt,$(use_opts),$(if $($(opt)),-D$(opt),)) ifneq ($(USE_LIBCRYPT),) +ifneq ($(TARGET),openbsd) ifneq ($(TARGET),osx) OPTIONS_LDFLAGS += -lcrypt endif endif +endif ifneq ($(USE_SLZ),) # Use SLZ_INC and SLZ_LIB to force path to zlib.h and libz.{a,so} if needed. 
@@ -893,7 +895,7 @@ help: # Used only to force a rebuild if some build options change, but we don't do # it for certain targets which take no build options ifneq (reg-tests, $(firstword $(MAKECMDGOALS))) -build_opts = $(shell rm -f .build_opts.new; echo \'$(TARGET) $(BUILD_OPTIONS) $(VERBOSE_CFLAGS)\' > .build_opts.new; if cmp -s .build_opts .build_opts.new; then rm -f .build_opts.new; else mv -f .build_opts.new .build_opts; fi) +build_opts = $(shell rm -f .build_opts.new; echo \'$(TARGET) $(BUILD_OPTIONS) $(VERBOSE_CFLAGS) $(DEBUG)\' > .build_opts.new; if cmp -s .build_opts .build_opts.new; then rm -f .build_opts.new; else mv -f .build_opts.new .build_opts; fi) .build_opts: $(build_opts) else .build_opts: @@ -934,6 +936,7 @@ src/haproxy.o: src/haproxy.c $(DEP) -DBUILD_CC='"$(strip $(CC))"' \ -DBUILD_CFLAGS='"$(strip $(VERBOSE_CFLAGS))"' \ -DBUILD_OPTIONS='"$(strip $(BUILD_OPTIONS))"' \ + -DBUILD_DEBUG='"$(strip $(DEBUG))"' \ -DBUILD_FEATURES='"$(strip $(BUILD_FEATURES))"' \ -c -o $@ $< diff --git a/VERDATE b/VERDATE index fe0a1e5ad..6af01e46c 100644 --- a/VERDATE +++ b/VERDATE @@ -1,2 +1,2 @@ $Format:%ci$ -2020/11/05 +2020/11/28 diff --git a/VERSION b/VERSION index 276cbf9e2..f90b1afc0 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.3.0 +2.3.2 diff --git a/doc/configuration.txt b/doc/configuration.txt index 20545c631..79abb822b 100644 --- a/doc/configuration.txt +++ b/doc/configuration.txt @@ -4,7 +4,7 @@ ---------------------- version 2.3 willy tarreau - 2020/11/05 + 2020/11/28 This document covers the configuration language as implemented in the version @@ -54,6 +54,7 @@ Summary 3.7. Programs 3.8. HTTP-errors 3.9. Rings +3.10. Log forwarding 4. Proxies 4.1. Proxy keywords matrix @@ -403,28 +404,137 @@ details. 
HAProxy's configuration process involves 3 major sources of parameters : - the arguments from the command-line, which always take precedence - - the "global" section, which sets process-wide parameters - - the proxies sections which can take form of "defaults", "listen", - "frontend" and "backend". + - the configuration file(s), whose format is described here + - the running process' environment, in case some environment variables are + explicitly referenced + +The configuration file follows a fairly simple hierarchical format which obey +a few basic rules: + + 1. a configuration file is an ordered sequence of statements + + 2. a statement is a single non-empty line before any unprotected "#" (hash) + + 3. a line is a series of tokens or "words" delimited by unprotected spaces or + tab characters + + 4. the first word or sequence of words of a line is one of the keywords or + keyword sequences listed in this document + + 5. all other words are all arguments of the first one, some being well-known + keywords listed in this document, others being values, references to other + parts of the configuration, or expressions + + 6. certain keywords delimit a section inside which only a subset of keywords + are supported + + 7. a section ends at the end of a file or on a special keyword starting a new + section + +This is all that is needed to know to write a simple but reliable configuration +generator, but this is not enough to reliably parse any configuration nor to +figure how to deal with certain corner cases. + +First, there are a few consequences of the rules above. Rule 6 and 7 imply that +the keywords used to define a new section are valid everywhere and cannot have +a different meaning in a specific section. These keywords are always a single +word (as opposed to a sequence of words), and traditionally the section that +follows them is designated using the same name. 
For example when speaking about +the "global section", it designates the section of configuration that follows +the "global" keyword. This usage is used a lot in error messages to help locate +the parts that need to be addressed. + +A number of sections create an internal object or configuration space, which +requires to be distinguished from other ones. In this case they will take an +extra word which will set the name of this particular section. For some of them +the section name is mandatory. For example "frontend foo" will create a new +section of type "frontend" named "foo". Usually a name is specific to its +section and two sections of different types may use the same name, but this is +not recommended as it tends to complexify configuration management. + +A direct consequence of rule 7 is that when multiple files are read at once, +each of them must start with a new section, and the end of each file will end +a section. A file cannot contain sub-sections nor end an existing section and +start a new one. + +Rule 1 mentioned that ordering matters. Indeed, some keywords create directives +that can be repeated multiple times to create ordered sequences of rules to be +applied in a certain order. For example "tcp-request" can be used to alternate +"accept" and "reject" rules on varying criteria. As such, a configuration file +processor must always preserve a section's ordering when editing a file. The +ordering of sections usually does not matter except for the global section +which must be placed before other sections, but it may be repeated if needed. +In addition, some automatic identifiers may automatically be assigned to some +of the created objects (e.g. proxies), and by reordering sections, their +identifiers will change. These ones appear in the statistics for example. 
As +such, the configuration below will assign "foo" ID number 1 and "bar" ID number +2, which will be swapped if the two sections are reversed: + + listen foo + bind :80 + + listen bar + bind :81 + +Another important point is that according to rules 2 and 3 above, empty lines, +spaces, tabs, and comments following an unprotected "#" character are not part +of the configuration as they are just used as delimiters. This implies that the +following configurations are strictly equivalent: + + global#this is the global section + daemon#daemonize + frontend foo + mode http # or tcp + +and: + + global + daemon + + # this is the public web frontend + frontend foo + mode http + +The common practice is to align to the left only the keyword that initiates a +new section, and indent (i.e. prepend a tab character or a few spaces) all +other keywords so that it's instantly visible that they belong to the same +section (as done in the second example above). Placing comments before a new +section helps the reader decide if it's the desired one. Leaving a blank line +at the end of a section also visually helps spotting the end when editing it. 
Quoting and escaping ------------------------- -HAProxy's configuration introduces a quoting and escaping system similar to -many programming languages. The configuration file supports 3 types: escaping -with a backslash, weak quoting with double quotes, and strong quoting with -single quotes. +In modern configurations, some arguments require the use of some characters +that were previously considered as pure delimiters. In order to make this +possible, HAProxy supports character escaping by prepending a backslash ('\') +in front of the character to be escaped, weak quoting within double quotes +('"') and strong quoting within single quotes ("'"). + +This is pretty similar to what is done in a number of programming languages and +very close to what is commonly encountered in Bourne shell. The principle is +the following: while the configuration parser cuts the lines into words, it +also takes care of quotes and backslashes to decide whether a character is a +delimiter or is the raw representation of this character within the current +word. The escape character is then removed, the quotes are removed, and the +remaining word is used as-is as a keyword or argument for example. -If spaces have to be entered in strings, then they must be escaped by preceding -them by a backslash ('\') or by quoting them. Backslashes also have to be -escaped by doubling or strong quoting them. +If a backslash is needed in a word, it must either be escaped using itself +(i.e. double backslash) or be strongly quoted. 
-Escaping is achieved by preceding a special character by a backslash ('\'): +Escaping outside quotes is achieved by preceding a special character by a +backslash ('\'): \ to mark a space and differentiate it from a delimiter \# to mark a hash and differentiate it from a comment @@ -432,39 +542,161 @@ Escaping is achieved by preceding a special character by a backslash ('\'): \' to use a single quote and differentiate it from strong quoting \" to use a double quote and differentiate it from weak quoting -Weak quoting is achieved by using double quotes (""). Weak quoting prevents -the interpretation of: - - space as a parameter separator - ' single quote as a strong quoting delimiter - # hash as a comment start +In addition, a few non-printable characters may be emitted using their usual +C-language representation: -Weak quoting permits the interpretation of variables, if you want to use a non --interpreted dollar within a double quoted string, you should escape it with a -backslash ("\$"), it does not work outside weak quoting. + \n to insert a line feed (LF, character \x0a or ASCII 10 decimal) + \r to insert a carriage return (CR, character \x0d or ASCII 13 decimal) + \t to insert a tab (character \x09 or ASCII 9 decimal) + \xNN to insert character having ASCII code hex NN (e.g \x0a for LF). -Interpretation of escaping and special characters are not prevented by weak -quoting. +Weak quoting is achieved by surrounding double quotes ("") around the character +or sequence of characters to protect. Weak quoting prevents the interpretation +of: -Strong quoting is achieved by using single quotes (''). Inside single quotes, -nothing is interpreted, it's the efficient way to quote regexes. + space or tab as a word separator + ' single quote as a strong quoting delimiter + # hash as a comment start -Quoted and escaped strings are replaced in memory by their interpreted -equivalent, it allows you to perform concatenation. 
+Weak quoting permits the interpretation of environment variables (which are not +evaluated outside of quotes) by preceding them with a dollar sign ('$'). If a +dollar character is needed inside double quotes, it must be escaped using a +backslash. + +Strong quoting is achieved by surrounding single quotes ('') around the +character or sequence of characters to protect. Inside single quotes, nothing +is interpreted, it's the efficient way to quote regular expressions. + +As a result, here is the matrix indicating how special characters can be +entered in different contexts (unprintable characters are replaced with their +name within angle brackets). Note that some characters that may only be +represented escaped have no possible representation inside single quotes, +hence the '-' there: + + Character | Unquoted | Weakly quoted | Strongly quoted + -----------+---------------+-----------------------------+----------------- + | \, \x09 | "", "\", "\x09" | '' + | \n, \x0a | "\n", "\x0a" | - + | \r, \x0d | "\r", "\x0d" | - + | \, \x20 | "", "\", "\x20" | '' + " | \", \x22 | "\"", "\x22" | '"' + # | \#, \x23 | "#", "\#", "\x23" | '#' + $ | $, \$, \x24 | "\$", "\x24" | '$' + ' | \', \x27 | "'", "\'", "\x27" | - + \ | \\, \x5c | "\\", "\x5c" | '\' Example: - # those are equivalents: + # those are all strictly equivalent: log-format %{+Q}o\ %t\ %s\ %{-Q}r log-format "%{+Q}o %t %s %{-Q}r" log-format '%{+Q}o %t %s %{-Q}r' log-format "%{+Q}o %t"' %s %{-Q}r' log-format "%{+Q}o %t"' %s'\ %{-Q}r - # those are equivalents: - reqrep "^([^\ :]*)\ /static/(.*)" \1\ /\2 - reqrep "^([^ :]*)\ /static/(.*)" '\1 /\2' - reqrep "^([^ :]*)\ /static/(.*)" "\1 /\2" - reqrep "^([^ :]*)\ /static/(.*)" "\1\ /\2" +There is one particular case where a second level of quoting or escaping may be +necessary. Some keywords take arguments within parenthesis, sometimes delimited +by commas. 
These arguments are commonly integers or predefined words, but when +they are arbitrary strings, it may be required to perform a separate level of +escaping to disambiguate the characters that belong to the argument from the +characters that are used to delimit the arguments themselves. A pretty common +case is the "regsub" converter. It takes a regular expression in argument, and +if a closing parenthesis is needed inside, this one will require to have its +own quotes. + +The keyword argument parser is exactly the same as the top-level one regarding +quotes, except that it will not make special cases of backslashes. But what is +not always obvious is that the delimiters used inside must first be escaped or +quoted so that they are not resolved at the top level. + +Let's take this example making use of the "regsub" converter which takes 3 +arguments, one regular expression, one replacement string and one set of flags: + + # replace all occurrences of "foo" with "blah" in the path: + http-request set-path %[path,regsub(foo,blah,g)] + +Here no special quoting was necessary. But if now we want to replace either +"foo" or "bar" with "blah", we'll need the regular expression "(foo|bar)". 
We +cannot write: + + http-request set-path %[path,regsub((foo|bar),blah,g)] + +because we would like the string to cut like this: + + http-request set-path %[path,regsub((foo|bar),blah,g)] + |---------|----|-| + arg1 _/ / / + arg2 __________/ / + arg3 ______________/ + +but actually what is passed is a string between the opening and closing +parenthesis then garbage: + + http-request set-path %[path,regsub((foo|bar),blah,g)] + |--------|--------| + arg1=(foo|bar _/ / + trailing garbage _________/ + +The obvious solution here seems to be that the closing parenthesis needs to be +quoted, but alone this will not work, because as mentioned above, quotes are +processed by the top-level parser which will resolve them before processing +this word: + + http-request set-path %[path,regsub("(foo|bar)",blah,g)] + ------------ -------- ---------------------------------- + word1 word2 word3=%[path,regsub((foo|bar),blah,g)] + +So we didn't change anything for the argument parser at the second level which +still sees a truncated regular expression as the only argument, and garbage at +the end of the string. 
By escaping the quotes they will be passed unmodified to +the second level: + + http-request set-path %[path,regsub(\"(foo|bar)\",blah,g)] + ------------ -------- ------------------------------------ + word1 word2 word3=%[path,regsub("(foo|bar)",blah,g)] + |---------||----|-| + arg1=(foo|bar) _/ / / + arg2=blah ___________/ / + arg3=g _______________/ + +Another approch consists in using single quotes outside the whole string and +double quotes inside (so that the double quotes are not stripped again): + + http-request set-path '%[path,regsub("(foo|bar)",blah,g)]' + ------------ -------- ---------------------------------- + word1 word2 word3=%[path,regsub("(foo|bar)",blah,g)] + |---------||----|-| + arg1=(foo|bar) _/ / / + arg2 ___________/ / + arg3 _______________/ + +When using regular expressions, it can happen that the dollar ('$') character +appears in the expression or that a backslash ('\') is used in the replacement +string. In this case these ones will also be processed inside the double quotes +thus single quotes are preferred (or double escaping). Example: + + http-request set-path '%[path,regsub("^/(here)(/|$)","my/\1",g)]' + ------------ -------- ----------------------------------------- + word1 word2 word3=%[path,regsub("^/(here)(/|$)","my/\1",g)] + |-------------| |-----||-| + arg1=(here)(/|$) _/ / / + arg2=my/\1 ________________/ / + arg3 ______________________/ + +Remember that backslahes are not escape characters withing single quotes and +that the whole word3 above is already protected against them using the single +quotes. Conversely, if double quotes had been used around the whole expression, +single the dollar character and the backslashes would have been resolved at top +level, breaking the argument contents at the second level. 
+ +When in doubt, simply do not use quotes anywhere, and start to place single or +double quotes around arguments that require a comma or a closing parenthesis, +and think about escaping these quotes using a backslash if the string contains +a dollar or a backslash. Again, this is pretty similar to what is used under +a Bourne shell when double-escaping a command passed to "eval". For API writers +the best is probably to place escaped quotes around each and every argument, +regardless of their contents. Users will probably find that using single quotes +around the whole expression and double quotes around each argument provides +more readable configurations. 2.3. Environment variables @@ -4686,9 +4918,13 @@ http-after-response allow [ { if | unless } ] This stops the evaluation of the rules and lets the response pass the check. No further "http-after-response" rules are evaluated. -http-after-response del-header [ { if | unless } ] +http-after-response del-header [ -m ] [ { if | unless } ] - This removes all HTTP header fields whose name is specified in . + This removes all HTTP header fields whose name is specified in . + is the matching method, applied on the header name. Supported matching methods + are "str" (exact match), "beg" (prefix match), "end" (suffix match), "sub" + (substring match) and "reg" (regex match). If not specified, exact matching + method is used. http-after-response replace-header [ { if | unless } ] @@ -5460,9 +5696,13 @@ http-request del-acl() [ { if | unless } ] It is the equivalent of the "del acl" command from the stats socket, but can be triggered by an HTTP request. -http-request del-header [ { if | unless } ] +http-request del-header [ -m ] [ { if | unless } ] - This removes all HTTP header fields whose name is specified in . + This removes all HTTP header fields whose name is specified in . + is the matching method, applied on the header name. 
Supported matching methods + are "str" (exact match), "beg" (prefix match), "end" (suffix match), "sub" + (substring match) and "reg" (regex match). If not specified, exact matching + method is used. http-request del-map() [ { if | unless } ] @@ -6273,9 +6513,13 @@ http-response del-acl() [ { if | unless } ] It is the equivalent of the "del acl" command from the stats socket, but can be triggered by an HTTP response. -http-response del-header [ { if | unless } ] +http-response del-header [ -m ] [ { if | unless } ] - This removes all HTTP header fields whose name is specified in . + This removes all HTTP header fields whose name is specified in . + is the matching method, applied on the header name. Supported matching methods + are "str" (exact match), "beg" (prefix match), "end" (suffix match), "sub" + (substring match) and "reg" (regex match). If not specified, exact matching + method is used. http-response del-map() [ { if | unless } ] @@ -12595,10 +12839,11 @@ crt-list configuration set in bind line for the certificate. Wildcards are supported in the SNI filter. Negative filter are also supported, - only useful in combination with a wildcard filter to exclude a particular SNI. - The certificates will be presented to clients who provide a valid TLS Server - Name Indication field matching one of the SNI filters. If no SNI filter is - specified, the CN and alt subjects are used. This directive may be specified + useful in combination with a wildcard filter to exclude a particular SNI, or + after the first certificate to exclude a pattern from its CN or Subject Alt + Name (SAN). The certificates will be presented to clients who provide a valid + TLS Server Name Indication field matching one of the SNI filters. If no SNI + filter is specified, the CN and SAN are used. This directive may be specified multiple times. See the "crt" option for more information. The default certificate is still needed to meet OpenSSL expectations. 
If it is not used, the 'strict-sni' option may be used. @@ -12609,8 +12854,17 @@ crt-list Empty lines as well as lines beginning with a hash ('#') will be ignored. + The first declared certificate of a bind line is used as the default + certificate, either from crt or crt-list option, which haproxy should use in + the TLS handshake if no other certificate matches. This certificate will also + be used if the provided SNI matches its CN or SAN, even if a matching SNI + filter is found on any crt-list. The SNI filter !* can be used after the first + declared certificate to not include its CN and SAN in the SNI tree, so it will + never match except if no other certificate matches. This way the first + declared certificate act as a fallback. + crt-list file example: - cert1.pem + cert1.pem !* # comment cert2.pem [alpn h2,http/1.1] certW.pem *.domain.tld !secure.domain.tld @@ -14272,6 +14526,9 @@ The cache won't store and won't deliver objects in these cases: - If the response contains a Vary header - If the Content-Length + the headers size is greater than "max-object-size" - If the response is not cacheable +- If the response does not have an explicit expiration time (s-maxage or max-age + Cache-Control directives or Expires header) or a validator (ETag or Last-Modified + headers) - If the request is not a GET - If the HTTP version of the request is smaller than 1.1 @@ -17101,18 +17358,18 @@ ssl_c_ca_err_depth : integer verification of the client certificate. If no error is encountered, 0 is returned. -ssl_c_der : binary - Returns the DER formatted certificate presented by the client when the - incoming connection was made over an SSL/TLS transport layer. When used for - an ACL, the value(s) to match against can be passed in hexadecimal form. - -ssl_c_der_chain : binary +ssl_c_chain_der : binary Returns the DER formatted chain certificate presented by the client when the incoming connection was made over an SSL/TLS transport layer. 
When used for an ACL, the value(s) to match against can be passed in hexadecimal form. One can parse the result with any lib accepting ASN.1 DER data. It currentlly does not support resumed sessions. +ssl_c_der : binary + Returns the DER formatted certificate presented by the client when the + incoming connection was made over an SSL/TLS transport layer. When used for + an ACL, the value(s) to match against can be passed in hexadecimal form. + ssl_c_err : integer When the incoming connection was made over an SSL/TLS transport layer, returns the ID of the first error detected during verification at depth 0, or @@ -17571,25 +17828,6 @@ payload_lv(,[,]) : binary (deprecated) (e.g. "stick on", "stick match"), and for "res.payload_lv" when used in the context of a response such as in "stick store response". -req.hdrs : string - Returns the current request headers as string including the last empty line - separating headers from the request body. The last empty line can be used to - detect a truncated header block. This sample fetch is useful for some SPOE - headers analyzers and for advanced logging. - -req.hdrs_bin : binary - Returns the current request headers contained in preparsed binary form. This - is useful for offloading some processing with SPOE. Each string is described - by a length followed by the number of bytes indicated in the length. The - length is represented using the variable integer encoding detailed in the - SPOE documentation. The end of the list is marked by a couple of empty header - names and values (length of 0 for both). - - *() - - int: refer to the SPOE documentation for the encoding - str: - req.len : integer req_len : integer (deprecated) Returns an integer value corresponding to the number of bytes present in the @@ -18065,7 +18303,24 @@ hdr_val([[,]]) : integer (deprecated) the first one. Negative values indicate positions relative to the last one, with -1 being the last one. A typical use is with the X-Forwarded-For header. 
+req.hdrs : string + Returns the current request headers as string including the last empty line + separating headers from the request body. The last empty line can be used to + detect a truncated header block. This sample fetch is useful for some SPOE + headers analyzers and for advanced logging. + +req.hdrs_bin : binary + Returns the current request headers contained in preparsed binary form. This + is useful for offloading some processing with SPOE. Each string is described + by a length followed by the number of bytes indicated in the length. The + length is represented using the variable integer encoding detailed in the + SPOE documentation. The end of the list is marked by a couple of empty header + names and values (length of 0 for both). + + *() + int: refer to the SPOE documentation for the encoding + str: http_auth() : boolean Returns a boolean indicating whether the authentication data received from @@ -19213,7 +19468,7 @@ Please refer to the table below for currently defined variables : | H | %CS | captured_response_cookie | string | | | %H | hostname | string | | H | %HM | HTTP method (ex: POST) | string | - | H | %HP | HTTP request URI without query string (path) | string | + | H | %HP | HTTP request URI without query string | string | | H | %HQ | HTTP request URI query string (ex: ?bar=baz) | string | | H | %HU | HTTP request URI (ex: /foo?bar=baz) | string | | H | %HV | HTTP version (ex: HTTP/1.0) | string | diff --git a/include/haproxy/action-t.h b/include/haproxy/action-t.h index a002153d7..36aa5bbdc 100644 --- a/include/haproxy/action-t.h +++ b/include/haproxy/action-t.h @@ -78,7 +78,6 @@ enum act_name { ACT_ACTION_DENY, /* common http actions .*/ - ACT_HTTP_DEL_HDR, ACT_HTTP_REDIR, ACT_HTTP_SET_NICE, ACT_HTTP_SET_LOGL, diff --git a/include/haproxy/check-t.h b/include/haproxy/check-t.h index 727f837bb..274e162b5 100644 --- a/include/haproxy/check-t.h +++ b/include/haproxy/check-t.h @@ -20,6 +20,7 @@ #include #include #include +#include #include 
#include @@ -49,6 +50,8 @@ enum chk_result { #define CHK_ST_PAUSED 0x0008 /* checks are paused because of maintenance (health only) */ #define CHK_ST_AGENT 0x0010 /* check is an agent check (otherwise it's a health check) */ #define CHK_ST_PORT_MISS 0x0020 /* check can't be send because no port is configured to run it */ +#define CHK_ST_IN_ALLOC 0x0040 /* check blocked waiting for input buffer allocation */ +#define CHK_ST_OUT_ALLOC 0x0080 /* check blocked waiting for output buffer allocation */ /* check status */ enum healthcheck_status { @@ -145,6 +148,7 @@ struct check { struct xprt_ops *xprt; /* transport layer operations for health checks */ struct conn_stream *cs; /* conn_stream state for health checks */ struct buffer bi, bo; /* input and output buffers to send/recv check */ + struct buffer_wait buf_wait; /* Wait list for buffer allocation */ struct task *task; /* the task associated to the health check processing, NULL if disabled */ struct timeval start; /* last health check start time */ long duration; /* time in ms took to finish last health check */ diff --git a/include/haproxy/check.h b/include/haproxy/check.h index a1bdb012d..5e55da112 100644 --- a/include/haproxy/check.h +++ b/include/haproxy/check.h @@ -40,6 +40,9 @@ void check_notify_stopping(struct check *check); void check_notify_success(struct check *check); struct task *process_chk(struct task *t, void *context, unsigned short state); +int check_buf_available(void *target); +struct buffer *check_get_buf(struct check *check, struct buffer *bptr); +void check_release_buf(struct check *check, struct buffer *bptr); const char *init_check(struct check *check, int type); void free_check(struct check *check); diff --git a/include/haproxy/filters.h b/include/haproxy/filters.h index 48d3c254e..4a32c21cc 100644 --- a/include/haproxy/filters.h +++ b/include/haproxy/filters.h @@ -166,11 +166,10 @@ unregister_data_filter(struct stream *s, struct channel *chn, struct filter *fil } /* This function must be 
called when a filter alter payload data. It updates - * offsets of all previous filters and the offset of the stream. Do not call - * this function when a filter change the size of payload data leads to an - * undefined behavior. + * offsets of all previous filters. Do not call this function when a filter + * change the size of payload data leads to an undefined behavior. * - * This is the filter's responsiblitiy to update data itself.. + * This is the filter's responsiblitiy to update data itself. */ static inline void flt_update_offsets(struct filter *filter, struct channel *chn, int len) @@ -181,8 +180,7 @@ flt_update_offsets(struct filter *filter, struct channel *chn, int len) list_for_each_entry(f, &strm_flt(s)->filters, list) { if (f == filter) break; - if (IS_DATA_FILTER(filter, chn)) - FLT_OFF(f, chn) += len; + FLT_OFF(f, chn) += len; } } diff --git a/include/haproxy/global.h b/include/haproxy/global.h index 23ce30573..feb04c213 100644 --- a/include/haproxy/global.h +++ b/include/haproxy/global.h @@ -76,7 +76,7 @@ void hap_register_server_deinit(void (*fct)(struct server *)); void hap_register_per_thread_alloc(int (*fct)()); void hap_register_per_thread_init(int (*fct)()); void hap_register_per_thread_deinit(void (*fct)()); -void hap_register_per_thread_free(int (*fct)()); +void hap_register_per_thread_free(void (*fct)()); void mworker_accept_wrapper(int fd); void mworker_reload(); diff --git a/include/haproxy/http_ana-t.h b/include/haproxy/http_ana-t.h index bab438ea5..9449b7955 100644 --- a/include/haproxy/http_ana-t.h +++ b/include/haproxy/http_ana-t.h @@ -59,7 +59,7 @@ /* cacheability management, bits values 0x1000 to 0x3000 (0-3 shift 12) */ #define TX_CACHEABLE 0x00001000 /* at least part of the response is cacheable */ #define TX_CACHE_COOK 0x00002000 /* a cookie in the response is cacheable */ -#define TX_CACHE_IGNORE 0x00004000 /* do not retrieve object from cache */ +#define TX_CACHE_IGNORE 0x00004000 /* do not retrieve object from cache, or avoid 
caching response */ #define TX_CACHE_SHIFT 12 /* bit shift */ #define TX_CON_WANT_TUN 0x00008000 /* Will be a tunnel (CONNECT or 101-Switching-Protocol) */ diff --git a/include/haproxy/http_htx.h b/include/haproxy/http_htx.h index f90b4cc74..70df6806f 100644 --- a/include/haproxy/http_htx.h +++ b/include/haproxy/http_htx.h @@ -59,7 +59,7 @@ unsigned int http_get_htx_hdr(const struct htx *htx, const struct ist hdr, int occ, struct http_hdr_ctx *ctx, char **vptr, size_t *vlen); unsigned int http_get_htx_fhdr(const struct htx *htx, const struct ist hdr, int occ, struct http_hdr_ctx *ctx, char **vptr, size_t *vlen); -int http_str_to_htx(struct buffer *buf, struct ist raw); +int http_str_to_htx(struct buffer *buf, struct ist raw, char **errmsg); void release_http_reply(struct http_reply *http_reply); int http_check_http_reply(struct http_reply *reply, struct proxy*px, char **errmsg); diff --git a/include/haproxy/session.h b/include/haproxy/session.h index 6a24d8a59..fa21dda6c 100644 --- a/include/haproxy/session.h +++ b/include/haproxy/session.h @@ -78,9 +78,20 @@ static inline void session_unown_conn(struct session *sess, struct connection *c { struct sess_srv_list *srv_list = NULL; + /* WT: this currently is a workaround for an inconsistency between + * the link status of the connection in the session list and the + * connection's owner. This should be removed as soon as all this + * is addressed. Right now it's possible to enter here with a non-null + * conn->owner that points to a dead session, but in this case the + * element is not linked. 
+ */ + if (!LIST_ADDED(&conn->session_list)) + return; + if (conn->flags & CO_FL_SESS_IDLE) sess->idle_conns--; LIST_DEL_INIT(&conn->session_list); + conn->owner = NULL; list_for_each_entry(srv_list, &sess->srv_list, srv_list) { if (srv_list->target == conn->target) { if (LIST_ISEMPTY(&srv_list->conn_list)) { @@ -103,7 +114,7 @@ static inline int session_add_conn(struct session *sess, struct connection *conn int found = 0; /* Already attach to the session or not the connection owner */ - if (!LIST_ISEMPTY(&conn->session_list) || conn->owner != sess) + if (!LIST_ISEMPTY(&conn->session_list) || (conn->owner && conn->owner != sess)) return 1; list_for_each_entry(srv_list, &sess->srv_list, srv_list) { diff --git a/reg-tests/cache/basic.vtc b/reg-tests/cache/basic.vtc index e8255af51..849057d9e 100644 --- a/reg-tests/cache/basic.vtc +++ b/reg-tests/cache/basic.vtc @@ -6,7 +6,8 @@ feature ignore_unknown_macro server s1 { rxreq - txresp -nolen -hdr "Transfer-Encoding: chunked" + txresp -nolen -hdr "Transfer-Encoding: chunked" \ + -hdr "Cache-Control: max-age=5" chunkedlen 1 chunkedlen 1 chunkedlen 2 diff --git a/reg-tests/cache/caching_rules.vtc b/reg-tests/cache/caching_rules.vtc new file mode 100644 index 000000000..1abd92439 --- /dev/null +++ b/reg-tests/cache/caching_rules.vtc @@ -0,0 +1,150 @@ +varnishtest "Caching rules test" +# A respnse will not be cached unless it has an explicit age (Cache-Control max-age of s-maxage, Expires, Last-Modified headers, or ETag) + +#REQUIRE_VERSION=1.9 + +feature ignore_unknown_macro + +server s1 { + rxreq + expect req.url == "/max-age" + txresp -hdr "Cache-Control: max-age=5" \ + -bodylen 150 + + rxreq + expect req.url == "/s-maxage" + txresp -hdr "Cache-Control: s-maxage=5" \ + -bodylen 160 + + rxreq + expect req.url == "/last-modified" + txresp -hdr "Last-Modified: Thu, 22 Oct 2020 16:51:12 GMT" \ + -bodylen 180 + + rxreq + expect req.url == "/etag" + txresp -hdr "ETag: \"etag\"" \ + -bodylen 190 + + rxreq + expect req.url == 
"/uncacheable" + txresp \ + -bodylen 200 + + rxreq + expect req.url == "/uncacheable" + txresp \ + -bodylen 210 +} -start + +server s2 { + rxreq + expect req.url == "/expires" + # Expires header is filled directly by the expires_be backend" + txresp \ + -bodylen 170 +} -start + +haproxy h1 -conf { + defaults + mode http + ${no-htx} option http-use-htx + timeout connect 1s + timeout client 1s + timeout server 1s + + frontend fe + bind "fd@${fe}" + use_backend expires_be if { path_beg /expires } + default_backend test + + backend expires_be + http-request cache-use my_cache + server www ${s2_addr}:${s2_port} + http-response set-header X-Cache-Hit %[res.cache_hit] + # Expires value set in the future (current_time+5s) + http-response set-header Expires %[date(5),http_date] + http-response cache-store my_cache + + backend test + http-request cache-use my_cache + server www ${s1_addr}:${s1_port} + http-response cache-store my_cache + http-response set-header X-Cache-Hit %[res.cache_hit] + + cache my_cache + total-max-size 3 + max-age 20 + max-object-size 3072 +} -start + + +client c1 -connect ${h1_fe_sock} { + txreq -url "/max-age" + rxresp + expect resp.status == 200 + expect resp.bodylen == 150 + + txreq -url "/max-age" + rxresp + expect resp.status == 200 + expect resp.bodylen == 150 + expect resp.http.X-Cache-Hit == 1 + + txreq -url "/s-maxage" + rxresp + expect resp.status == 200 + expect resp.bodylen == 160 + + txreq -url "/s-maxage" + rxresp + expect resp.status == 200 + expect resp.bodylen == 160 + expect resp.http.X-Cache-Hit == 1 + + txreq -url "/expires" + rxresp + expect resp.status == 200 + expect resp.bodylen == 170 + + txreq -url "/expires" + rxresp + expect resp.status == 200 + expect resp.bodylen == 170 + expect resp.http.X-Cache-Hit == 1 + + txreq -url "/last-modified" + rxresp + expect resp.status == 200 + expect resp.bodylen == 180 + + txreq -url "/last-modified" + rxresp + expect resp.status == 200 + expect resp.bodylen == 180 + expect 
resp.http.X-Cache-Hit == 1 + + txreq -url "/etag" + rxresp + expect resp.status == 200 + expect resp.bodylen == 190 + + txreq -url "/etag" + rxresp + expect resp.status == 200 + expect resp.bodylen == 190 + expect resp.http.X-Cache-Hit == 1 + + # The next response should not be cached + txreq -url "/uncacheable" + rxresp + expect resp.status == 200 + expect resp.bodylen == 200 + + txreq -url "/uncacheable" + rxresp + expect resp.status == 200 + expect resp.bodylen == 210 + expect resp.http.X-Cache-Hit == 0 + +} -run diff --git a/reg-tests/cache/if-modified-since.vtc b/reg-tests/cache/if-modified-since.vtc index af8cbf0b8..e491e4660 100644 --- a/reg-tests/cache/if-modified-since.vtc +++ b/reg-tests/cache/if-modified-since.vtc @@ -19,7 +19,8 @@ server s1 { rxreq expect req.url == "/date" txresp -nolen -hdr "Transfer-Encoding: chunked" \ - -hdr "Date: Thu, 22 Oct 2020 16:51:12 GMT" + -hdr "Date: Thu, 22 Oct 2020 16:51:12 GMT" \ + -hdr "Cache-Control: max-age=5" chunkedlen 16 chunkedlen 16 chunkedlen 16 diff --git a/reg-tests/cache/sample_fetches.vtc b/reg-tests/cache/sample_fetches.vtc index 1ba069022..73e6e1bf6 100644 --- a/reg-tests/cache/sample_fetches.vtc +++ b/reg-tests/cache/sample_fetches.vtc @@ -7,7 +7,8 @@ feature ignore_unknown_macro server s1 { rxreq - txresp -nolen -hdr "Transfer-Encoding: chunked" + txresp -nolen -hdr "Transfer-Encoding: chunked" \ + -hdr "Cache-Control: max-age=5" chunkedlen 15 chunkedlen 15 chunkedlen 15 @@ -16,7 +17,8 @@ server s1 { server s2 { rxreq - txresp -nolen -hdr "Transfer-Encoding: chunked" + txresp -nolen -hdr "Transfer-Encoding: chunked" \ + -hdr "Cache-Control: max-age=5" chunkedlen 16 chunkedlen 16 chunkedlen 16 @@ -25,14 +27,16 @@ server s2 { server s3 { rxreq - txresp -nolen -hdr "Transfer-Encoding: chunked" + txresp -nolen -hdr "Transfer-Encoding: chunked" \ + -hdr "Cache-Control: max-age=5" chunkedlen 17 chunkedlen 17 chunkedlen 17 chunkedlen 0 rxreq - txresp -nolen -hdr "Transfer-Encoding: chunked" + txresp -nolen 
-hdr "Transfer-Encoding: chunked" \ + -hdr "Cache-Control: max-age=5" chunkedlen 17 chunkedlen 17 chunkedlen 17 diff --git a/reg-tests/checks/http-check.vtc b/reg-tests/checks/http-check.vtc index 1180c3de6..9ece54b29 100644 --- a/reg-tests/checks/http-check.vtc +++ b/reg-tests/checks/http-check.vtc @@ -82,6 +82,10 @@ syslog S1 -level notice { expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be[0-9]/srv succeeded.*code: 200" recv expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be[0-9]/srv succeeded.*code: 200" + recv + expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be[0-9]/srv succeeded.*code: 200" + recv + expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be[0-9]/srv succeeded.*code: 200" } -start haproxy h1 -conf { @@ -133,6 +137,21 @@ haproxy h1 -conf { ## implicit expect rule server srv ${s1_addr}:${s1_port} check inter 100ms rise 1 fall 1 + backend be5 + log ${S1_addr}:${S1_port} len 2048 local0 + option httpchk + server srv ${h1_li1_addr}:${h1_li1_port} proto h2 check inter 100ms rise 1 fall 1 + + backend be6 + log ${S1_addr}:${S1_port} len 2048 local0 + option httpchk GET /status HTTP/1.1 + server srv ${h1_li1_addr}:${h1_li1_port} check check-proto h2 inter 100ms rise 1 fall 1 + + listen li1 + mode http + bind "fd@${li1}" proto h2 + http-request return status 200 + } -start syslog S1 -wait diff --git a/reg-tests/converter/url_dec.vtc b/reg-tests/converter/url_dec.vtc new file mode 100644 index 000000000..464c35a27 --- /dev/null +++ b/reg-tests/converter/url_dec.vtc @@ -0,0 +1,38 @@ +varnishtest "url_dec converter Test" + +#REQUIRE_VERSION=1.6 + +feature ignore_unknown_macro + +server s1 { + rxreq + txresp +} -repeat 2 -start + +haproxy h1 -conf { + defaults + mode http + timeout connect 1s + timeout client 1s + timeout server 1s + + frontend fe + bind "fd@${fe}" + + http-request set-var(txn.url) url + http-response set-header url_dec0 "%[var(txn.url),url_dec]" + http-response set-header url_dec1 
"%[var(txn.url),url_dec(1)]" + + default_backend be + + backend be + server s1 ${s1_addr}:${s1_port} +} -start + +client c1 -connect ${h1_fe_sock} { + txreq -url "/bla+%20?foo%3Dbar%2B42+42%20" + rxresp + expect resp.http.url_dec0 == "/bla+ ?foo=bar+42 42 " + expect resp.http.url_dec1 == "/bla ?foo=bar+42 42 " + expect resp.status == 200 +} -run diff --git a/reg-tests/http-rules/del_header.vtc b/reg-tests/http-rules/del_header.vtc new file mode 100644 index 000000000..32a7a70a1 --- /dev/null +++ b/reg-tests/http-rules/del_header.vtc @@ -0,0 +1,93 @@ +varnishtest "del-header tests" + +# This config tests various http-request/response del-header operations +# with or without specified header name matching method. + +feature ignore_unknown_macro + +server s1 { + rxreq + expect req.url == / + expect req.http.x-always == always + expect req.http.x-str1 == + expect req.http.x-str2 == + expect req.http.x-beg1 == + expect req.http.x-beg2 == + expect req.http.x-end1 == + expect req.http.x-end2 == end2 + expect req.http.x-sub1 == + expect req.http.x-sub2 == + expect req.http.x-reg1 == + expect req.http.x-reg2 == + txresp -hdr "x-always: always" \ + -hdr "x-str1: str1" \ + -hdr "x-str2: str2" \ + -hdr "x-beg1: beg1" \ + -hdr "x-beg2: beg2" \ + -hdr "x-end1: end1" \ + -hdr "x-end2: end2" \ + -hdr "x-sub1: sub1" \ + -hdr "x-sub2: sub2" \ + -hdr "x-reg1: reg1" \ + -hdr "x-reg2: reg2" + +} -start + +haproxy h1 -conf { + defaults + mode http + timeout connect 1s + timeout client 1s + timeout server 1s + + frontend fe + bind "fd@${fe}" + + http-request del-header x-str1 + http-request del-header x-str2 -m str + http-request del-header x-beg -m beg + http-request del-header end1 -m end + http-request del-header sub -m sub + http-request del-header ^x.reg.$ -m reg + + http-response del-header x-str1 + http-response del-header x-str2 -m str + http-response del-header x-beg -m beg + http-response del-header end1 -m end + http-response del-header sub -m sub + http-response del-header 
^x.reg.$ -m reg + + default_backend be + + backend be + server s1 ${s1_addr}:${s1_port} + +} -start + +client c1 -connect ${h1_fe_sock} { + txreq -req GET -url / \ + -hdr "x-always: always" \ + -hdr "x-str1: str1" \ + -hdr "x-str2: str2" \ + -hdr "x-beg1: beg1" \ + -hdr "x-beg2: beg2" \ + -hdr "x-end1: end1" \ + -hdr "x-end2: end2" \ + -hdr "x-sub1: sub1" \ + -hdr "x-sub2: sub2" \ + -hdr "x-reg1: reg1" \ + -hdr "x-reg2: reg2" + rxresp + expect resp.status == 200 + expect resp.http.x-always == always + expect resp.http.x-str1 == + expect resp.http.x-str2 == + expect resp.http.x-beg1 == + expect resp.http.x-beg2 == + expect resp.http.x-end1 == + expect resp.http.x-end2 == end2 + expect resp.http.x-sub1 == + expect resp.http.x-sub2 == + expect resp.http.x-reg1 == + expect resp.http.x-reg2 == +} -run diff --git a/reg-tests/sample_fetches/cook.vtc b/reg-tests/sample_fetches/cook.vtc new file mode 100644 index 000000000..e2c1284da --- /dev/null +++ b/reg-tests/sample_fetches/cook.vtc @@ -0,0 +1,37 @@ +varnishtest "cook sample fetch Test" + +feature ignore_unknown_macro + +server s1 { + rxreq + txresp +} -start + +haproxy h1 -conf { + defaults + mode http + + frontend fe + bind "fd@${fe}" + http-request set-var(txn.count) req.cook_cnt() + http-request set-var(txn.val) req.cook_val() + http-request set-var(txn.val_cook2) req.cook_val(cook2) + http-response set-header count %[var(txn.count)] + http-response set-header val %[var(txn.val)] + http-response set-header val_cook2 %[var(txn.val_cook2)] + + default_backend be + + backend be + server srv1 ${s1_addr}:${s1_port} +} -start + +client c1 -connect ${h1_fe_sock} { + txreq -url "/" \ + -hdr "cookie: cook1=0; cook2=123; cook3=22" + rxresp + expect resp.status == 200 + expect resp.http.count == "3" + expect resp.http.val == "0" + expect resp.http.val_cook2 == "123" +} -run diff --git a/reg-tests/ssl/filters.crt-list b/reg-tests/ssl/filters.crt-list new file mode 100644 index 000000000..e72ee0bf4 --- /dev/null +++ 
b/reg-tests/ssl/filters.crt-list @@ -0,0 +1,2 @@ +common.pem *.bug810.domain.tld record.bug810.domain.tld *.bug818.domain.tld !another-record.bug818.domain.tld +ecdsa.pem record.bug810.domain.tld another-record.bug810.domain.tld *.bug818.domain.tld diff --git a/reg-tests/ssl/simple.crt-list b/reg-tests/ssl/simple.crt-list new file mode 100644 index 000000000..9ffacb469 --- /dev/null +++ b/reg-tests/ssl/simple.crt-list @@ -0,0 +1,5 @@ +common.pem record1.bug940.domain.tld +common.pem record2.bug940.domain.tld +ecdsa.pem record3.bug940.domain.tld +ecdsa.pem record4.bug940.domain.tld + diff --git a/reg-tests/ssl/ssl_client_samples.vtc b/reg-tests/ssl/ssl_client_samples.vtc index 54212b2c8..ccefa7940 100644 --- a/reg-tests/ssl/ssl_client_samples.vtc +++ b/reg-tests/ssl/ssl_client_samples.vtc @@ -1,7 +1,7 @@ #REGTEST_TYPE=devel varnishtest "Test the ssl_c_* sample fetches" -#REQUIRE_VERSION=2.3 +#REQUIRE_VERSION=2.2 #REQUIRE_OPTIONS=OPENSSL feature ignore_unknown_macro diff --git a/reg-tests/ssl/ssl_crt-list_filters.vtc b/reg-tests/ssl/ssl_crt-list_filters.vtc new file mode 100644 index 000000000..a5ba189c9 --- /dev/null +++ b/reg-tests/ssl/ssl_crt-list_filters.vtc @@ -0,0 +1,64 @@ +#REGTEST_TYPE=broken +varnishtest "Test for the bug #810 and #818" +# This test checks if the multiple certificate types works correctly with the +# SNI, and that the negative filters are correctly excluded + + +#REQUIRE_VERSION=2.2 +#REQUIRE_OPTIONS=OPENSSL +feature ignore_unknown_macro + +server s1 -repeat 3 { + rxreq + txresp +} -start + +haproxy h1 -conf { + global + tune.ssl.default-dh-param 2048 + crt-base ${testdir} + stats socket "${tmpdir}/h1/stats" level admin + + defaults + mode http + option httplog + log stderr local0 debug err + option logasap + timeout connect 1s + timeout client 1s + timeout server 1s + + + listen clear-lst + bind "fd@${clearlst}" + balance roundrobin + server s1 "${tmpdir}/ssl.sock" ssl verify none sni str(another-record.bug810.domain.tld) ssl-min-ver 
TLSv1.2 ssl-max-ver TLSv1.2 ciphers "kRSA" + server s2 "${tmpdir}/ssl.sock" ssl verify none sni str(another-record.bug810.domain.tld) ssl-min-ver TLSv1.2 ssl-max-ver TLSv1.2 ciphers "aECDSA" + server s3 "${tmpdir}/ssl.sock" ssl verify none sni str(another-record.bug818.domain.tld) ssl-min-ver TLSv1.2 ssl-max-ver TLSv1.2 ciphers "kRSA" + + listen ssl-lst + mode http + ${no-htx} option http-use-htx + bind "${tmpdir}/ssl.sock" ssl strict-sni ssl-min-ver TLSv1.2 ssl-max-ver TLSv1.2 ciphers "kRSA:aECDSA" crt-list ${testdir}/filters.crt-list + + server s1 ${s1_addr}:${s1_port} +} -start + + +client c1 -connect ${h1_clearlst_sock} { + txreq + rxresp + expect resp.status == 200 +} -run + +client c1 -connect ${h1_clearlst_sock} { + txreq + rxresp + expect resp.status == 200 +} -run + +client c1 -connect ${h1_clearlst_sock} { + txreq + rxresp + expect resp.status == 503 +} -run diff --git a/reg-tests/ssl/ssl_server_samples.vtc b/reg-tests/ssl/ssl_server_samples.vtc index f7c254685..53dd1b81c 100644 --- a/reg-tests/ssl/ssl_server_samples.vtc +++ b/reg-tests/ssl/ssl_server_samples.vtc @@ -1,7 +1,7 @@ #REGTEST_TYPE=devel varnishtest "Test the ssl_s_* sample fetches" -#REQUIRE_VERSION=2.3 +#REQUIRE_VERSION=2.2 #REQUIRE_OPTIONS=OPENSSL feature ignore_unknown_macro diff --git a/reg-tests/ssl/ssl_simple_crt-list.vtc b/reg-tests/ssl/ssl_simple_crt-list.vtc new file mode 100644 index 000000000..76605e352 --- /dev/null +++ b/reg-tests/ssl/ssl_simple_crt-list.vtc @@ -0,0 +1,51 @@ +#REGTEST_TYPE=bug +varnishtest "Test for the bug #940" +# Test that the SNI are correcly inserted with the same file multiple times. 
+ +#REQUIRE_VERSION=2.2 +#REQUIRE_OPTIONS=OPENSSL +feature ignore_unknown_macro + +server s1 -repeat 4 { + rxreq + txresp +} -start + +haproxy h1 -conf { + global + tune.ssl.default-dh-param 2048 + crt-base ${testdir} + stats socket "${tmpdir}/h1/stats" level admin + + defaults + mode http + option httplog + log stderr local0 debug err + option logasap + timeout connect 1s + timeout client 1s + timeout server 1s + + + listen clear-lst + bind "fd@${clearlst}" + balance roundrobin + server s1 "${tmpdir}/ssl.sock" ssl verify none sni str(record1.bug940.domain.tld) + server s2 "${tmpdir}/ssl.sock" ssl verify none sni str(record2.bug940.domain.tld) + server s3 "${tmpdir}/ssl.sock" ssl verify none sni str(record3.bug940.domain.tld) + server s4 "${tmpdir}/ssl.sock" ssl verify none sni str(record4.bug940.domain.tld) + + listen ssl-lst + mode http + ${no-htx} option http-use-htx + bind "${tmpdir}/ssl.sock" ssl strict-sni crt-list ${testdir}/simple.crt-list + + server s1 ${s1_addr}:${s1_port} +} -start + + +client c1 -repeat 4 -connect ${h1_clearlst_sock} { + txreq + rxresp + expect resp.status == 200 +} -run diff --git a/src/backend.c b/src/backend.c index e0f7dd627..a76f08ce2 100644 --- a/src/backend.c +++ b/src/backend.c @@ -1207,7 +1207,7 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv, /* attach the connection to the session private list */ conn->owner = s->sess; - session_add_conn(conn->owner, conn, conn->target); + session_add_conn(s->sess, conn, conn->target); } else { LIST_ADDQ(&srv->available_conns[tid], mt_list_to_list(&conn->list)); @@ -1563,7 +1563,7 @@ int connect_server(struct stream *s) ((s->be->options & PR_O_REUSE_MASK) == PR_O_REUSE_SAFE && srv_conn->mux->flags & MX_FL_HOL_RISK)) { /* If it fail now, the same will be done in mux->detach() callback */ - session_add_conn(srv_conn->owner, srv_conn, srv_conn->target); + session_add_conn(s->sess, srv_conn, srv_conn->target); } } diff --git a/src/cache.c b/src/cache.c index 
f86c96f09..4f8fad986 100644 --- a/src/cache.c +++ b/src/cache.c @@ -664,7 +664,7 @@ enum act_return http_action_store_cache(struct act_rule *rule, struct proxy *px, http_check_response_for_cacheability(s, &s->res); - if (!(txn->flags & TX_CACHEABLE) || !(txn->flags & TX_CACHE_COOK)) + if (!(txn->flags & TX_CACHEABLE) || !(txn->flags & TX_CACHE_COOK) || (txn->flags & TX_CACHE_IGNORE)) goto out; age = 0; diff --git a/src/cfgparse-tcp.c b/src/cfgparse-tcp.c index 0bf347bcc..4dc39d547 100644 --- a/src/cfgparse-tcp.c +++ b/src/cfgparse-tcp.c @@ -289,29 +289,6 @@ static struct srv_kw_list srv_kws = { "TCP", { }, { INITCALL1(STG_REGISTER, srv_register_keywords, &srv_kws); - -REGISTER_BUILD_OPTS("Built with transparent proxy support using:" -#if defined(IP_TRANSPARENT) - " IP_TRANSPARENT" -#endif -#if defined(IPV6_TRANSPARENT) - " IPV6_TRANSPARENT" -#endif -#if defined(IP_FREEBIND) - " IP_FREEBIND" -#endif -#if defined(IP_BINDANY) - " IP_BINDANY" -#endif -#if defined(IPV6_BINDANY) - " IPV6_BINDANY" -#endif -#if defined(SO_BINDANY) - " SO_BINDANY" -#endif - ""); - - /* * Local variables: * c-indent-level: 8 diff --git a/src/cfgparse.c b/src/cfgparse.c index 4d3c8ad55..f1d5a41b2 100644 --- a/src/cfgparse.c +++ b/src/cfgparse.c @@ -931,7 +931,6 @@ int cfg_parse_peers(const char *file, int linenum, char **args, int kwm) int cfg_parse_resolvers(const char *file, int linenum, char **args, int kwm) { static struct dns_resolvers *curr_resolvers = NULL; - struct dns_nameserver *newnameserver = NULL; const char *err; int err_code = 0; char *errmsg = NULL; @@ -992,6 +991,7 @@ int cfg_parse_resolvers(const char *file, int linenum, char **args, int kwm) HA_SPIN_INIT(&curr_resolvers->lock); } else if (strcmp(args[0], "nameserver") == 0) { /* nameserver definition */ + struct dns_nameserver *newnameserver = NULL; struct sockaddr_storage *sk; int port1, port2; @@ -1043,6 +1043,7 @@ int cfg_parse_resolvers(const char *file, int linenum, char **args, int kwm) newnameserver->addr = *sk; } 
else if (strcmp(args[0], "parse-resolv-conf") == 0) { + struct dns_nameserver *newnameserver = NULL; const char *whitespace = "\r\n\t "; char *resolv_line = NULL; int resolv_linenum = 0; @@ -1131,6 +1132,7 @@ int cfg_parse_resolvers(const char *file, int linenum, char **args, int kwm) if (newnameserver->conf.file == NULL) { ha_alert("parsing [/etc/resolv.conf:%d] : out of memory.\n", resolv_linenum); err_code |= ERR_ALERT | ERR_FATAL; + free(newnameserver); goto resolv_out; } @@ -1138,6 +1140,8 @@ int cfg_parse_resolvers(const char *file, int linenum, char **args, int kwm) if (newnameserver->id == NULL) { ha_alert("parsing [/etc/resolv.conf:%d] : out of memory.\n", resolv_linenum); err_code |= ERR_ALERT | ERR_FATAL; + free((char *)newnameserver->conf.file); + free(newnameserver); goto resolv_out; } @@ -1287,7 +1291,7 @@ int cfg_parse_resolvers(const char *file, int linenum, char **args, int kwm) err_code |= ERR_ALERT | ERR_FATAL; goto out; } - } /* neither "nameserver" nor "resolvers" */ + } else if (*args[0] != 0) { ha_alert("parsing [%s:%d] : unknown keyword '%s' in '%s' section\n", file, linenum, args[0], cursection); err_code |= ERR_ALERT | ERR_FATAL; diff --git a/src/check.c b/src/check.c index c281bb279..337030fbd 100644 --- a/src/check.c +++ b/src/check.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -866,8 +867,6 @@ static struct task *process_chk_conn(struct task *t, void *context, unsigned sho set_server_check_status(check, HCHK_STATUS_START, NULL); check->state |= CHK_ST_INPROGRESS; - b_reset(&check->bi); - b_reset(&check->bo); task_set_affinity(t, tid_bit); @@ -936,7 +935,9 @@ static struct task *process_chk_conn(struct task *t, void *context, unsigned sho } } task_set_affinity(t, MAX_THREADS_MASK); - check->state &= ~CHK_ST_INPROGRESS; + check_release_buf(check, &check->bi); + check_release_buf(check, &check->bo); + check->state &= ~(CHK_ST_INPROGRESS|CHK_ST_IN_ALLOC|CHK_ST_OUT_ALLOC); if (check->server) { rv = 0; 
@@ -961,18 +962,65 @@ static struct task *process_chk_conn(struct task *t, void *context, unsigned sho /**************************************************************************/ /************************** Init/deinit checks ****************************/ /**************************************************************************/ -const char *init_check(struct check *check, int type) +/* + * Tries to grab a buffer and to re-enables processing on check . The + * check flags are used to figure what buffer was requested. It returns 1 if the + * allocation succeeds, in which case the I/O tasklet is woken up, or 0 if it's + * impossible to wake up and we prefer to be woken up later. + */ +int check_buf_available(void *target) { - check->type = type; + struct check *check = target; + + if ((check->state & CHK_ST_IN_ALLOC) && b_alloc_margin(&check->bi, 0)) { + check->state &= ~CHK_ST_IN_ALLOC; + tasklet_wakeup(check->wait_list.tasklet); + return 1; + } + if ((check->state & CHK_ST_OUT_ALLOC) && b_alloc_margin(&check->bo, 0)) { + check->state &= ~CHK_ST_OUT_ALLOC; + tasklet_wakeup(check->wait_list.tasklet); + return 1; + } + + return 0; +} + +/* + * Allocate a buffer. If if fails, it adds the check in buffer wait queue. + */ +struct buffer *check_get_buf(struct check *check, struct buffer *bptr) +{ + struct buffer *buf = NULL; - b_reset(&check->bi); check->bi.size = global.tune.chksize; - b_reset(&check->bo); check->bo.size = global.tune.chksize; + if (likely(!MT_LIST_ADDED(&check->buf_wait.list)) && + unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) { + check->buf_wait.target = check; + check->buf_wait.wakeup_cb = check_buf_available; + MT_LIST_ADDQ(&buffer_wq, &check->buf_wait.list); + } + return buf; +} - check->bi.area = calloc(check->bi.size, sizeof(*check->bi.area)); - check->bo.area = calloc(check->bo.size, sizeof(*check->bo.area)); +/* + * Release a buffer, if any, and try to wake up entities waiting in the buffer + * wait queue. 
+ */ +void check_release_buf(struct check *check, struct buffer *bptr) +{ + if (bptr->size) { + b_free(bptr); + offer_buffers(check->buf_wait.target, tasks_run_queue); + } +} - if (!check->bi.area || !check->bo.area) - return "out of memory while allocating check buffer"; +const char *init_check(struct check *check, int type) +{ + check->type = type; + + check->bi = BUF_NULL; + check->bo = BUF_NULL; + MT_LIST_INIT(&check->buf_wait.list); check->wait_list.tasklet = tasklet_new(); if (!check->wait_list.tasklet) @@ -989,8 +1037,8 @@ void free_check(struct check *check) if (check->wait_list.tasklet) tasklet_free(check->wait_list.tasklet); - free(check->bi.area); - free(check->bo.area); + check_release_buf(check, &check->bi); + check_release_buf(check, &check->bo); if (check->cs) { free(check->cs->conn); check->cs->conn = NULL; @@ -1228,10 +1276,12 @@ static int init_srv_check(struct server *srv) const char *err; struct tcpcheck_rule *r; int ret = 0; + int check_type; if (!srv->do_check) goto out; + check_type = srv->check.tcpcheck_rules->flags & TCPCHK_RULES_PROTO_CHK; /* If neither a port nor an addr was specified and no check transport * layer is forced, then the transport layer used by the checks is the @@ -1253,8 +1303,20 @@ static int init_srv_check(struct server *srv) /* Inherit the mux protocol from the server if not already defined for * the check */ - if (srv->mux_proto && !srv->check.mux_proto) + if (srv->mux_proto && !srv->check.mux_proto && + ((srv->mux_proto->mode == PROTO_MODE_HTTP && check_type == TCPCHK_RULES_HTTP_CHK) || + (srv->mux_proto->mode == PROTO_MODE_TCP && check_type != TCPCHK_RULES_HTTP_CHK))) { srv->check.mux_proto = srv->mux_proto; + } + /* test that check proto is valid if explicitly defined */ + else if (srv->check.mux_proto && + ((srv->check.mux_proto->mode == PROTO_MODE_HTTP && check_type != TCPCHK_RULES_HTTP_CHK) || + (srv->check.mux_proto->mode == PROTO_MODE_TCP && check_type == TCPCHK_RULES_HTTP_CHK))) { + ha_alert("config: %s '%s': 
server '%s' uses an incompatible MUX protocol for the selected check type\n", + proxy_type_str(srv->proxy), srv->proxy->id, srv->id); + ret |= ERR_ALERT | ERR_FATAL; + goto out; + } /* validate server health-check settings */ @@ -1584,6 +1646,7 @@ int proxy_parse_tcp_check_opt(char **args, int cur_arg, struct proxy *curpx, str ruleset_found: free_tcpcheck_vars(&rules->preset_vars); rules->list = &rs->rules; + rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS); rules->flags |= TCPCHK_RULES_TCP_CHK; out: @@ -1654,6 +1717,7 @@ int proxy_parse_redis_check_opt(char **args, int cur_arg, struct proxy *curpx, s ruleset_found: rules->list = &rs->rules; + rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS); rules->flags |= TCPCHK_RULES_REDIS_CHK; out: @@ -1753,6 +1817,7 @@ int proxy_parse_ssl_hello_chk_opt(char **args, int cur_arg, struct proxy *curpx, ruleset_found: rules->list = &rs->rules; + rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS); rules->flags |= TCPCHK_RULES_SSL3_CHK; out: @@ -1890,6 +1955,7 @@ int proxy_parse_smtpchk_opt(char **args, int cur_arg, struct proxy *curpx, struc ruleset_found: rules->list = &rs->rules; + rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS); rules->flags |= TCPCHK_RULES_SMTP_CHK; out: @@ -2035,6 +2101,7 @@ int proxy_parse_pgsql_check_opt(char **args, int cur_arg, struct proxy *curpx, s ruleset_found: rules->list = &rs->rules; + rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS); rules->flags |= TCPCHK_RULES_PGSQL_CHK; out: @@ -2258,6 +2325,7 @@ int proxy_parse_mysql_check_opt(char **args, int cur_arg, struct proxy *curpx, s ruleset_found: rules->list = &rs->rules; + rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS); rules->flags |= TCPCHK_RULES_MYSQL_CHK; out: @@ -2341,6 +2409,7 @@ int proxy_parse_ldap_check_opt(char **args, int cur_arg, struct proxy *curpx, st ruleset_found: rules->list = &rs->rules; + rules->flags &= 
~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS); rules->flags |= TCPCHK_RULES_LDAP_CHK; out: @@ -2416,6 +2485,7 @@ int proxy_parse_spop_check_opt(char **args, int cur_arg, struct proxy *curpx, st ruleset_found: rules->list = &rs->rules; + rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS); rules->flags |= TCPCHK_RULES_SPOP_CHK; out: @@ -2606,6 +2676,7 @@ int proxy_parse_httpchk_opt(char **args, int cur_arg, struct proxy *curpx, struc } rules->list = &rs->rules; + rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS); rules->flags |= TCPCHK_RULES_HTTP_CHK; if (!tcpcheck_add_http_rule(chk, rules, &errmsg)) { ha_alert("parsing [%s:%d] : '%s %s' : %s.\n", file, line, args[0], args[1], errmsg); @@ -2736,6 +2807,7 @@ static int srv_parse_agent_check(char **args, int *cur_arg, struct proxy *curpx, ruleset_found: rules->list = &rs->rules; + rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS); rules->flags |= TCPCHK_RULES_AGENT_CHK; srv->do_agent = 1; diff --git a/src/connection.c b/src/connection.c index dc54b9d63..366bb25b5 100644 --- a/src/connection.c +++ b/src/connection.c @@ -54,10 +54,10 @@ int conn_create_mux(struct connection *conn) goto fail; if (sess && obj_type(sess->origin) == OBJ_TYPE_CHECK) { - if (conn_install_mux_chk(conn, conn->ctx, conn->owner) < 0) + if (conn_install_mux_chk(conn, conn->ctx, sess) < 0) goto fail; } - else if (conn_install_mux_be(conn, conn->ctx, conn->owner) < 0) + else if (conn_install_mux_be(conn, conn->ctx, sess) < 0) goto fail; srv = objt_server(conn->target); @@ -72,7 +72,7 @@ int conn_create_mux(struct connection *conn) LIST_ADDQ(&srv->available_conns[tid], mt_list_to_list(&conn->list)); else if (conn->flags & CO_FL_PRIVATE) { /* If it fail now, the same will be done in mux->detach() callback */ - session_add_conn(conn->owner, conn, conn->target); + session_add_conn(sess, conn, conn->target); } return 0; fail: diff --git a/src/filters.c b/src/filters.c index d18f77332..ca72d759b 100644 --- 
a/src/filters.c +++ b/src/filters.c @@ -546,10 +546,21 @@ flt_set_stream_backend(struct stream *s, struct proxy *be) int flt_http_end(struct stream *s, struct http_msg *msg) { + unsigned long long *strm_off = &FLT_STRM_OFF(s, msg->chn); + unsigned int offset = 0; int ret = 1; DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_FLT_ANA, s, s->txn, msg); RESUME_FILTER_LOOP(s, msg->chn) { + unsigned long long flt_off = FLT_OFF(filter, msg->chn); + offset = flt_off - *strm_off; + + /* Call http_end for data filters only. But the filter offset is + * still valid for all filters + . */ + if (!IS_DATA_FILTER(filter, msg->chn)) + continue; + if (FLT_OPS(filter)->http_end) { DBG_TRACE_DEVEL(FLT_ID(filter), STRM_EV_HTTP_ANA|STRM_EV_FLT_ANA, s); ret = FLT_OPS(filter)->http_end(s, filter, msg); @@ -557,6 +568,10 @@ flt_http_end(struct stream *s, struct http_msg *msg) BREAK_EXECUTION(s, msg->chn, end); } } RESUME_FILTER_END; + + c_adv(msg->chn, offset); + *strm_off += offset; + end: DBG_TRACE_LEAVE(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_FLT_ANA, s); return ret; @@ -620,13 +635,18 @@ flt_http_payload(struct stream *s, struct http_msg *msg, unsigned int len) ret = data = len - out; DBG_TRACE_ENTER(STRM_EV_STRM_ANA|STRM_EV_HTTP_ANA|STRM_EV_FLT_ANA, s, s->txn, msg); list_for_each_entry(filter, &strm_flt(s)->filters, list) { - /* Call "data" filters only */ - if (!IS_DATA_FILTER(filter, msg->chn)) + unsigned long long *flt_off = &FLT_OFF(filter, msg->chn); + unsigned int offset = *flt_off - *strm_off; + + /* Call http_payload for filters only. 
Forward all data for + * others and update the filter offset + */ + if (!IS_DATA_FILTER(filter, msg->chn)) { + *flt_off += data - offset; continue; - if (FLT_OPS(filter)->http_payload) { - unsigned long long *flt_off = &FLT_OFF(filter, msg->chn); - unsigned int offset = *flt_off - *strm_off; + } + if (FLT_OPS(filter)->http_payload) { DBG_TRACE_DEVEL(FLT_ID(filter), STRM_EV_HTTP_ANA|STRM_EV_FLT_ANA, s); ret = FLT_OPS(filter)->http_payload(s, filter, msg, out + offset, data - offset); if (ret < 0) @@ -784,10 +804,8 @@ flt_analyze_http_headers(struct stream *s, struct channel *chn, unsigned int an_ size_t data = http_get_hdrs_size(htxbuf(&chn->buf)); struct filter *f; - list_for_each_entry(f, &strm_flt(s)->filters, list) { - if (IS_DATA_FILTER(f, chn)) - FLT_OFF(f, chn) = data; - } + list_for_each_entry(f, &strm_flt(s)->filters, list) + FLT_OFF(f, chn) = data; } check_result: @@ -885,12 +903,18 @@ flt_tcp_payload(struct stream *s, struct channel *chn, unsigned int len) ret = data = len - out; DBG_TRACE_ENTER(STRM_EV_TCP_ANA|STRM_EV_FLT_ANA, s); list_for_each_entry(filter, &strm_flt(s)->filters, list) { - /* Call "data" filters only */ - if (!IS_DATA_FILTER(filter, chn)) + unsigned long long *flt_off = &FLT_OFF(filter, chn); + unsigned int offset = *flt_off - *strm_off; + + /* Call tcp_payload for filters only. 
Forward all data for + * others and update the filter offset + */ + if (!IS_DATA_FILTER(filter, chn)) { + *flt_off += data - offset; continue; + } + if (FLT_OPS(filter)->tcp_payload) { - unsigned long long *flt_off = &FLT_OFF(filter, chn); - unsigned int offset = *flt_off - *strm_off; DBG_TRACE_DEVEL(FLT_ID(filter), STRM_EV_TCP_ANA|STRM_EV_FLT_ANA, s); ret = FLT_OPS(filter)->tcp_payload(s, filter, chn, out + offset, data - offset); diff --git a/src/flt_spoe.c b/src/flt_spoe.c index cf5fc7a4c..58a7e7903 100644 --- a/src/flt_spoe.c +++ b/src/flt_spoe.c @@ -1009,8 +1009,17 @@ spoe_handle_agentack_frame(struct appctx *appctx, struct spoe_context **ctx, (unsigned int)stream_id, (unsigned int)frame_id); SPOE_APPCTX(appctx)->status_code = SPOE_FRM_ERR_FRAMEID_NOTFOUND; - if (appctx->st0 == SPOE_APPCTX_ST_WAITING_SYNC_ACK) - return -1; + if (appctx->st0 == SPOE_APPCTX_ST_WAITING_SYNC_ACK) { + /* Report an error if we are waiting the ack for another frame, + * but not if there is no longer frame waiting for a ack + * (timeout) + */ + if (!LIST_ISEMPTY(&SPOE_APPCTX(appctx)->waiting_queue) || + SPOE_APPCTX(appctx)->frag_ctx.ctx) + return -1; + appctx->st0 = SPOE_APPCTX_ST_PROCESSING; + SPOE_APPCTX(appctx)->cur_fpa = 0; + } return 0; found: @@ -1253,6 +1262,7 @@ spoe_release_appctx(struct appctx *appctx) LIST_INIT(&ctx->list); _HA_ATOMIC_SUB(&agent->counters.nb_waiting, 1); spoe_update_stat_time(&ctx->stats.tv_wait, &ctx->stats.t_waiting); + ctx->spoe_appctx = NULL; ctx->state = SPOE_CTX_ST_ERROR; ctx->status_code = (spoe_appctx->status_code + 0x100); task_wakeup(ctx->strm->task, TASK_WOKEN_MSG); @@ -1268,8 +1278,13 @@ spoe_release_appctx(struct appctx *appctx) task_wakeup(ctx->strm->task, TASK_WOKEN_MSG); } - if (!LIST_ISEMPTY(&agent->rt[tid].applets)) + if (!LIST_ISEMPTY(&agent->rt[tid].applets)) { + list_for_each_entry_safe(ctx, back, &agent->rt[tid].waiting_queue, list) { + if (ctx->spoe_appctx == spoe_appctx) + ctx->spoe_appctx = NULL; + } goto end; + } /* If this was the 
last running applet, notify all waiting streams */ list_for_each_entry_safe(ctx, back, &agent->rt[tid].sending_queue, list) { @@ -1277,6 +1292,7 @@ spoe_release_appctx(struct appctx *appctx) LIST_INIT(&ctx->list); _HA_ATOMIC_SUB(&agent->counters.nb_sending, 1); spoe_update_stat_time(&ctx->stats.tv_queue, &ctx->stats.t_queue); + ctx->spoe_appctx = NULL; ctx->state = SPOE_CTX_ST_ERROR; ctx->status_code = (spoe_appctx->status_code + 0x100); task_wakeup(ctx->strm->task, TASK_WOKEN_MSG); @@ -1286,6 +1302,7 @@ spoe_release_appctx(struct appctx *appctx) LIST_INIT(&ctx->list); _HA_ATOMIC_SUB(&agent->counters.nb_waiting, 1); spoe_update_stat_time(&ctx->stats.tv_wait, &ctx->stats.t_waiting); + ctx->spoe_appctx = NULL; ctx->state = SPOE_CTX_ST_ERROR; ctx->status_code = (spoe_appctx->status_code + 0x100); task_wakeup(ctx->strm->task, TASK_WOKEN_MSG); diff --git a/src/haproxy.c b/src/haproxy.c index a7ebaddcb..964e5cd7b 100644 --- a/src/haproxy.c +++ b/src/haproxy.c @@ -393,7 +393,7 @@ struct server_deinit_fct { struct list per_thread_free_list = LIST_HEAD_INIT(per_thread_free_list); struct per_thread_free_fct { struct list list; - int (*fct)(); + void (*fct)(); }; /* These functions are called for each thread just after the scheduler loop and @@ -565,7 +565,7 @@ void hap_register_per_thread_deinit(void (*fct)()) } /* used to register some free functions to call for each thread. 
*/ -void hap_register_per_thread_free(int (*fct)()) +void hap_register_per_thread_free(void (*fct)()) { struct per_thread_free_fct *b; @@ -632,6 +632,9 @@ static void display_build_opts() #ifdef BUILD_OPTIONS "\n OPTIONS = " BUILD_OPTIONS #endif +#ifdef BUILD_DEBUG + "\n DEBUG = " BUILD_DEBUG +#endif #ifdef BUILD_FEATURES "\n\nFeature list : " BUILD_FEATURES #endif diff --git a/src/hlua.c b/src/hlua.c index 97dcebd88..f497892be 100644 --- a/src/hlua.c +++ b/src/hlua.c @@ -1664,6 +1664,7 @@ __LJMP static inline int _hlua_map_lookup(struct lua_State *L, int str) smp.data.type = SMP_T_STR; smp.flags = SMP_F_CONST; smp.data.u.str.area = (char *)MAY_LJMP(luaL_checklstring(L, 2, (size_t *)&smp.data.u.str.data)); + smp.data.u.str.size = smp.data.u.str.data + 1; } pat = pattern_exec_match(&desc->pat, &smp, 1); diff --git a/src/http.c b/src/http.c index bb99c5098..22a5a1418 100644 --- a/src/http.c +++ b/src/http.c @@ -651,6 +651,7 @@ char *http_find_cookie_value_end(char *s, const char *e) } /* Try to find the next occurrence of a cookie name in a cookie header value. + * To match on any cookie name, must be set to 0. * The lookup begins at . The pointer and size of the next occurrence of * the cookie value is returned into *value and *value_l, and the function * returns a pointer to the next pointer to search from if the value was found. @@ -728,8 +729,8 @@ char *http_extract_cookie_value(char *hdr, const char *hdr_end, * its value between val_beg and val_end. 
*/ - if (att_end - att_beg == cookie_name_l && - memcmp(att_beg, cookie_name, cookie_name_l) == 0) { + if (cookie_name_l == 0 || (att_end - att_beg == cookie_name_l && + memcmp(att_beg, cookie_name, cookie_name_l) == 0)) { /* let's return this value and indicate where to go on from */ *value = val_beg; *value_l = val_end - val_beg; diff --git a/src/http_act.c b/src/http_act.c index 27db47833..85a534e8d 100644 --- a/src/http_act.c +++ b/src/http_act.c @@ -1438,20 +1438,67 @@ static enum act_parse_ret parse_http_replace_header(const char **args, int *orig return ACT_RET_PRS_OK; } -/* Parse a "del-header" action. It takes an header name as argument. It returns - * ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error. +/* This function executes a del-header action with selected matching mode for + * header name. It finds the matching method to be performed in <.action>, previously + * filled by function parse_http_del_header(). On success, it returns ACT_RET_CONT. + * Otherwise ACT_RET_ERR is returned. + */ +static enum act_return http_action_del_header(struct act_rule *rule, struct proxy *px, + struct session *sess, struct stream *s, int flags) +{ + struct http_hdr_ctx ctx; + struct http_msg *msg = ((rule->from == ACT_F_HTTP_REQ) ? 
&s->txn->req : &s->txn->rsp); + struct htx *htx = htxbuf(&msg->chn->buf); + enum act_return ret = ACT_RET_CONT; + + /* remove all occurrences of the header */ + ctx.blk = NULL; + switch (rule->action) { + case PAT_MATCH_STR: + while (http_find_header(htx, rule->arg.http.str, &ctx, 1)) + http_remove_header(htx, &ctx); + break; + case PAT_MATCH_BEG: + while (http_find_pfx_header(htx, rule->arg.http.str, &ctx, 1)) + http_remove_header(htx, &ctx); + break; + case PAT_MATCH_END: + while (http_find_sfx_header(htx, rule->arg.http.str, &ctx, 1)) + http_remove_header(htx, &ctx); + break; + case PAT_MATCH_SUB: + while (http_find_sub_header(htx, rule->arg.http.str, &ctx, 1)) + http_remove_header(htx, &ctx); + break; + case PAT_MATCH_REG: + while (http_match_header(htx, rule->arg.http.re, &ctx, 1)) + http_remove_header(htx, &ctx); + break; + default: + return ACT_RET_ERR; + } + return ret; +} + +/* Parse a "del-header" action. It takes string as a required argument, + * optional flag (currently only -m) and optional matching method of input string + * with header name to be deleted. Default matching method is exact match (-m str). + * It returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error. */ static enum act_parse_ret parse_http_del_header(const char **args, int *orig_arg, struct proxy *px, struct act_rule *rule, char **err) { int cur_arg; + int pat_idx; - rule->action = ACT_HTTP_DEL_HDR; + /* set exact matching (-m str) as default */ + rule->action = PAT_MATCH_STR; + rule->action_ptr = http_action_del_header; rule->release_ptr = release_http_action; cur_arg = *orig_arg; if (!*args[cur_arg]) { - memprintf(err, "expects exactly 1 arguments"); + memprintf(err, "expects at least 1 argument"); return ACT_RET_PRS_ERR; } @@ -1460,6 +1507,32 @@ static enum act_parse_ret parse_http_del_header(const char **args, int *orig_arg px->conf.args.ctx = (rule->from == ACT_F_HTTP_REQ ? 
ARGC_HRQ : ARGC_HRS); LIST_INIT(&rule->arg.http.fmt); + if (strcmp(args[cur_arg+1], "-m") == 0) { + cur_arg++; + if (!*args[cur_arg+1]) { + memprintf(err, "-m flag expects exactly 1 argument"); + return ACT_RET_PRS_ERR; + } + + cur_arg++; + pat_idx = pat_find_match_name(args[cur_arg]); + switch (pat_idx) { + case PAT_MATCH_REG: + if (!(rule->arg.http.re = regex_comp(rule->arg.http.str.ptr, 1, 1, err))) + return ACT_RET_PRS_ERR; + /* fall through */ + case PAT_MATCH_STR: + case PAT_MATCH_BEG: + case PAT_MATCH_END: + case PAT_MATCH_SUB: + rule->action = pat_idx; + break; + default: + memprintf(err, "-m with unsupported matching method '%s'", args[cur_arg]); + return ACT_RET_PRS_ERR; + } + } + *orig_arg = cur_arg + 1; return ACT_RET_PRS_OK; } diff --git a/src/http_ana.c b/src/http_ana.c index 3096cfec5..341989393 100644 --- a/src/http_ana.c +++ b/src/http_ana.c @@ -1050,6 +1050,10 @@ int http_wait_for_request_body(struct stream *s, struct channel *req, int an_bit if (htx->flags & HTX_FL_PROCESSING_ERROR) goto return_int_err; + /* CONNECT requests have no body */ + if (txn->meth == HTTP_METH_CONNECT) + goto http_end; + if (msg->msg_state < HTTP_MSG_BODY) goto missing_data; @@ -2840,14 +2844,10 @@ static enum rule_result http_req_get_intercept_rule(struct proxy *px, struct lis { struct session *sess = strm_sess(s); struct http_txn *txn = s->txn; - struct htx *htx; struct act_rule *rule; - struct http_hdr_ctx ctx; enum rule_result rule_ret = HTTP_RULE_RES_CONT; int act_opts = 0; - htx = htxbuf(&s->req.buf); - /* If "the current_rule_list" match the executed rule list, we are in * resume condition. If a resume is needed it is always in the action * and never in the ACL or converters. 
In this case, we initialise the @@ -2960,13 +2960,6 @@ static enum rule_result http_req_get_intercept_rule(struct proxy *px, struct lis s->logs.level = rule->arg.http.i; break; - case ACT_HTTP_DEL_HDR: - /* remove all occurrences of the header */ - ctx.blk = NULL; - while (http_find_header(htx, rule->arg.http.str, &ctx, 1)) - http_remove_header(htx, &ctx); - break; - /* other flags exists, but normally, they never be matched. */ default: break; @@ -2996,14 +2989,10 @@ static enum rule_result http_res_get_intercept_rule(struct proxy *px, struct lis { struct session *sess = strm_sess(s); struct http_txn *txn = s->txn; - struct htx *htx; struct act_rule *rule; - struct http_hdr_ctx ctx; enum rule_result rule_ret = HTTP_RULE_RES_CONT; int act_opts = 0; - htx = htxbuf(&s->res.buf); - /* If "the current_rule_list" match the executed rule list, we are in * resume condition. If a resume is needed it is always in the action * and never in the ACL or converters. In this case, we initialise the @@ -3104,13 +3093,6 @@ static enum rule_result http_res_get_intercept_rule(struct proxy *px, struct lis s->logs.level = rule->arg.http.i; break; - case ACT_HTTP_DEL_HDR: - /* remove all occurrences of the header */ - ctx.blk = NULL; - while (http_find_header(htx, rule->arg.http.str, &ctx, 1)) - http_remove_header(htx, &ctx); - break; - case ACT_HTTP_REDIR: rule_ret = HTTP_RULE_RES_ABRT; if (!http_apply_redirect_rule(rule->arg.redir, s, txn)) @@ -3918,6 +3900,8 @@ void http_check_response_for_cacheability(struct stream *s, struct channel *res) struct http_txn *txn = s->txn; struct http_hdr_ctx ctx = { .blk = NULL }; struct htx *htx; + int has_freshness_info = 0; + int has_validator = 0; if (txn->status < 200) { /* do not try to cache interim responses! 
*/ @@ -3955,7 +3939,37 @@ void http_check_response_for_cacheability(struct stream *s, struct channel *res) txn->flags &= ~TX_CACHE_COOK; continue; } + + if (istmatchi(ctx.value, ist("s-maxage")) || + istmatchi(ctx.value, ist("max-age"))) { + has_freshness_info = 1; + continue; + } } + + /* If no freshness information could be found in Cache-Control values, + * look for an Expires header. */ + if (!has_freshness_info) { + ctx.blk = NULL; + has_freshness_info = http_find_header(htx, ist("expires"), &ctx, 0); + } + + /* If no freshness information could be found in Cache-Control or Expires + * values, look for an explicit validator. */ + if (!has_freshness_info) { + ctx.blk = NULL; + has_validator = 1; + if (!http_find_header(htx, ist("etag"), &ctx, 0)) { + ctx.blk = NULL; + if (!http_find_header(htx, ist("last-modified"), &ctx, 0)) + has_validator = 0; + } + } + + /* We won't store an entry that has neither a cache validator nor an + * explicit expiration time, as suggested in RFC 7234#3. 
*/ + if (!has_freshness_info && !has_validator) + txn->flags |= TX_CACHE_IGNORE; } /* @@ -4526,7 +4540,7 @@ int http_forward_proxy_resp(struct stream *s, int final) if (final) { htx->flags |= HTX_FL_PROXY_RESP; - if (!http_eval_after_res_rules(s)) + if (!htx_is_empty(htx) && !http_eval_after_res_rules(s)) return 0; if (s->txn->meth == HTTP_METH_HEAD) diff --git a/src/http_fetch.c b/src/http_fetch.c index 170d86c23..a0b9cf57f 100644 --- a/src/http_fetch.c +++ b/src/http_fetch.c @@ -587,6 +587,7 @@ static int smp_fetch_body(const struct arg *args, struct sample *smp, const char struct htx *htx = smp_prefetch_htx(smp, chn, check, 1); struct buffer *temp; int32_t pos; + int finished = 0; if (!htx) return 0; @@ -596,8 +597,10 @@ static int smp_fetch_body(const struct arg *args, struct sample *smp, const char struct htx_blk *blk = htx_get_blk(htx, pos); enum htx_blk_type type = htx_get_blk_type(blk); - if (type == HTX_BLK_EOM || type == HTX_BLK_TLR || type == HTX_BLK_EOT) + if (type == HTX_BLK_EOM || type == HTX_BLK_TLR || type == HTX_BLK_EOT) { + finished = 1; break; + } if (type == HTX_BLK_DATA) { if (!h1_format_htx_data(htx_get_blk_value(htx, blk), temp, 0)) return 0; @@ -608,7 +611,8 @@ static int smp_fetch_body(const struct arg *args, struct sample *smp, const char smp->data.u.str = *temp; smp->flags = SMP_F_VOL_TEST; - if (!channel_full(chn, global.tune.maxrewrite) && !(chn->flags & (CF_EOI|CF_SHUTR|CF_READ_ERROR))) + if (!finished && (check || (chn && !channel_full(chn, global.tune.maxrewrite) && + !(chn->flags & (CF_EOI|CF_SHUTR|CF_READ_ERROR))))) smp->flags |= SMP_F_MAY_CHANGE; return 1; @@ -1566,11 +1570,11 @@ static int smp_fetch_capture_res_ver(const struct arg *args, struct sample *smp, * smp->ctx.a[0] for the in-header position, smp->ctx.a[1] for the * end-of-header-value, and smp->ctx.a[2] for the hdr_ctx. Depending on * the direction, multiple cookies may be parsed on the same line or not. 
- * The cookie name is in args and the name length in args->data.str.len. - * Accepts exactly 1 argument of type string. If the input options indicate - * that no iterating is desired, then only last value is fetched if any. - * The returned sample is of type CSTR. Can be used to parse cookies in other - * files. + * If provided, the searched cookie name is in args, in args->data.str. If + * the input options indicate that no iterating is desired, then only last + * value is fetched if any. If no cookie name is provided, the first cookie + * value found is fetched. The returned sample is of type CSTR. Can be used + * to parse cookies in other files. */ static int smp_fetch_cookie(const struct arg *args, struct sample *smp, const char *kw, void *private) { @@ -1580,11 +1584,14 @@ static int smp_fetch_cookie(const struct arg *args, struct sample *smp, const ch struct htx *htx = smp_prefetch_htx(smp, chn, check, 1); struct http_hdr_ctx *ctx = smp->ctx.a[2]; struct ist hdr; - int occ = 0; + char *cook = NULL; + size_t cook_l = 0; int found = 0; - if (!args || args->type != ARGT_STR) - return 0; + if (args && args->type == ARGT_STR) { + cook = args->data.str.area; + cook_l = args->data.str.data; + } if (!ctx) { /* first call */ @@ -1598,13 +1605,9 @@ static int smp_fetch_cookie(const struct arg *args, struct sample *smp, const ch hdr = (!(check || (chn && chn->flags & CF_ISRESP)) ? ist("Cookie") : ist("Set-Cookie")); - if (!occ && !(smp->opt & SMP_OPT_ITERATE)) - /* no explicit occurrence and single fetch => last cookie by default */ - occ = -1; - - /* OK so basically here, either we want only one value and it's the - * last one, or we want to iterate over all of them and we fetch the - * next one. + /* OK so basically here, either we want only one value or we want to + * iterate over all of them and we fetch the next one. In this last case + * SMP_OPT_ITERATE option is set. 
*/ if (!(smp->flags & SMP_F_NOT_LAST)) { @@ -1622,7 +1625,7 @@ static int smp_fetch_cookie(const struct arg *args, struct sample *smp, const ch if (!http_find_header(htx, hdr, ctx, 0)) goto out; - if (ctx->value.len < args->data.str.data + 1) + if (ctx->value.len < cook_l + 1) continue; smp->ctx.a[0] = ctx->value.ptr; @@ -1632,17 +1635,21 @@ static int smp_fetch_cookie(const struct arg *args, struct sample *smp, const ch smp->data.type = SMP_T_STR; smp->flags |= SMP_F_CONST; smp->ctx.a[0] = http_extract_cookie_value(smp->ctx.a[0], smp->ctx.a[1], - args->data.str.area, args->data.str.data, + cook, cook_l, (smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_REQ, &smp->data.u.str.area, &smp->data.u.str.data); if (smp->ctx.a[0]) { found = 1; - if (occ >= 0) { - /* one value was returned into smp->data.u.str.{str,len} */ + if (smp->opt & SMP_OPT_ITERATE) { + /* iterate on cookie value */ smp->flags |= SMP_F_NOT_LAST; return 1; } + if (args->data.str.data == 0) { + /* No cookie name, first occurrence returned */ + break; + } } /* if we're looking for last occurrence, let's loop */ } @@ -1679,10 +1686,14 @@ static int smp_fetch_cookie_cnt(const struct arg *args, struct sample *smp, cons struct http_hdr_ctx ctx; struct ist hdr; char *val_beg, *val_end; + char *cook = NULL; + size_t cook_l = 0; int cnt; - if (!args || args->type != ARGT_STR) - return 0; + if (args && args->type == ARGT_STR){ + cook = args->data.str.area; + cook_l = args->data.str.data; + } if (!htx) return 0; @@ -1698,7 +1709,7 @@ static int smp_fetch_cookie_cnt(const struct arg *args, struct sample *smp, cons if (!http_find_header(htx, hdr, &ctx, 0)) break; - if (ctx.value.len < args->data.str.data + 1) + if (ctx.value.len < cook_l + 1) continue; val_beg = ctx.value.ptr; @@ -1708,7 +1719,7 @@ static int smp_fetch_cookie_cnt(const struct arg *args, struct sample *smp, cons smp->data.type = SMP_T_STR; smp->flags |= SMP_F_CONST; while ((val_beg = http_extract_cookie_value(val_beg, val_end, - args->data.str.area, 
args->data.str.data, + cook, cook_l, (smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_REQ, &smp->data.u.str.area, &smp->data.u.str.data))) { diff --git a/src/http_htx.c b/src/http_htx.c index cb52b06f0..63e9dc2d5 100644 --- a/src/http_htx.c +++ b/src/http_htx.c @@ -201,7 +201,7 @@ static int __http_find_header(const struct htx *htx, const void *pattern, struct goto next_blk; break; case HTTP_FIND_FL_MATCH_SUB: - if (strnistr(n.ptr, n.len, name.ptr, n.len) != NULL) + if (!strnistr(n.ptr, n.len, name.ptr, name.len)) goto next_blk; break; default: @@ -891,7 +891,7 @@ unsigned int http_get_htx_fhdr(const struct htx *htx, const struct ist hdr, return 1; } -int http_str_to_htx(struct buffer *buf, struct ist raw) +int http_str_to_htx(struct buffer *buf, struct ist raw, char **errmsg) { struct htx *htx; struct htx_sl *sl; @@ -917,17 +917,24 @@ int http_str_to_htx(struct buffer *buf, struct ist raw) h1m.flags |= H1_MF_NO_PHDR; ret = h1_headers_to_hdr_list(raw.ptr, raw.ptr + raw.len, hdrs, sizeof(hdrs)/sizeof(hdrs[0]), &h1m, &h1sl); - if (ret <= 0) + if (ret <= 0) { + memprintf(errmsg, "unabled to parse headers (error offset: %d)", h1m.err_pos); goto error; + } - if (unlikely(h1sl.st.v.len != 8)) + if (unlikely(h1sl.st.v.len != 8)) { + memprintf(errmsg, "invalid http version (%.*s)", (int)h1sl.st.v.len, h1sl.st.v.ptr); goto error; + } if ((*(h1sl.st.v.ptr + 5) > '1') || ((*(h1sl.st.v.ptr + 5) == '1') && (*(h1sl.st.v.ptr + 7) >= '1'))) h1m.flags |= H1_MF_VER_11; - if (h1sl.st.status < 200 && (h1sl.st.status == 100 || h1sl.st.status >= 102)) + if (h1sl.st.status < 200 && (h1sl.st.status == 100 || h1sl.st.status >= 102)) { + memprintf(errmsg, "invalid http status code for an error message (%u)", + h1sl.st.status); goto error; + } if (h1sl.st.status == 204 || h1sl.st.status == 304) { /* Responses known to have no body. 
*/ @@ -944,8 +951,10 @@ int http_str_to_htx(struct buffer *buf, struct ist raw) flags |= HTX_SL_F_XFER_ENC; if (h1m.flags & H1_MF_XFER_LEN) { flags |= HTX_SL_F_XFER_LEN; - if (h1m.flags & H1_MF_CHNK) - goto error; /* Unsupported because there is no body parsing */ + if (h1m.flags & H1_MF_CHNK) { + memprintf(errmsg, "chunk-encoded payload not supported"); + goto error; + } else if (h1m.flags & H1_MF_CLEN) { flags |= HTX_SL_F_CLEN; if (h1m.body_len == 0) @@ -955,26 +964,37 @@ int http_str_to_htx(struct buffer *buf, struct ist raw) flags |= HTX_SL_F_BODYLESS; } - if ((flags & HTX_SL_F_BODYLESS) && raw.len > ret) - goto error; /* No body expected */ - if ((flags & HTX_SL_F_CLEN) && h1m.body_len != (raw.len - ret)) - goto error; /* body with wrong length */ + if ((flags & HTX_SL_F_BODYLESS) && raw.len > ret) { + memprintf(errmsg, "message payload not expected"); + goto error; + } + if ((flags & HTX_SL_F_CLEN) && h1m.body_len != (raw.len - ret)) { + memprintf(errmsg, "payload size does not match the announced content-length (%lu != %lu)", + (unsigned long)(raw.len - ret), (unsigned long)h1m.body_len); + goto error; + } htx = htx_from_buf(buf); sl = htx_add_stline(htx, HTX_BLK_RES_SL, flags, h1sl.st.v, h1sl.st.c, h1sl.st.r); - if (!sl || !htx_add_all_headers(htx, hdrs)) + if (!sl || !htx_add_all_headers(htx, hdrs)) { + memprintf(errmsg, "unable to add headers into the HTX message"); goto error; + } sl->info.res.status = h1sl.st.status; while (raw.len > ret) { int sent = htx_add_data(htx, ist2(raw.ptr + ret, raw.len - ret)); - if (!sent) + if (!sent) { + memprintf(errmsg, "unable to add payload into the HTX message"); goto error; + } ret += sent; } - if (!htx_add_endof(htx, HTX_BLK_EOM)) + if (!htx_add_endof(htx, HTX_BLK_EOM)) { + memprintf(errmsg, "unable to add EOM into the HTX message"); goto error; + } return 1; @@ -1027,22 +1047,32 @@ static int http_htx_init(void) { struct buffer chk; struct ist raw; + char *errmsg = NULL; int rc; int err_code = 0; for (rc = 0; rc < 
HTTP_ERR_SIZE; rc++) { if (!http_err_msgs[rc]) { - ha_alert("Internal error: no message defined for HTTP return code %d", rc); + ha_alert("Internal error: no default message defined for HTTP return code %d", rc); err_code |= ERR_ALERT | ERR_FATAL; continue; } raw = ist2(http_err_msgs[rc], strlen(http_err_msgs[rc])); - if (!http_str_to_htx(&chk, raw)) { - ha_alert("Internal error: Unable to convert message in HTX for HTTP return code %d.\n", - http_err_codes[rc]); + if (!http_str_to_htx(&chk, raw, &errmsg)) { + ha_alert("Internal error: invalid default message for HTTP return code %d: %s.\n", + http_err_codes[rc], errmsg); err_code |= ERR_ALERT | ERR_FATAL; } + else if (errmsg) { + ha_warning("invalid default message for HTTP return code %d: %s.\n", http_err_codes[rc], errmsg); + err_code |= ERR_WARN; + } + + /* Reset errmsg */ + free(errmsg); + errmsg = NULL; + http_err_chunks[rc] = chk; http_err_replies[rc].type = HTTP_REPLY_ERRMSG; http_err_replies[rc].status = http_err_codes[rc]; @@ -1155,8 +1185,8 @@ struct buffer *http_load_errorfile(const char *file, char **errmsg) } /* Convert the error file into an HTX message */ - if (!http_str_to_htx(&chk, ist2(err, errlen))) { - memprintf(errmsg, "unable to convert custom error message file '%s' in HTX.", file); + if (!http_str_to_htx(&chk, ist2(err, errlen), errmsg)) { + memprintf(errmsg, "'%s': %s", file, *errmsg); free(http_errmsg->node.key); free(http_errmsg); goto out; @@ -1206,8 +1236,8 @@ struct buffer *http_load_errormsg(const char *key, const struct ist msg, char ** } /* Convert the error file into an HTX message */ - if (!http_str_to_htx(&chk, msg)) { - memprintf(errmsg, "unable to convert message in HTX."); + if (!http_str_to_htx(&chk, msg, errmsg)) { + memprintf(errmsg, "invalid error message: %s", *errmsg); free(http_errmsg->node.key); free(http_errmsg); goto out; @@ -1757,6 +1787,9 @@ static int proxy_parse_errorloc(char **args, int section, struct proxy *curpx, conf_err->line = line; 
LIST_ADDQ(&curpx->conf.errors, &conf_err->list); + /* handle warning message */ + if (*errmsg) + ret = 1; out: return ret; @@ -1819,6 +1852,9 @@ static int proxy_parse_errorfile(char **args, int section, struct proxy *curpx, conf_err->line = line; LIST_ADDQ(&curpx->conf.errors, &conf_err->list); + /* handle warning message */ + if (*errmsg) + ret = 1; out: return ret; @@ -1951,6 +1987,9 @@ static int proxy_parse_http_error(char **args, int section, struct proxy *curpx, conf_err->line = line; LIST_ADDQ(&curpx->conf.errors, &conf_err->list); + /* handle warning message */ + if (*errmsg) + ret = 1; out: return ret; @@ -2164,6 +2203,10 @@ static int cfg_parse_http_errors(const char *file, int linenum, char **args, int err_code |= ERR_ALERT | ERR_FATAL; goto out; } + if (errmsg) { + ha_warning("parsing [%s:%d] : %s: %s\n", file, linenum, args[0], errmsg); + err_code |= ERR_WARN; + } reply = calloc(1, sizeof(*reply)); if (!reply) { diff --git a/src/mux_h2.c b/src/mux_h2.c index 740e25555..482c6205d 100644 --- a/src/mux_h2.c +++ b/src/mux_h2.c @@ -6398,9 +6398,11 @@ static int init_h2() pool_head_hpack_tbl = create_pool("hpack_tbl", h2_settings_header_table_size, MEM_F_SHARED|MEM_F_EXACT); - if (!pool_head_hpack_tbl) - return -1; - return 0; + if (!pool_head_hpack_tbl) { + ha_alert("failed to allocate hpack_tbl memory pool\n"); + return (ERR_ALERT | ERR_FATAL); + } + return ERR_NONE; } REGISTER_POST_CHECK(init_h2); diff --git a/src/pattern.c b/src/pattern.c index 4a3fe297e..8bdd4d9b5 100644 --- a/src/pattern.c +++ b/src/pattern.c @@ -465,11 +465,18 @@ struct pattern *pat_match_str(struct sample *smp, struct pattern_expr *expr, int if (smp->data.u.str.data < smp->data.u.str.size) { /* we may have to force a trailing zero on the test pattern and - * the buffer is large enough to accommodate it. + * the buffer is large enough to accommodate it. 
If the flag + * CONST is set, duplicate the string */ prev = smp->data.u.str.area[smp->data.u.str.data]; - if (prev) - smp->data.u.str.area[smp->data.u.str.data] = '\0'; + if (prev) { + if (smp->flags & SMP_F_CONST) { + if (!smp_dup(smp)) + return NULL; + } else { + smp->data.u.str.area[smp->data.u.str.data] = '\0'; + } + } } else { /* Otherwise, the sample is duplicated. A trailing zero diff --git a/src/peers.c b/src/peers.c index aac015a23..abc2c596b 100644 --- a/src/peers.c +++ b/src/peers.c @@ -42,6 +42,7 @@ #include #include #include +#include /*******************************/ @@ -288,6 +289,69 @@ static struct ebpt_node *dcache_tx_insert(struct dcache *dc, struct dcache_tx_entry *i); static inline void flush_dcache(struct peer *peer); +/* trace source and events */ +static void peers_trace(enum trace_level level, uint64_t mask, + const struct trace_source *src, + const struct ist where, const struct ist func, + const void *a1, const void *a2, const void *a3, const void *a4); + +static const struct trace_event peers_trace_events[] = { +#define PEERS_EV_UPDTMSG (1 << 0) + { .mask = PEERS_EV_UPDTMSG, .name = "updtmsg", .desc = "update message received" }, +}; + +static const struct name_desc peers_trace_lockon_args[4] = { + /* arg1 */ { /* already used by the connection */ }, + /* arg2 */ { .name="peers", .desc="Peers protocol" }, + /* arg3 */ { }, + /* arg4 */ { } +}; + +static const struct name_desc peers_trace_decoding[] = { +#define PEERS_VERB_CLEAN 1 + { .name="clean", .desc="only user-friendly stuff, generally suitable for level \"user\"" }, + { /* end */ } +}; + + +struct trace_source trace_peers = { + .name = IST("peers"), + .desc = "Peers protocol", + .arg_def = TRC_ARG1_CONN, /* TRACE()'s first argument is always a connection */ + .default_cb = peers_trace, + .known_events = peers_trace_events, + .lockon_args = peers_trace_lockon_args, + .decoding = peers_trace_decoding, + .report_events = ~0, /* report everything by default */ +}; + +#define 
TRACE_SOURCE &trace_peers +INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE); + +static void peers_trace(enum trace_level level, uint64_t mask, + const struct trace_source *src, + const struct ist where, const struct ist func, + const void *a1, const void *a2, const void *a3, const void *a4) +{ + if (mask & PEERS_EV_UPDTMSG) { + if (a2) { + const struct peer *peer = a2; + + chunk_appendf(&trace_buf, " peer=%s", peer->id); + } + if (a3) { + const char *p = a3; + + chunk_appendf(&trace_buf, " @%p", p); + } + if (a4) { + const size_t *val = a4; + + chunk_appendf(&trace_buf, " %llu", (unsigned long long)*val); + } + } +} + static const char *statuscode_str(int statuscode) { switch (statuscode) { @@ -1352,6 +1416,7 @@ static int peer_treat_updatemsg(struct appctx *appctx, struct peer *p, int updt, unsigned int data_type; void *data_ptr; + TRACE_ENTER(PEERS_EV_UPDTMSG, NULL, p); /* Here we have data message */ if (!st) goto ignore_msg; @@ -1359,8 +1424,10 @@ static int peer_treat_updatemsg(struct appctx *appctx, struct peer *p, int updt, expire = MS_TO_TICKS(st->table->expire); if (updt) { - if (msg_len < sizeof(update)) + if (msg_len < sizeof(update)) { + TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, NULL, p); goto malformed_exit; + } memcpy(&update, *msg_cur, sizeof(update)); *msg_cur += sizeof(update); @@ -1373,8 +1440,13 @@ static int peer_treat_updatemsg(struct appctx *appctx, struct peer *p, int updt, if (exp) { size_t expire_sz = sizeof expire; - if (*msg_cur + expire_sz > msg_end) + if (*msg_cur + expire_sz > msg_end) { + TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, + NULL, p, *msg_cur); + TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, + NULL, p, msg_end, &expire_sz); goto malformed_exit; + } memcpy(&expire, *msg_cur, expire_sz); *msg_cur += expire_sz; @@ -1389,12 +1461,19 @@ static int peer_treat_updatemsg(struct appctx *appctx, struct peer *p, int updt, unsigned int to_read, to_store; to_read = intdecode(msg_cur, msg_end); - if 
(!*msg_cur) + if (!*msg_cur) { + TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, NULL, p); goto malformed_free_newts; + } to_store = MIN(to_read, st->table->key_size - 1); - if (*msg_cur + to_store > msg_end) + if (*msg_cur + to_store > msg_end) { + TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, + NULL, p, *msg_cur); + TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, + NULL, p, msg_end, &to_store); goto malformed_free_newts; + } memcpy(newts->key.key, *msg_cur, to_store); newts->key.key[to_store] = 0; @@ -1403,8 +1482,13 @@ static int peer_treat_updatemsg(struct appctx *appctx, struct peer *p, int updt, else if (st->table->type == SMP_T_SINT) { unsigned int netinteger; - if (*msg_cur + sizeof(netinteger) > msg_end) + if (*msg_cur + sizeof(netinteger) > msg_end) { + TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, + NULL, p, *msg_cur); + TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, + NULL, p, msg_end); goto malformed_free_newts; + } memcpy(&netinteger, *msg_cur, sizeof(netinteger)); netinteger = ntohl(netinteger); @@ -1412,8 +1496,13 @@ static int peer_treat_updatemsg(struct appctx *appctx, struct peer *p, int updt, *msg_cur += sizeof(netinteger); } else { - if (*msg_cur + st->table->key_size > msg_end) + if (*msg_cur + st->table->key_size > msg_end) { + TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, + NULL, p, *msg_cur); + TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, + NULL, p, msg_end, &st->table->key_size); goto malformed_free_newts; + } memcpy(newts->key.key, *msg_cur, st->table->key_size); *msg_cur += st->table->key_size; @@ -1435,8 +1524,10 @@ static int peer_treat_updatemsg(struct appctx *appctx, struct peer *p, int updt, continue; decoded_int = intdecode(msg_cur, msg_end); - if (!*msg_cur) + if (!*msg_cur) { + TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, NULL, p); goto malformed_unlock; + } switch (stktable_data_types[data_type].std_type) { case STD_T_SINT: @@ -1467,12 +1558,16 @@ static int peer_treat_updatemsg(struct appctx 
*appctx, struct peer *p, int updt, data.curr_tick = tick_add(now_ms, -decoded_int) & ~0x1; data.curr_ctr = intdecode(msg_cur, msg_end); - if (!*msg_cur) + if (!*msg_cur) { + TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, NULL, p); goto malformed_unlock; + } data.prev_ctr = intdecode(msg_cur, msg_end); - if (!*msg_cur) + if (!*msg_cur) { + TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, NULL, p); goto malformed_unlock; + } data_ptr = stktable_data_ptr(st->table, ts, data_type); if (data_ptr) @@ -1492,22 +1587,33 @@ static int peer_treat_updatemsg(struct appctx *appctx, struct peer *p, int updt, break; } data_len = decoded_int; - if (*msg_cur + data_len > msg_end) + if (*msg_cur + data_len > msg_end) { + TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, + NULL, p, *msg_cur); + TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, + NULL, p, msg_end, &data_len); goto malformed_unlock; + } /* Compute the end of the current data, being at the end of * the entire message. */ end = *msg_cur + data_len; id = intdecode(msg_cur, end); - if (!*msg_cur || !id) + if (!*msg_cur || !id) { + TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, + NULL, p, *msg_cur, &id); goto malformed_unlock; + } dc = p->dcache; if (*msg_cur == end) { /* Dictionary entry key without value. */ - if (id > dc->max_entries) - break; + if (id > dc->max_entries) { + TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, + NULL, p, NULL, &id); + goto malformed_unlock; + } /* IDs sent over the network are numbered from 1. 
*/ de = dc->rx[id - 1].de; } @@ -1515,8 +1621,13 @@ static int peer_treat_updatemsg(struct appctx *appctx, struct peer *p, int updt, chunk = get_trash_chunk(); value_len = intdecode(msg_cur, end); if (!*msg_cur || *msg_cur + value_len > end || - unlikely(value_len + 1 >= chunk->size)) + unlikely(value_len + 1 >= chunk->size)) { + TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, + NULL, p, *msg_cur, &value_len); + TRACE_PROTO("malformed message", PEERS_EV_UPDTMSG, + NULL, p, end, &chunk->size); goto malformed_unlock; + } chunk_memcpy(chunk, *msg_cur, value_len); chunk->area[chunk->data] = '\0'; @@ -1539,11 +1650,13 @@ static int peer_treat_updatemsg(struct appctx *appctx, struct peer *p, int updt, HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); stktable_touch_remote(st->table, ts, 1); + TRACE_LEAVE(PEERS_EV_UPDTMSG, NULL, p); return 1; ignore_msg: /* skip consumed message */ co_skip(si_oc(si), totl); + TRACE_DEVEL("leaving in error", PEERS_EV_UPDTMSG); return 0; malformed_unlock: @@ -1551,6 +1664,7 @@ static int peer_treat_updatemsg(struct appctx *appctx, struct peer *p, int updt, HA_RWLOCK_WRUNLOCK(STK_SESS_LOCK, &ts->lock); stktable_touch_remote(st->table, ts, 1); appctx->st0 = PEER_SESS_ST_ERRPROTO; + TRACE_DEVEL("leaving in error", PEERS_EV_UPDTMSG); return 0; malformed_free_newts: @@ -1558,6 +1672,7 @@ static int peer_treat_updatemsg(struct appctx *appctx, struct peer *p, int updt, stksess_free(st->table, newts); malformed_exit: appctx->st0 = PEER_SESS_ST_ERRPROTO; + TRACE_DEVEL("leaving in error", PEERS_EV_UPDTMSG); return 0; } @@ -1711,7 +1826,14 @@ static inline int peer_treat_definemsg(struct appctx *appctx, struct peer *p, } /* - * Receive a stick-table message. + * Receive a stick-table message or pre-parse any other message. + * The message's header will be sent into which must be at least + * bytes long (at least 7 to store 32-bit variable lengths). 
+ * The first two bytes are always read, and the rest is only read if the + * first bytes indicate a stick-table message. If the message is a stick-table + * message, the varint is decoded and the equivalent number of bytes will be + * copied into the trash at trash.area. is incremented by the number of + * bytes read EVEN IN CASE OF INCOMPLETE MESSAGES. * Returns 1 if there was no error, if not, returns 0 if not enough data were available, * -1 if there was an error updating the appctx state st0 accordingly. */ @@ -1720,6 +1842,7 @@ static inline int peer_recv_msg(struct appctx *appctx, char *msg_head, size_t ms { int reql; struct stream_interface *si = appctx->owner; + char *cur; reql = co_getblk(si_oc(si), msg_head, 2 * sizeof(char), *totl); if (reql <= 0) /* closed or EOL not found */ @@ -1730,46 +1853,32 @@ static inline int peer_recv_msg(struct appctx *appctx, char *msg_head, size_t ms if (!(msg_head[1] & PEER_MSG_STKT_BIT_MASK)) return 1; + /* This is a stick-table message, let's go on */ + /* Read and Decode message length */ - reql = co_getblk(si_oc(si), &msg_head[2], sizeof(char), *totl); + msg_head += *totl; + msg_head_sz -= *totl; + reql = co_data(si_oc(si)) - *totl; + if (reql > msg_head_sz) + reql = msg_head_sz; + + reql = co_getblk(si_oc(si), msg_head, reql, *totl); if (reql <= 0) /* closed */ goto incomplete; - *totl += reql; - - if ((unsigned int)msg_head[2] < PEER_ENC_2BYTES_MIN) { - *msg_len = msg_head[2]; - } - else { - int i; - char *cur; - char *end; - - for (i = 3 ; i < msg_head_sz ; i++) { - reql = co_getblk(si_oc(si), &msg_head[i], sizeof(char), *totl); - if (reql <= 0) /* closed */ - goto incomplete; - - *totl += reql; - - if (!(msg_head[i] & PEER_MSG_STKT_BIT_MASK)) - break; - } + cur = msg_head; + *msg_len = intdecode(&cur, cur + reql); + if (!cur) { + /* the number is truncated, did we read enough ? 
*/ + if (reql < msg_head_sz) + goto incomplete; - if (i == msg_head_sz) { - /* malformed message */ - appctx->st0 = PEER_SESS_ST_ERRPROTO; - return -1; - } - end = msg_head + msg_head_sz; - cur = &msg_head[2]; - *msg_len = intdecode(&cur, end); - if (!cur) { - /* malformed message */ - appctx->st0 = PEER_SESS_ST_ERRPROTO; - return -1; - } + /* malformed message */ + TRACE_PROTO("malformed message: too large length encoding", PEERS_EV_UPDTMSG); + appctx->st0 = PEER_SESS_ST_ERRPROTO; + return -1; } + *totl += cur - msg_head; /* Read message content */ if (*msg_len) { @@ -1788,8 +1897,8 @@ static inline int peer_recv_msg(struct appctx *appctx, char *msg_head, size_t ms return 1; incomplete: - if (reql < 0) { - /* there was an error */ + if (reql < 0 || (si_oc(si)->flags & (CF_SHUTW|CF_SHUTW_NOW))) { + /* there was an error or the message was truncated */ appctx->st0 = PEER_SESS_ST_END; return -1; } @@ -2384,7 +2493,7 @@ static void peer_io_handler(struct appctx *appctx) uint32_t msg_len = 0; char *msg_cur = trash.area; char *msg_end = trash.area; - unsigned char msg_head[7]; + unsigned char msg_head[7]; // 2 + 5 for varint32 int totl = 0; prev_state = appctx->st0; @@ -2947,8 +3056,12 @@ static inline void flush_dcache(struct peer *peer) int i; struct dcache *dc = peer->dcache; - for (i = 0; i < dc->max_entries; i++) + for (i = 0; i < dc->max_entries; i++) { ebpt_delete(&dc->tx->entries[i]); + dc->tx->entries[i].key = NULL; + } + dc->tx->prev_lookup = NULL; + dc->tx->lru_key = 0; memset(dc->rx, 0, dc->max_entries * sizeof *dc->rx); } diff --git a/src/session.c b/src/session.c index b065ff3d2..710b6b6f6 100644 --- a/src/session.c +++ b/src/session.c @@ -99,6 +99,7 @@ void session_free(struct session *sess) void conn_session_free(struct connection *conn) { session_free(conn->owner); + conn->owner = NULL; } /* count a new session to keep frontend, listener and track stats up to date */ diff --git a/src/ssl_crtlist.c b/src/ssl_crtlist.c index 65ca2891d..f72f60ea9 100644 
--- a/src/ssl_crtlist.c +++ b/src/ssl_crtlist.c @@ -447,7 +447,6 @@ int crtlist_parse_file(char *file, struct bind_conf *bind_conf, struct proxy *cu struct crtlist *newlist; struct crtlist_entry *entry = NULL; char thisline[CRT_LINESIZE]; - char path[MAXPATHLEN+1]; FILE *f; struct stat buf; int linenum = 0; @@ -470,7 +469,9 @@ int crtlist_parse_file(char *file, struct bind_conf *bind_conf, struct proxy *cu char *end; char *line = thisline; char *crt_path; + char path[MAXPATHLEN+1]; struct ckch_store *ckchs; + int found = 0; if (missing_lf != -1) { memprintf(err, "parsing [%s:%d]: Stray NUL character at position %d.\n", @@ -537,6 +538,7 @@ int crtlist_parse_file(char *file, struct bind_conf *bind_conf, struct proxy *cu ckchs = ckchs_lookup(crt_path); if (ckchs == NULL) { if (stat(crt_path, &buf) == 0) { + found++; ckchs = ckchs_load_cert_file(crt_path, err); if (ckchs == NULL) { @@ -550,7 +552,7 @@ int crtlist_parse_file(char *file, struct bind_conf *bind_conf, struct proxy *cu LIST_ADDQ(&newlist->ord_entries, &entry->by_crtlist); LIST_ADDQ(&ckchs->crtlist_entry, &entry->by_ckch_store); - } else { + } else if (global_ssl.extra_files & SSL_GF_BUNDLE) { /* If we didn't find the file, this could be a bundle, since 2.3 we don't support multiple certificate in the same OpenSSL store, so we @@ -564,40 +566,56 @@ int crtlist_parse_file(char *file, struct bind_conf *bind_conf, struct proxy *cu struct stat buf; int ret; - ret = snprintf(fp, sizeof(fp), "%s.%s", path, SSL_SOCK_KEYTYPE_NAMES[n]); + ret = snprintf(fp, sizeof(fp), "%s.%s", crt_path, SSL_SOCK_KEYTYPE_NAMES[n]); if (ret > sizeof(fp)) continue; ckchs = ckchs_lookup(fp); - if (!ckchs && stat(fp, &buf) == 0) { - - ckchs = ckchs_load_cert_file(fp, err); - if (ckchs == NULL) { - cfgerr |= ERR_ALERT | ERR_FATAL; - goto error; - } - - linenum++; /* we duplicate the line for this entry in the bundle */ - if (!entry_dup) { /* if the entry was used, duplicate one */ - linenum++; - entry_dup = crtlist_entry_dup(entry); - if 
(!entry_dup) { + if (!ckchs) { + if (stat(fp, &buf) == 0) { + ckchs = ckchs_load_cert_file(fp, err); + if (!ckchs) { cfgerr |= ERR_ALERT | ERR_FATAL; goto error; } - entry_dup->linenum = linenum; + } else { + continue; /* didn't find this extension, skip */ + } + } + found++; + linenum++; /* we duplicate the line for this entry in the bundle */ + if (!entry_dup) { /* if the entry was used, duplicate one */ + linenum++; + entry_dup = crtlist_entry_dup(entry); + if (!entry_dup) { + cfgerr |= ERR_ALERT | ERR_FATAL; + goto error; } + entry_dup->linenum = linenum; + } - entry_dup->node.key = ckchs; - entry_dup->crtlist = newlist; - ebpt_insert(&newlist->entries, &entry_dup->node); - LIST_ADDQ(&newlist->ord_entries, &entry_dup->by_crtlist); - LIST_ADDQ(&ckchs->crtlist_entry, &entry_dup->by_ckch_store); + entry_dup->node.key = ckchs; + entry_dup->crtlist = newlist; + ebpt_insert(&newlist->entries, &entry_dup->node); + LIST_ADDQ(&newlist->ord_entries, &entry_dup->by_crtlist); + LIST_ADDQ(&ckchs->crtlist_entry, &entry_dup->by_ckch_store); - entry_dup = NULL; /* the entry was used, we need a new one next round */ - } + entry_dup = NULL; /* the entry was used, we need a new one next round */ } } + if (!found) { + memprintf(err, "%sunable to stat SSL certificate from file '%s' : %s.\n", + err && *err ? 
*err : "", crt_path, strerror(errno)); + cfgerr |= ERR_ALERT | ERR_FATAL; + } + + } else { + entry->node.key = ckchs; + entry->crtlist = newlist; + ebpt_insert(&newlist->entries, &entry->node); + LIST_ADDQ(&newlist->ord_entries, &entry->by_crtlist); + LIST_ADDQ(&ckchs->crtlist_entry, &entry->by_ckch_store); + found++; } entry = NULL; } diff --git a/src/ssl_sock.c b/src/ssl_sock.c index 6f73a3175..0490b2aef 100644 --- a/src/ssl_sock.c +++ b/src/ssl_sock.c @@ -2993,7 +2993,7 @@ static int ssl_sock_load_dh_params(SSL_CTX *ctx, const struct cert_key_and_chain /* Clear openssl global errors stack */ ERR_clear_error(); - if (global_ssl.default_dh_param <= 1024) { + if (global_ssl.default_dh_param && global_ssl.default_dh_param <= 1024) { /* we are limited to DH parameter of 1024 bits anyway */ if (local_dh_1024 == NULL) local_dh_1024 = ssl_get_dh_1024(); @@ -3454,24 +3454,24 @@ int ssl_sock_load_cert_list_file(char *file, int dir, struct bind_conf *bind_con int ssl_sock_load_cert(char *path, struct bind_conf *bind_conf, char **err) { struct stat buf; - char fp[MAXPATHLEN+1]; int cfgerr = 0; struct ckch_store *ckchs; struct ckch_inst *ckch_inst = NULL; + int found = 0; /* did we found a file to load ? 
*/ if ((ckchs = ckchs_lookup(path))) { /* we found the ckchs in the tree, we can use it directly */ - return ssl_sock_load_ckchs(path, ckchs, bind_conf, NULL, NULL, 0, &ckch_inst, err); - } - if (stat(path, &buf) == 0) { + cfgerr |= ssl_sock_load_ckchs(path, ckchs, bind_conf, NULL, NULL, 0, &ckch_inst, err); + found++; + } else if (stat(path, &buf) == 0) { + found++; if (S_ISDIR(buf.st_mode) == 0) { ckchs = ckchs_load_cert_file(path, err); if (!ckchs) - return ERR_ALERT | ERR_FATAL; - - return ssl_sock_load_ckchs(path, ckchs, bind_conf, NULL, NULL, 0, &ckch_inst, err); + cfgerr |= ERR_ALERT | ERR_FATAL; + cfgerr |= ssl_sock_load_ckchs(path, ckchs, bind_conf, NULL, NULL, 0, &ckch_inst, err); } else { - return ssl_sock_load_cert_list_file(path, 1, bind_conf, bind_conf->frontend, err); + cfgerr |= ssl_sock_load_cert_list_file(path, 1, bind_conf, bind_conf->frontend, err); } } else { /* stat failed, could be a bundle */ @@ -3490,21 +3490,25 @@ int ssl_sock_load_cert(char *path, struct bind_conf *bind_conf, char **err) if ((ckchs = ckchs_lookup(fp))) { cfgerr |= ssl_sock_load_ckchs(fp, ckchs, bind_conf, NULL, NULL, 0, &ckch_inst, err); + found++; } else { if (stat(fp, &buf) == 0) { + found++; ckchs = ckchs_load_cert_file(fp, err); if (!ckchs) - return ERR_ALERT | ERR_FATAL; + cfgerr |= ERR_ALERT | ERR_FATAL; cfgerr |= ssl_sock_load_ckchs(fp, ckchs, bind_conf, NULL, NULL, 0, &ckch_inst, err); } } } - } else { - memprintf(err, "%sunable to stat SSL certificate from file '%s' : %s.\n", - err && *err ? *err : "", fp, strerror(errno)); - cfgerr |= ERR_ALERT | ERR_FATAL; + } } + if (!found) { + memprintf(err, "%sunable to stat SSL certificate from file '%s' : %s.\n", + err && *err ? 
*err : "", path, strerror(errno)); + cfgerr |= ERR_ALERT | ERR_FATAL; + } return cfgerr; } @@ -6187,7 +6191,7 @@ int ssl_load_global_issuer_from_BIO(BIO *in, char *fp, char **err) struct issuer_chain *issuer = NULL; akid = X509_get_ext_d2i(cert, NID_authority_key_identifier, NULL, NULL); - if (akid) { + if (akid && akid->keyid) { struct eb64_node *node; u64 hk; hk = XXH64(ASN1_STRING_get0_data(akid->keyid), ASN1_STRING_length(akid->keyid), 0); diff --git a/src/stats.c b/src/stats.c index 762672f45..ad92d7159 100644 --- a/src/stats.c +++ b/src/stats.c @@ -4548,6 +4548,23 @@ static int allocate_stats_dns_postcheck(void) REGISTER_CONFIG_POSTPARSER("allocate-stats-dns", allocate_stats_dns_postcheck); +static void deinit_stats(void) +{ + int domains[] = { STATS_DOMAIN_PROXY, STATS_DOMAIN_DNS }, i; + + for (i = 0; i < STATS_DOMAIN_COUNT; ++i) { + const int domain = domains[i]; + + if (stat_l[domain]) + free(stat_l[domain]); + + if (stat_f[domain]) + free(stat_f[domain]); + } +} + +REGISTER_POST_DEINIT(deinit_stats); + /* register cli keywords */ static struct cli_kw_list cli_kws = {{ },{ { { "clear", "counters", NULL }, "clear counters : clear max statistics counters (add 'all' for all counters)", cli_parse_clear_counters, NULL, NULL }, diff --git a/src/tcpcheck.c b/src/tcpcheck.c index 3142e7868..accc699ff 100644 --- a/src/tcpcheck.c +++ b/src/tcpcheck.c @@ -993,6 +993,10 @@ enum tcpcheck_eval_ret tcpcheck_eval_connect(struct check *check, struct tcpchec * 3: release and replace the old one on success */ + /* Always release input and output buffer when a new connect is evaluated */ + check_release_buf(check, &check->bi); + check_release_buf(check, &check->bo); + /* 2- prepare new connection */ cs = cs_new(NULL, (s ? 
&s->obj_type : &proxy->obj_type)); if (!cs) { @@ -1222,9 +1226,23 @@ enum tcpcheck_eval_ret tcpcheck_eval_send(struct check *check, struct tcpcheck_r struct buffer *tmp = NULL; struct htx *htx = NULL; - /* reset the read & write buffer */ - b_reset(&check->bi); - b_reset(&check->bo); + if (check->state & CHK_ST_OUT_ALLOC) { + ret = TCPCHK_EVAL_WAIT; + goto out; + } + + if (!check_get_buf(check, &check->bo)) { + check->state |= CHK_ST_OUT_ALLOC; + ret = TCPCHK_EVAL_WAIT; + goto out; + } + + /* Data already pending in the output buffer, send them now */ + if (b_data(&check->bo)) + goto do_send; + + /* Always release input buffer when a new send is evaluated */ + check_release_buf(check, &check->bi); switch (send->type) { case TCPCHK_SEND_STRING: @@ -1352,7 +1370,7 @@ enum tcpcheck_eval_ret tcpcheck_eval_send(struct check *check, struct tcpcheck_r goto out; }; - + do_send: if (conn->mux->snd_buf(cs, &check->bo, (IS_HTX_CONN(conn) ? (htxbuf(&check->bo))->data: b_data(&check->bo)), 0) <= 0) { if ((conn->flags & CO_FL_ERROR) || (cs->flags & CS_FL_ERROR)) { @@ -1368,6 +1386,8 @@ enum tcpcheck_eval_ret tcpcheck_eval_send(struct check *check, struct tcpcheck_r out: free_trash_chunk(tmp); + if (!b_data(&check->bo) || ret == TCPCHK_EVAL_STOP) + check_release_buf(check, &check->bo); return ret; error_htx: @@ -1410,6 +1430,14 @@ enum tcpcheck_eval_ret tcpcheck_eval_recv(struct check *check, struct tcpcheck_r if (cs->flags & CS_FL_EOS) goto end_recv; + if (check->state & CHK_ST_IN_ALLOC) + goto wait_more_data; + + if (!check_get_buf(check, &check->bi)) { + check->state |= CHK_ST_IN_ALLOC; + goto wait_more_data; + } + /* errors on the connection and the conn-stream were already checked */ /* prepare to detect if the mux needs more room */ @@ -1457,6 +1485,8 @@ enum tcpcheck_eval_ret tcpcheck_eval_recv(struct check *check, struct tcpcheck_r } out: + if (!b_data(&check->bi) || ret == TCPCHK_EVAL_STOP) + check_release_buf(check, &check->bi); return ret; stop: @@ -1927,7 +1957,7 @@ 
int tcpcheck_main(struct check *check) struct conn_stream *cs = check->cs; struct connection *conn = cs_conn(cs); int must_read = 1, last_read = 0; - int ret, retcode = 0; + int retcode = 0; enum tcpcheck_eval_ret eval_ret; /* here, we know that the check is complete or that it failed */ @@ -1968,34 +1998,12 @@ int tcpcheck_main(struct check *check) rule = LIST_NEXT(&check->current_step->list, typeof(rule), list); } - /* 3- check for pending outgoing data. It only happens during - * TCPCHK_ACT_SEND. */ - else if (check->current_step && check->current_step->action == TCPCHK_ACT_SEND) { - if (b_data(&check->bo)) { - /* We're already waiting to be able to send, give up */ - if (check->wait_list.events & SUB_RETRY_SEND) - goto out; - - ret = conn->mux->snd_buf(cs, &check->bo, - (IS_HTX_CONN(conn) ? (htxbuf(&check->bo))->data: b_data(&check->bo)), 0); - if (ret <= 0) { - if ((conn->flags & CO_FL_ERROR) || (cs->flags & CS_FL_ERROR)) - goto out_end_tcpcheck; - } - if ((IS_HTX_CONN(conn) && !htx_is_empty(htxbuf(&check->bo))) || b_data(&check->bo)) { - conn->mux->subscribe(cs, SUB_RETRY_SEND, &check->wait_list); - goto out; - } - } - rule = LIST_NEXT(&check->current_step->list, typeof(rule), list); - } - - /* 4- check if a rule must be resume. It happens if check->current_step + /* 3- check if a rule must be resume. It happens if check->current_step * is defined. */ else if (check->current_step) rule = check->current_step; - /* 5- It is the first evaluation. We must create a session and preset + /* 4- It is the first evaluation. 
We must create a session and preset * tcp-check variables */ else { struct tcpcheck_var *var; @@ -2133,6 +2141,10 @@ int tcpcheck_main(struct check *check) if ((conn && conn->flags & CO_FL_ERROR) || (cs && cs->flags & CS_FL_ERROR)) chk_report_conn_err(check, errno, 0); + /* the tcpcheck is finished, release in/out buffer now */ + check_release_buf(check, &check->bi); + check_release_buf(check, &check->bo); + out: return retcode; } @@ -2282,6 +2294,16 @@ struct tcpcheck_rule *parse_tcpcheck_connect(char **args, int cur_arg, struct pr memprintf(errmsg, "'%s' : unknown MUX protocol '%s'.", args[cur_arg], args[cur_arg+1]); goto error; } + + if (strcmp(args[0], "tcp-check") == 0 && mux_proto->mode != PROTO_MODE_TCP) { + memprintf(errmsg, "'%s' : invalid MUX protocol '%s' for tcp-check", args[cur_arg], args[cur_arg+1]); + goto error; + } + else if (strcmp(args[0], "http-check") == 0 && mux_proto->mode != PROTO_MODE_HTTP) { + memprintf(errmsg, "'%s' : invalid MUX protocol '%s' for http-check", args[cur_arg], args[cur_arg+1]); + goto error; + } + cur_arg++; } else if (strcmp(args[cur_arg], "comment") == 0) {