Import upstream version 0.1+git20190916.47225d0, md5 2f29086921ea2de06262a22a3f7ff832
Debian Janitor
4 years ago
13 | 13 | - LUA="lua 5.3" ZLIB=lzlib |
14 | 14 | - LUA="lua 5.3" ZLIB=lua-zlib |
15 | 15 | - LUA="lua 5.3" COMPAT53=no |
16 | - LUA="lua 5.3" LUA_CFLAGS="-DLUA_INT_TYPE=LUA_INT_INT" | |
16 | 17 | - LUA="luajit @" |
17 | 18 | - LUA="luajit @" ZLIB=lzlib |
18 | 19 | - LUA="luajit @" ZLIB=lua-zlib |
29 | 30 | |
30 | 31 | before_install: |
31 | 32 | - pip install hererocks |
32 | - hererocks ~/hererocks -r^ --$LUA # Install latest LuaRocks version | |
33 | # plus the Lua version for this build job | |
34 | # into 'here' subdirectory | |
35 | - export PATH=$PATH:~/hererocks/bin # Add directory with all installed binaries to PATH | |
36 | - eval `luarocks path --bin` | |
33 | - hererocks ~/hererocks -r^ --$LUA --cflags=$LUA_CFLAGS | |
34 | - export PATH=$PATH:~/hererocks/bin | |
35 | - eval $(luarocks path --bin) | |
37 | 36 | - luarocks install luacheck |
37 | - luarocks install https://raw.githubusercontent.com/andremm/typedlua/master/typedlua-scm-1.rockspec | |
38 | 38 | - luarocks install luacov-coveralls |
39 | 39 | - luarocks install busted |
40 | 40 | |
46 | 46 | |
47 | 47 | script: |
48 | 48 | - luacheck . |
49 | - tlc -o /dev/null spec/require-all.lua | |
49 | 50 | - busted -c |
50 | 51 | |
51 | 52 | after_success: |
0 | Hello and thank-you for considering contributing to lua-http! | |
1 | ||
2 | If you haven't already, see the [getting started](https://github.com/daurnimator/lua-http#getting-started) section of the main readme. | |
3 | ||
4 | # Contributing | |
5 | ||
6 | To submit your code for inclusion, please [send a "pull request" using github](https://github.com/daurnimator/lua-http/pulls). | |
7 | For a speedy approval, please: | |
8 | ||
9 | - Follow the [coding style](#coding-style) | |
10 | - Run [`luacheck`](https://github.com/mpeterv/luacheck) to lint your code | |
11 | - Include [tests](#tests) | |
12 | - Bug fixes should add a test exhibiting the issue | |
13 | - Enhancements must add tests for the new feature | |
14 | - [Sign off](#dco) your code | |
15 | ||
16 | ||
17 | If you are requested by a project maintainer to fix an issue with your pull request, please edit your existing commits (using e.g. `git commit --amend` or [`git fixup`](https://github.com/hashbang/dotfiles/blob/master/git/.local/bin/git-fixup)) rather than pushing new commits on top of the old ones. | |
18 | ||
19 | All commits *should* have the project in an operational state. | |
20 | ||
21 | ||
22 | # Coding Style | |
23 | ||
24 | When editing an existing file, please follow the coding style used in that file. | |
25 | If not clear from context or if you're starting a new file: | |
26 | ||
27 | - Indent with tabs | |
28 | - Alignment should not be done; when unavoidable, align with spaces | |
29 | - Remove any trailing whitespace (unless whitespace is significant as it can be in e.g. markdown) | |
30 | - Things (e.g. table fields) should be ordered by: | |
31 | 1. Required vs optional | |
32 | 2. Importance | |
33 | 3. Lexicographically (alphabetically) | |
34 | ||
35 | ||
36 | ## Lua conventions | |
37 | ||
38 | - Add a `__name` field to metatables | |
39 | - Use a separate table than the metatable itself for `__index` | |
40 | - Single-line table definitions should use commas (`,`) for delimiting elements | |
41 | - Multi-line table definitions should use semicolons (`;`) for delimiting elements | |
42 | ||
43 | ||
44 | ## Markdown conventions | |
45 | ||
46 | - Files should have two blank lines at the end of a section | |
47 | - Repository information files (e.g. README.md/CONTRIBUTING.md) should use github compatible markdown features | |
48 | - Files used to generate documentation can use any `pandoc` features they want | |
49 | ||
50 | ||
51 | # Tests | |
52 | ||
53 | The project has a test suite using the [`busted`](https://github.com/Olivine-Labs/busted) framework. | |
54 | Coverage is measured using [`luacov`](https://github.com/keplerproject/luacov). | |
55 | ||
56 | Tests can be found in the `spec/` directory at the root of the repository. Each source file should have its own file full of tests. | |
57 | ||
58 | Tests should avoid running any external processes. Use `cqueues` to start up various test servers and clients in-process. | |
59 | ||
60 | A successful test should close any file handles and sockets to avoid resource exhaustion. | |
61 | ||
62 | ||
63 | # Legal | |
64 | ||
65 | All code in the repository is covered by `LICENSE.md`. | |
66 | ||
67 | ## DCO | |
68 | ||
69 | A git `Signed-off-by` statement in a commit message in this repository refers to the [Developer Certificate of Origin](https://developercertificate.org/) (DCO). | |
70 | By signing off your commit you are making a legal statement that the work is contributed under the license of this project. | |
71 | You can add the statement to your commit by passing `-s` to `git commit`. | |
72 | ||
73 | ||
74 | # Security | |
75 | ||
76 | If you find a security vulnerability in the project and do not wish to file it publicly on the [issue tracker](https://github.com/daurnimator/lua-http/issues) then you may email [lua-http-security@daurnimator.com](mailto:lua-http-security@daurnimator.com). You may encrypt your mail using PGP to the key with fingerprint [954A3772D62EF90E4B31FBC6C91A9911192C187A](https://daurnimator.com/post/109075829529/gpg-key).
0 | 0 | The MIT License (MIT) |
1 | 1 | |
2 | Copyright (c) 2015-2016 Daurnimator | |
2 | Copyright (c) 2015-2019 Daurnimator | |
3 | 3 | |
4 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy |
5 | 5 | of this software and associated documentation files (the "Software"), to deal |
0 | 0.3 - 2019-02-13 | |
1 | ||
2 | - Fix incorrect Sec-WebSocket-Protocol negotiation | |
3 | - Fix incorrect timeout handling in `websocket:receive()` | |
4 | - Add workaround to allow being required in openresty (#98) | |
5 | - Add http.tls.old_cipher_list (#112) | |
6 | - Add http.cookie module (#117) | |
7 | - Improvements to http.hsts module (#119) | |
8 | - Add `options` argument form to `stream:write_body_from_file()` (#125) | |
9 | ||
10 | ||
11 | 0.2 - 2017-05-28 | |
12 | ||
13 | - Remove broken http.server `.client_timeout` option (replaced with `.connection_setup_timeout`) | |
14 | - Fix http1 pipelining locks | |
15 | - Miscellaneous http2 fixes | |
16 | - HTTP 2 streams no longer have to be used in order of creation | |
17 | - No longer raise decode errors in hpack module | |
18 | - Fix `hpack:lookup_index()` to treat static entries without values as empty string | |
19 | - Fix HTTP 1 client in locales with non-"." decimal separator | |
20 | - Add h1_stream.max_header_lines property to prevent infinite list of headers | |
21 | - New '.bind' option for requests and http.client module | |
22 | ||
23 | ||
0 | 24 | 0.1 - 2016-12-17 |
1 | 25 | |
2 | 26 | - Support for HTTP versions 1, 1.1 and 2 |
4 | 4 | - Optionally asynchronous (including DNS lookups and TLS) |
5 | 5 | - Supports HTTP(S) version 1.0, 1.1 and 2 |
6 | 6 | - Functionality for both client and server |
7 | - Cookie Management | |
7 | 8 | - Websockets |
8 | 9 | - Compatible with Lua 5.1, 5.2, 5.3 and [LuaJIT](http://luajit.org/) |
9 | 10 | |
17 | 18 | |
18 | 19 | [![Build Status](https://travis-ci.org/daurnimator/lua-http.svg)](https://travis-ci.org/daurnimator/lua-http) |
19 | 20 | [![Coverage Status](https://coveralls.io/repos/daurnimator/lua-http/badge.svg?branch=master&service=github)](https://coveralls.io/github/daurnimator/lua-http?branch=master) |
21 | [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/108/badge)](https://bestpractices.coreinfrastructure.org/projects/108) | |
20 | 22 | |
21 | 23 | |
22 | 24 | # Installation |
28 | 30 | |
29 | 31 | ## Dependencies |
30 | 32 | |
31 | - [cqueues](http://25thandclement.com/~william/projects/cqueues.html) >= 20161214 | |
33 | - [cqueues](http://25thandclement.com/~william/projects/cqueues.html) >= 20161214 (Note: cqueues currently doesn't support Microsoft Windows operating systems) | |
32 | 34 | - [luaossl](http://25thandclement.com/~william/projects/luaossl.html) >= 20161208 |
33 | 35 | - [basexx](https://github.com/aiq/basexx/) >= 0.2.0 |
34 | 36 | - [lpeg](http://www.inf.puc-rio.br/~roberto/lpeg/lpeg.html) |
35 | - [lpeg_patterns](https://github.com/daurnimator/lpeg_patterns) >= 0.3 | |
37 | - [lpeg_patterns](https://github.com/daurnimator/lpeg_patterns) >= 0.5 | |
38 | - [binaryheap](https://github.com/Tieske/binaryheap.lua) >= 0.3 | |
36 | 39 | - [fifo](https://github.com/daurnimator/fifo.lua) |
37 | 40 | |
38 | 41 | To use gzip compression you need **one** of: |
39 | 42 | |
40 | 43 | - [lzlib](https://github.com/LuaDist/lzlib) or [lua-zlib](https://github.com/brimworks/lua-zlib) |
44 | ||
45 | To check cookies against a public suffix list: | |
46 | ||
47 | - [lua-psl](https://github.com/daurnimator/lua-psl) | |
41 | 48 | |
42 | 49 | If using lua < 5.3 you will need |
43 | 50 |
4 | 4 | MODULES = \ |
5 | 5 | http.bit.md \ |
6 | 6 | http.client.md \ |
7 | http.cookie.md \ | |
7 | 8 | http.h1_connection.md \ |
8 | 9 | http.h1_reason_phrases.md \ |
9 | 10 | http.h1_stream.md \ |
0 | 0 | ## connection |
1 | 1 | |
2 | lua-http has separate libraries for both HTTP 1 and HTTP 2 type communications. Future protocols will also be supported and exposed as new modules. As HTTP 1 and 2 share common concepts at the connection and stream level, the _[connection](#connection)_ and _[stream](#stream)_ modules have been written to contain common interfaces wherever possible. All _[connection](#connection)_ types expose the following fields: | |
2 | A connection encapsulates a socket and provides protocol specific operations. A connection may have [*streams*](#stream) which encapsulate the requests/responses happening over a connection. Alternatively, you can ignore streams entirely and use low level protocol specific operations to read and write to the socket. | |
3 | 3 | |
4 | All *connection* types expose the following fields: | |
4 | 5 | |
5 | 6 | ### `connection.type` <!-- --> {#connection.type} |
6 | 7 | |
7 | 8 | The mode of use for the connection object. Valid values are: |
8 | 9 | |
9 | - `"client"` - Connects to a remote URI | |
10 | - `"server"` - Listens for connection on a local URI | |
10 | - `"client"`: Acts as a client; this connection type is used by entities who want to make requests | |
11 | - `"server"`: Acts as a server; this connection type is used by entities who want to respond to requests | |
11 | 12 | |
12 | 13 | |
13 | 14 | ### `connection.version` <!-- --> {#connection.version} |
14 | 15 | |
15 | 16 | The HTTP version number of the connection as a number. |
17 | ||
18 | ||
19 | ### `connection:pollfd()` <!-- --> {#connection:pollfd} | |
20 | ||
21 | ||
22 | ### `connection:events()` <!-- --> {#connection:events} | |
23 | ||
24 | ||
25 | ### `connection:timeout()` <!-- --> {#connection:timeout} | |
16 | 26 | |
17 | 27 | |
18 | 28 | ### `connection:connect(timeout)` <!-- --> {#connection:connect} |
39 | 49 | |
40 | 50 | ### `connection:flush(timeout)` <!-- --> {#connection:flush} |
41 | 51 | |
42 | Flushes all buffered outgoing data on the socket. Returns `true` on success. Returns `false` and the error if the socket fails to flush. | |
52 | Flushes buffered outgoing data on the socket to the operating system. Returns `true` on success. On error, returns `nil`, an error message and an error number. | |
43 | 53 | |
44 | 54 | |
45 | 55 | ### `connection:shutdown()` <!-- --> {#connection:shutdown} |
88 | 88 | Writes the string `str` to the stream and ends the stream. On error, returns `nil`, an error message and an error number. |
89 | 89 | |
90 | 90 | |
91 | ### `stream:write_body_from_file(file, timeout)` <!-- --> {#stream:write_body_from_file} | |
91 | ### `stream:write_body_from_file(options|file, timeout)` <!-- --> {#stream:write_body_from_file} | |
92 | ||
93 | - `options` is a table containing: | |
94 | - `.file` (file) | |
95 | - `.count` (positive integer): number of bytes of `file` to write | |
96 | defaults to infinity (the whole file will be written) | |
92 | 97 | |
93 | 98 | Writes the contents of file `file` to the stream and ends the stream. `file` will not be automatically seeked, so ensure it is at the correct offset before calling. On error, returns `nil`, an error message and an error number. |
94 | 99 |
0 | 0 | # Interfaces |
1 | 1 | |
2 | lua-http has separate modules for HTTP 1 vs HTTP 2 protocols, yet the different versions share many common concepts. lua-http provides a common interface for operations that make sense for both protocol versions (as well as any future developments). | |
3 | ||
2 | 4 | The following sections outline the interfaces exposed by the lua-http library. |
98 | 98 | - Some HTTP 2 operations return/throw special [http 2 error objects](#http.h2_error). |
99 | 99 | |
100 | 100 | |
101 | ### Timeouts | |
102 | ||
103 | All operations that may block the current thread take a `timeout` argument. | |
104 | This argument is always the number of seconds to allow before returning `nil, err_msg, ETIMEDOUT` where `err_msg` is a localised error message such as `"connection timed out"`. | |
105 | ||
106 | ||
101 | 107 | ## Terminology |
102 | 108 | |
103 | 109 | Much lua-http terminology is borrowed from HTTP 2. |
36 | 36 | e.g. `"80"` or `80` |
37 | 37 | - `path` (string): path to connect to (UNIX sockets) |
38 | 38 | - `v6only` (boolean, optional): if the `IPV6_V6ONLY` flag should be set on the underlying socket. |
39 | - `bind` (string, optional): the local outgoing address and optionally port to bind in the form of `"address[:port]"`, IPv6 addresses may be specified via square bracket notation. e.g. `"127.0.0.1"`, `"127.0.0.1:50000"`, `"[::1]:30000"`. | |
39 | 40 | - `timeout` (optional) is the maximum amount of time (in seconds) to allow for connection to be established. |
40 | 41 | This includes time for DNS lookup, connection, TLS negotiation (if TLS enabled) and in the case of HTTP 2: settings exchange. |
41 | 42 |
0 | ## http.cookie | |
1 | ||
2 | A module for working with cookies. | |
3 | ||
4 | ### `bake(name, value, expiry_time, domain, path, secure_only, http_only, same_site)` <!-- --> {#http.cookie.bake} | |
5 | ||
6 | Returns a string suitable for use in a `Set-Cookie` header with the passed parameters. | |
7 | ||
8 | ||
9 | ### `parse_cookie(cookie)` <!-- --> {#http.cookie.parse_cookie} | |
10 | ||
11 | Parses the `Cookie` header contents `cookie`. | |
12 | ||
13 | Returns a table containing `name` and `value` pairs as strings. | |
14 | ||
15 | ||
16 | ### `parse_cookies(req_headers)` <!-- --> {#http.cookie.parse_cookies} | |
17 | ||
18 | Parses all `Cookie` headers in the [*http.headers*](#http.headers) object `req_headers`. | |
19 | ||
20 | Returns a table containing `name` and `value` pairs as strings. | |
21 | ||
22 | ||
23 | ### `parse_setcookie(setcookie)` <!-- --> {#http.cookie.parse_setcookie} | |
24 | ||
25 | Parses the `Set-Cookie` header contents `setcookie`. | |
26 | ||
27 | Returns `name`, `value` and `params` where: | |
28 | ||
29 | - `name` is a string containing the cookie name | |
30 | - `value` is a string containing the cookie value | |
31 | - `params` is a table where the keys are cookie attribute names and values are cookie attribute values | |
32 | ||
33 | ||
34 | ### `new_store()` <!-- --> {#http.cookie.new_store} | |
35 | ||
36 | Creates a new cookie store. | |
37 | ||
38 | Cookies are unique for a tuple of domain, path and name; | |
39 | although multiple cookies with the same name may exist in a request due to overlapping paths or domains. | |
40 | ||
41 | ||
42 | ### `store.psl` <!-- --> {#http.cookie.store.psl} | |
43 | ||
44 | A [lua-psl](https://github.com/daurnimator/lua-psl) object to use for checking against the Public Suffix List. | |
45 | Set the field to `false` to skip checking the suffix list. | |
46 | ||
47 | Defaults to the [latest](https://rockdaboot.github.io/libpsl/libpsl-Public-Suffix-List-functions.html#psl-latest) PSL on the system. If lua-psl is not installed then it will be `nil`. | |
48 | ||
49 | ||
50 | ### `store.time()` <!-- --> {#http.cookie.store.time} | |
51 | ||
52 | A function used by the `store` to get the current time for expiries and such. | |
53 | ||
54 | Defaults to a function based on [`os.time`](https://www.lua.org/manual/5.3/manual.html#pdf-os.time). | |
55 | ||
56 | ||
57 | ### `store.max_cookie_length` <!-- --> {#http.cookie.store.max_cookie_length} | |
58 | ||
59 | The maximum length (in bytes) of cookies in the store; this value is also used as default maximum cookie length for `:lookup()`. | |
60 | Decreasing this value will only prevent new cookies from being added, it will not remove old cookies. | |
61 | ||
62 | Defaults to infinity (no maximum size). | |
63 | ||
64 | ||
65 | ### `store.max_cookies` <!-- --> {#http.cookie.store.max_cookies} | |
66 | ||
67 | The maximum number of cookies allowed in the `store`. | |
68 | Decreasing this value will only prevent new cookies from being added, it will not remove old cookies. | |
69 | ||
70 | Defaults to infinity (any number of cookies is allowed). | |
71 | ||
72 | ||
73 | ### `store.max_cookies_per_domain` <!-- --> {#http.cookie.store.max_cookies_per_domain} | |
74 | ||
75 | The maximum number of cookies allowed in the `store` per domain. | |
76 | Decreasing this value will only prevent new cookies from being added, it will not remove old cookies. | |
77 | ||
78 | Defaults to infinity (any number of cookies is allowed). | |
79 | ||
80 | ||
81 | ### `store:store(req_domain, req_path, req_is_http, req_is_secure, req_site_for_cookies, name, value, params)` <!-- --> {#http.cookie.store:store} | |
82 | ||
83 | Attempts to add a cookie to the `store`. | |
84 | ||
85 | - `req_domain` is the domain that the cookie was obtained from | |
86 | - `req_path` is the path that the cookie was obtained from | |
87 | - `req_is_http` is a boolean flag indicating if the cookie was obtained from a "non-HTTP" API | |
88 | - `req_is_secure` is a boolean flag indicating if the cookie was obtained from a "secure" protocol | |
89 | - `req_site_for_cookies` is a string containing the host that should be considered as the "site for cookies" (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)), can be `nil` if unknown. | |
90 | - `name` is a string containing the cookie name | |
91 | - `value` is a string containing the cookie value | |
92 | - `params` is a table where the keys are cookie attribute names and values are cookie attribute values | |
93 | ||
94 | Returns a boolean indicating if a cookie was stored. | |
95 | ||
96 | ||
97 | ### `store:store_from_request(req_headers, resp_headers, req_host, req_site_for_cookies)` <!-- --> {#http.cookie.store:store_from_request} | |
98 | ||
99 | Attempt to store any cookies found in the response headers. | |
100 | ||
101 | - `req_headers` is the [*http.headers*](#http.headers) object for the outgoing request | |
102 | - `resp_headers` is the [*http.headers*](#http.headers) object received in response | |
103 | - `req_host` is the host that your query was directed at (only used if `req_headers` is missing a `Host` header) | |
104 | - `req_site_for_cookies` is a string containing the host that should be considered as the "site for cookies" (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)), can be `nil` if unknown. | |
105 | ||
106 | ||
107 | ### `store:get(domain, path, name)` <!-- --> {#http.cookie.store:get} | |
108 | ||
109 | Returns the cookie value for the cookie stored for the passed `domain`, `path` and `name`. | |
110 | ||
111 | ||
112 | ### `store:remove(domain, path, name)` <!-- --> {#http.cookie.store:remove} | |
113 | ||
114 | Deletes the cookie stored for the passed `domain`, `path` and `name`. | |
115 | ||
116 | If `name` is `nil` or not passed then all cookies for the `domain` and `path` are removed. | |
117 | ||
118 | If `path` is `nil` or not passed (in addition to `name`) then all cookies for the `domain` are removed. | |
119 | ||
120 | ||
121 | ### `store:lookup(domain, path, is_http, is_secure, is_safe_method, site_for_cookies, is_top_level, max_cookie_length)` <!-- --> {#http.cookie.store:lookup} | |
122 | ||
123 | Finds cookies suitable for passing to an entity. | |
124 | ||
125 | - `domain` is the domain that will be sent the cookie | |
126 | - `path` is the path that will be sent the cookie | |
127 | - `is_http` is a boolean flag indicating if the destination is a "non-HTTP" API | |
128 | - `is_secure` is a boolean flag indicating if the destination will be communicated with over a "secure" protocol | |
129 | - `is_safe_method` is a boolean flag indicating if the cookie will be sent via a safe HTTP method (See also [http.util.is_safe_method](#http.util.is_safe_method)) | |
130 | - `site_for_cookies` is a string containing the host that should be considered as the "site for cookies" (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)), can be `nil` if unknown. | |
131 | - `is_top_level` is a boolean flag indicating if this request is a "top level" request (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)) | |
132 | - `max_cookie_length` is the maximum cookie length to allow (See also [`store.max_cookie_length`](#http.cookie.store.max_cookie_length)) | |
133 | ||
134 | Returns a string suitable for use in a `Cookie` header. | |
135 | ||
136 | ||
137 | ### `store:lookup_for_request(headers, host, site_for_cookies, is_top_level, max_cookie_length)` <!-- --> {#http.cookie.store:lookup_for_request} | |
138 | ||
139 | Finds cookies suitable for adding to a request. | |
140 | ||
141 | - `headers` is the [*http.headers*](#http.headers) object for the outgoing request | |
142 | - `host` is the host that your query was directed at (only used if `headers` is missing a `Host` header) | |
143 | - `site_for_cookies` is a string containing the host that should be considered as the "site for cookies" (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)), can be `nil` if unknown. | |
144 | - `is_top_level` is a boolean flag indicating if this request is a "top level" request (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)) | |
145 | - `max_cookie_length` is the maximum cookie length to allow (See also [`store.max_cookie_length`](#http.cookie.store.max_cookie_length)) | |
146 | ||
147 | Returns a string suitable for use in a `Cookie` header. | |
148 | ||
149 | ||
150 | ### `store:clean_due()` <!-- --> {#http.cookie.store:clean_due} | |
151 | ||
152 | Returns the number of seconds until the next cookie in the `store` expires. | |
153 | ||
154 | ||
155 | ### `store:clean()` <!-- --> {#http.cookie.store:clean} | |
156 | ||
157 | Remove all expired cookies from the `store`. | |
158 | ||
159 | ||
160 | ### `store:load_from_file(file)` <!-- --> {#http.cookie.store:load_from_file} | |
161 | ||
162 | Loads cookie data from the file object `file` into `store`. | |
163 | The file should be in the Netscape Cookiejar format. | |
164 | Invalid lines in the file are ignored. | |
165 | ||
166 | Returns `true` on success or passes along `nil, err, errno` if a `:read` call fails. | |
167 | ||
168 | ||
169 | ### `store:save_to_file(file)` <!-- --> {#http.cookie.store:save_to_file} | |
170 | ||
171 | Writes the cookie data from `store` into the file object `file` in the Netscape Cookiejar format. | |
172 | `file` is not `seek`-ed or truncated before writing. | |
173 | ||
174 | Returns `true` on success or passes along `nil, err, errno` if a `:write` call fails. |
14 | 14 | - `1.1` |
15 | 15 | |
16 | 16 | See [`connection.version`](#connection.version) |
17 | ||
18 | ||
19 | ### `h1_connection:pollfd()` <!-- --> {#http.h1_connection:pollfd} | |
20 | ||
21 | See [`connection:pollfd()`](#connection:pollfd) | |
22 | ||
23 | ||
24 | ### `h1_connection:events()` <!-- --> {#http.h1_connection:events} | |
25 | ||
26 | See [`connection:events()`](#connection:events) | |
27 | ||
28 | ||
29 | ### `h1_connection:timeout()` <!-- --> {#http.h1_connection:timeout} | |
30 | ||
31 | See [`connection:timeout()`](#connection:timeout) | |
17 | 32 | |
18 | 33 | |
19 | 34 | ### `h1_connection:connect(timeout)` <!-- --> {#http.h1_connection:connect} |
41 | 56 | See [`connection:flush(timeout)`](#connection:flush) |
42 | 57 | |
43 | 58 | |
44 | ### `h1_connection:shutdown()` <!-- --> {#http.h1_connection:shutdown} | |
59 | ### `h1_connection:shutdown(dir)` <!-- --> {#http.h1_connection:shutdown} | |
60 | ||
61 | Shut down is as graceful as possible: pipelined streams are [shutdown](#http.h1_stream:shutdown), then the underlying socket is shut down in the appropriate direction(s). | |
62 | ||
63 | `dir` is a string representing the direction of communication to shut down communication in. If it contains `"r"` it will shut down reading, if it contains `"w"` it will shut down writing. The default is `"rw"`, i.e. to shutdown communication in both directions. | |
45 | 64 | |
46 | 65 | See [`connection:shutdown()`](#connection:shutdown) |
47 | 66 | |
68 | 87 | See [`connection:onidle(new_handler)`](#connection:onidle) |
69 | 88 | |
70 | 89 | |
90 | ### `h1_connection:setmaxline(read_length)` <!-- --> {#http.h1_connection:setmaxline} | |
91 | ||
92 | Sets the maximum read buffer size (in bytes) to `read_length`. i.e. sets the maximum length of lines (such as headers). | |
93 | ||
94 | The default comes from the underlying socket, which gets the (changeable) cqueues default at time of construction. | |
95 | The default cqueues default is 4096 bytes. | |
96 | ||
97 | ||
71 | 98 | ### `h1_connection:clearerr(...)` <!-- --> {#http.h1_connection:clearerr} |
72 | 99 | |
73 | 100 | Clears errors to allow for further read or write operations on the connection. Returns the error number of existing errors. This function is used to recover from known errors. |
85 | 112 | |
86 | 113 | ### `h1_connection:read_request_line(timeout)` <!-- --> {#http.h1_connection:read_request_line} |
87 | 114 | |
88 | Reads a request line from the socket. Returns the request method, requested path and HTTP version for an incoming request. `:read_request_line()` yields until a `"\r\n"` terminated chunk is received, or `timeout` is exceeded. If the incoming chunk is not a valid HTTP request line, `nil` is returned. On error, returns `nil`, an error message and an error number. | |
115 | Reads a request line from the socket. Returns the request method, request target and HTTP version for an incoming request. `:read_request_line()` yields until a `"\r\n"` terminated chunk is received, or `timeout` is exceeded. If the incoming chunk is not a valid HTTP request line, `nil` is returned. On error, returns `nil`, an error message and an error number. | |
89 | 116 | |
90 | 117 | |
91 | 118 | ### `h1_connection:read_status_line(timeout)` <!-- --> {#http.h1_connection:read_status_line} |
118 | 145 | Reads the next available line of data from the request and returns the chunk and any chunk extensions. This function will yield until chunk size is received or `timeout` is exceeded. If the chunk size is indicated as `0` then `false` and any chunk extensions are returned. Returns `nil`, an error message and an error number if there was an error reading the chunk header or the socket.
119 | 146 | |
120 | 147 | |
121 | ### `h1_connection:write_request_line(method, path, httpversion, timeout)` <!-- --> {#http.h1_connection:write_request_line} | |
148 | ### `h1_connection:write_request_line(method, target, httpversion, timeout)` <!-- --> {#http.h1_connection:write_request_line} | |
122 | 149 | |
123 | 150 | Writes the opening HTTP 1.x request line for a new request to the socket buffer. Yields until success or `timeout`. If the write fails, returns `nil`, an error message and an error number. |
124 | 151 | |
148 | 175 | |
149 | 176 | Writes a chunk of data to the socket. `chunk_ext` must be `nil` as chunk extensions are not supported. Will yield until complete or `timeout` is exceeded. Returns true on success. Returns `nil`, an error message and an error number if the write fails. |
150 | 177 | |
151 | *Note that `chunk` will not be flushed to the remote server until [`write_body_last_chunk`](#http.h1_connection:write_body_last_chunk) is called.* | |
152 | ||
153 | 178 | |
154 | 179 | ### `h1_connection:write_body_last_chunk(chunk_ext, timeout)` <!-- --> {#http.h1_connection:write_body_last_chunk} |
155 | 180 | |
156 | Writes the chunked body terminator `"0\r\n"` to the socket and flushes the socket output buffer. `chunk_ext` must be `nil` as chunk extensions are not supported. Will yield until complete or `timeout` is exceeded. Returns `nil`, an error message and an error number if the write fails. | |
181 | Writes the chunked body terminator `"0\r\n"` to the socket. `chunk_ext` must be `nil` as chunk extensions are not supported. Will yield until complete or `timeout` is exceeded. Returns `nil`, an error message and an error number if the write fails. | |
182 | ||
183 | *Note that the connection will not be immediately flushed to the remote server; normally this will occur when trailers are written.* | |
157 | 184 | |
158 | 185 | |
159 | 186 | ### `h1_connection:write_body_plain(body, timeout)` <!-- --> {#http.h1_connection:write_body_plain} |
3 | 3 | |
4 | 4 | The gzip transfer encoding is supported transparently. |
5 | 5 | |
6 | ### `h1_stream.connection` <!-- --> {#h1_stream.connection} | |
6 | ### `h1_stream.connection` <!-- --> {#http.h1_stream.connection} | |
7 | 7 | |
8 | 8 | See [`stream.connection`](#stream.connection) |
9 | 9 | |
10 | 10 | |
11 | ### `h1_stream:checktls()` <!-- --> {#h1_stream:checktls} | |
11 | ### `h1_stream.max_header_lines` <!-- --> {#http.h1_stream.max_header_lines} | |
12 | ||
13 | The maximum number of header lines to read. Default is `100`. | |
14 | ||
15 | ||
16 | ### `h1_stream:checktls()` <!-- --> {#http.h1_stream:checktls} | |
12 | 17 | |
13 | 18 | See [`stream:checktls()`](#stream:checktls) |
14 | 19 | |
15 | 20 | |
16 | ### `h1_stream:localname()` <!-- --> {#h1_stream:localname} | |
21 | ### `h1_stream:localname()` <!-- --> {#http.h1_stream:localname} | |
17 | 22 | |
18 | 23 | See [`stream:localname()`](#stream:localname) |
19 | 24 | |
20 | 25 | |
21 | ### `h1_stream:peername()` <!-- --> {#h1_stream:peername} | |
26 | ### `h1_stream:peername()` <!-- --> {#http.h1_stream:peername} | |
22 | 27 | |
23 | 28 | See [`stream:peername()`](#stream:peername) |
24 | 29 | |
25 | 30 | |
26 | ### `h1_stream:get_headers(timeout)` <!-- --> {#h1_stream:get_headers} | |
31 | ### `h1_stream:get_headers(timeout)` <!-- --> {#http.h1_stream:get_headers} | |
27 | 32 | |
28 | 33 | See [`stream:get_headers(timeout)`](#stream:get_headers) |
29 | 34 | |
30 | 35 | |
31 | ### `h1_stream:write_headers(headers, end_stream, timeout)` <!-- --> {#h1_stream:write_headers} | |
36 | ### `h1_stream:write_headers(headers, end_stream, timeout)` <!-- --> {#http.h1_stream:write_headers} | |
32 | 37 | |
33 | 38 | See [`stream:write_headers(headers, end_stream, timeout)`](#stream:write_headers) |
34 | 39 | |
35 | 40 | |
36 | ### `h1_stream:write_continue(timeout)` <!-- --> {#h1_stream:write_continue} | |
41 | ### `h1_stream:write_continue(timeout)` <!-- --> {#http.h1_stream:write_continue} | |
37 | 42 | |
38 | 43 | See [`stream:write_continue(timeout)`](#stream:write_continue) |
39 | 44 | |
40 | 45 | |
41 | ### `h1_stream:get_next_chunk(timeout)` <!-- --> {#h1_stream:get_next_chunk} | |
46 | ### `h1_stream:get_next_chunk(timeout)` <!-- --> {#http.h1_stream:get_next_chunk} | |
42 | 47 | |
43 | 48 | See [`stream:get_next_chunk(timeout)`](#stream:get_next_chunk) |
44 | 49 | |
45 | 50 | |
46 | ### `h1_stream:each_chunk()` <!-- --> {#h1_stream:each_chunk} | |
51 | ### `h1_stream:each_chunk()` <!-- --> {#http.h1_stream:each_chunk} | |
47 | 52 | |
48 | 53 | See [`stream:each_chunk()`](#stream:each_chunk) |
49 | 54 | |
50 | 55 | |
51 | ### `h1_stream:get_body_as_string(timeout)` <!-- --> {#h1_stream:get_body_as_string} | |
56 | ### `h1_stream:get_body_as_string(timeout)` <!-- --> {#http.h1_stream:get_body_as_string} | |
52 | 57 | |
53 | 58 | See [`stream:get_body_as_string(timeout)`](#stream:get_body_as_string) |
54 | 59 | |
55 | 60 | |
56 | ### `h1_stream:get_body_chars(n, timeout)` <!-- --> {#h1_stream:get_body_chars} | |
61 | ### `h1_stream:get_body_chars(n, timeout)` <!-- --> {#http.h1_stream:get_body_chars} | |
57 | 62 | |
58 | 63 | See [`stream:get_body_chars(n, timeout)`](#stream:get_body_chars) |
59 | 64 | |
60 | 65 | |
61 | ### `h1_stream:get_body_until(pattern, plain, include_pattern, timeout)` <!-- --> {#h1_stream:get_body_until} | |
66 | ### `h1_stream:get_body_until(pattern, plain, include_pattern, timeout)` <!-- --> {#http.h1_stream:get_body_until} | |
62 | 67 | |
63 | 68 | See [`stream:get_body_until(pattern, plain, include_pattern, timeout)`](#stream:get_body_until) |
64 | 69 | |
65 | 70 | |
66 | ### `h1_stream:save_body_to_file(file, timeout)` <!-- --> {#h1_stream:save_body_to_file} | |
71 | ### `h1_stream:save_body_to_file(file, timeout)` <!-- --> {#http.h1_stream:save_body_to_file} | |
67 | 72 | |
68 | 73 | See [`stream:save_body_to_file(file, timeout)`](#stream:save_body_to_file) |
69 | 74 | |
70 | 75 | |
71 | ### `h1_stream:get_body_as_file(timeout)` <!-- --> {#h1_stream:get_body_as_file} | |
76 | ### `h1_stream:get_body_as_file(timeout)` <!-- --> {#http.h1_stream:get_body_as_file} | |
72 | 77 | |
73 | 78 | See [`stream:get_body_as_file(timeout)`](#stream:get_body_as_file) |
74 | 79 | |
75 | 80 | |
76 | ### `h1_stream:unget(str)` <!-- --> {#h1_stream:unget} | |
81 | ### `h1_stream:unget(str)` <!-- --> {#http.h1_stream:unget} | |
77 | 82 | |
78 | 83 | See [`stream:unget(str)`](#stream:unget) |
79 | 84 | |
80 | 85 | |
81 | ### `h1_stream:write_chunk(chunk, end_stream, timeout)` <!-- --> {#h1_stream:write_chunk} | |
86 | ### `h1_stream:write_chunk(chunk, end_stream, timeout)` <!-- --> {#http.h1_stream:write_chunk} | |
82 | 87 | |
83 | 88 | See [`stream:write_chunk(chunk, end_stream, timeout)`](#stream:write_chunk) |
84 | 89 | |
85 | 90 | |
86 | ### `h1_stream:write_body_from_string(str, timeout)` <!-- --> {#h1_stream:write_body_from_string} | |
91 | ### `h1_stream:write_body_from_string(str, timeout)` <!-- --> {#http.h1_stream:write_body_from_string} | |
87 | 92 | |
88 | 93 | See [`stream:write_body_from_string(str, timeout)`](#stream:write_body_from_string) |
89 | 94 | |
90 | 95 | |
91 | ### `h1_stream:write_body_from_file(file, timeout)` <!-- --> {#h1_stream:write_body_from_file} | |
96 | ### `h1_stream:write_body_from_file(options|file, timeout)` <!-- --> {#http.h1_stream:write_body_from_file} | |
92 | 97 | |
93 | See [`stream:write_body_from_file(file, timeout)`](#stream:write_body_from_file) | |
98 | See [`stream:write_body_from_file(options|file, timeout)`](#stream:write_body_from_file) | |
94 | 99 | |
95 | 100 | |
96 | ### `h1_stream:shutdown()` <!-- --> {#h1_stream:shutdown} | |
101 | ### `h1_stream:shutdown()` <!-- --> {#http.h1_stream:shutdown} | |
97 | 102 | |
98 | 103 | See [`stream:shutdown()`](#stream:shutdown) |
99 | 104 | |
112 | 117 | |
113 | 118 | ### `h1_stream:read_headers(timeout)` <!-- --> {#http.h1_stream:read_headers} |
114 | 119 | |
115 | Reads and returns a table containing the request line and all HTTP headers as key value pairs. | |
120 | Reads and returns a [header block](#http.headers) from the underlying connection. Does *not* take into account buffered header blocks. On error, returns `nil`, an error message and an error number. | |
116 | 121 | |
117 | This function should rarely be used, you're probably looking for [`:get_headers()`](#h1_stream:get_headers). | |
122 | This function should rarely be used, you're probably looking for [`:get_headers()`](#http.h1_stream:get_headers). | |
123 | ||
124 | ||
125 | ### `h1_stream:read_next_chunk(timeout)` <!-- --> {#http.h1_stream:read_next_chunk} | |
126 | ||
127 | Reads and returns the next chunk as a string from the underlying connection. Does *not* take into account buffered chunks. On error, returns `nil`, an error message and an error number. | |
128 | ||
129 | This function should rarely be used, you're probably looking for [`:get_next_chunk()`](#http.h1_stream:get_next_chunk). |
16 | 16 | |
17 | 17 | ### `h2_connection:pollfd()` <!-- --> {#http.h2_connection:pollfd} |
18 | 18 | |
19 | See [`connection:pollfd()`](#connection:pollfd) | |
20 | ||
19 | 21 | |
20 | 22 | ### `h2_connection:events()` <!-- --> {#http.h2_connection:events} |
21 | 23 | |
24 | See [`connection:events()`](#connection:events) | |
25 | ||
22 | 26 | |
23 | 27 | ### `h2_connection:timeout()` <!-- --> {#http.h2_connection:timeout} |
28 | ||
29 | See [`connection:timeout()`](#connection:timeout) | |
24 | 30 | |
25 | 31 | |
26 | 32 | ### `h2_connection:empty()` <!-- --> {#http.h2_connection:empty} |
88 | 94 | ### `h2_connection:read_http2_frame(timeout)` <!-- --> {#http.h2_connection:read_http2_frame} |
89 | 95 | |
90 | 96 | |
91 | ### `h2_connection:write_http2_frame(typ, flags, streamid, payload, timeout)` <!-- --> {#http.h2_connection:write_http2_frame} | |
97 | ### `h2_connection:write_http2_frame(typ, flags, streamid, payload, timeout, flush)` <!-- --> {#http.h2_connection:write_http2_frame} | |
92 | 98 | |
93 | 99 | |
94 | 100 | ### `h2_connection:ping(timeout)` <!-- --> {#http.h2_connection:ping} |
1 | 1 | |
2 | 2 | An h2_stream represents an HTTP 2 stream. The module follows the [*stream*](#stream) interface as well as HTTP 2 specific functions. |
3 | 3 | |
4 | ### `h2_stream.connection` <!-- --> {#h2_stream.connection} | |
4 | ### `h2_stream.connection` <!-- --> {#http.h2_stream.connection} | |
5 | 5 | |
6 | 6 | See [`stream.connection`](#stream.connection) |
7 | 7 | |
8 | 8 | |
9 | ### `h2_stream:checktls()` <!-- --> {#h2_stream:checktls} | |
9 | ### `h2_stream:checktls()` <!-- --> {#http.h2_stream:checktls} | |
10 | 10 | |
11 | 11 | See [`stream:checktls()`](#stream:checktls) |
12 | 12 | |
13 | 13 | |
14 | ### `h2_stream:localname()` <!-- --> {#h2_stream:localname} | |
14 | ### `h2_stream:localname()` <!-- --> {#http.h2_stream:localname} | |
15 | 15 | |
16 | 16 | See [`stream:localname()`](#stream:localname) |
17 | 17 | |
18 | 18 | |
19 | ### `h2_stream:peername()` <!-- --> {#h2_stream:peername} | |
19 | ### `h2_stream:peername()` <!-- --> {#http.h2_stream:peername} | |
20 | 20 | |
21 | 21 | See [`stream:peername()`](#stream:peername) |
22 | 22 | |
23 | 23 | |
24 | ### `h2_stream:get_headers(timeout)` <!-- --> {#h2_stream:get_headers} | |
24 | ### `h2_stream:get_headers(timeout)` <!-- --> {#http.h2_stream:get_headers} | |
25 | 25 | |
26 | 26 | See [`stream:get_headers(timeout)`](#stream:get_headers) |
27 | 27 | |
28 | 28 | |
29 | ### `h2_stream:write_headers(headers, end_stream, timeout)` <!-- --> {#h2_stream:write_headers} | |
29 | ### `h2_stream:write_headers(headers, end_stream, timeout)` <!-- --> {#http.h2_stream:write_headers} | |
30 | 30 | |
31 | 31 | See [`stream:write_headers(headers, end_stream, timeout)`](#stream:write_headers) |
32 | 32 | |
33 | 33 | |
34 | ### `h2_stream:write_continue(timeout)` <!-- --> {#h2_stream:write_continue} | |
34 | ### `h2_stream:write_continue(timeout)` <!-- --> {#http.h2_stream:write_continue} | |
35 | 35 | |
36 | 36 | See [`stream:write_continue(timeout)`](#stream:write_continue) |
37 | 37 | |
38 | 38 | |
39 | ### `h2_stream:get_next_chunk(timeout)` <!-- --> {#h2_stream:get_next_chunk} | |
39 | ### `h2_stream:get_next_chunk(timeout)` <!-- --> {#http.h2_stream:get_next_chunk} | |
40 | 40 | |
41 | 41 | See [`stream:get_next_chunk(timeout)`](#stream:get_next_chunk) |
42 | 42 | |
43 | 43 | |
44 | ### `h2_stream:each_chunk()` <!-- --> {#h2_stream:each_chunk} | |
44 | ### `h2_stream:each_chunk()` <!-- --> {#http.h2_stream:each_chunk} | |
45 | 45 | |
46 | 46 | See [`stream:each_chunk()`](#stream:each_chunk) |
47 | 47 | |
48 | 48 | |
49 | ### `h2_stream:get_body_as_string(timeout)` <!-- --> {#h2_stream:get_body_as_string} | |
49 | ### `h2_stream:get_body_as_string(timeout)` <!-- --> {#http.h2_stream:get_body_as_string} | |
50 | 50 | |
51 | 51 | See [`stream:get_body_as_string(timeout)`](#stream:get_body_as_string) |
52 | 52 | |
53 | 53 | |
54 | ### `h2_stream:get_body_chars(n, timeout)` <!-- --> {#h2_stream:get_body_chars} | |
54 | ### `h2_stream:get_body_chars(n, timeout)` <!-- --> {#http.h2_stream:get_body_chars} | |
55 | 55 | |
56 | 56 | See [`stream:get_body_chars(n, timeout)`](#stream:get_body_chars) |
57 | 57 | |
58 | 58 | |
59 | ### `h2_stream:get_body_until(pattern, plain, include_pattern, timeout)` <!-- --> {#h2_stream:get_body_until} | |
59 | ### `h2_stream:get_body_until(pattern, plain, include_pattern, timeout)` <!-- --> {#http.h2_stream:get_body_until} | |
60 | 60 | |
61 | 61 | See [`stream:get_body_until(pattern, plain, include_pattern, timeout)`](#stream:get_body_until) |
62 | 62 | |
63 | 63 | |
64 | ### `h2_stream:save_body_to_file(file, timeout)` <!-- --> {#h2_stream:save_body_to_file} | |
64 | ### `h2_stream:save_body_to_file(file, timeout)` <!-- --> {#http.h2_stream:save_body_to_file} | |
65 | 65 | |
66 | 66 | See [`stream:save_body_to_file(file, timeout)`](#stream:save_body_to_file) |
67 | 67 | |
68 | 68 | |
69 | ### `h2_stream:get_body_as_file(timeout)` <!-- --> {#h2_stream:get_body_as_file} | |
69 | ### `h2_stream:get_body_as_file(timeout)` <!-- --> {#http.h2_stream:get_body_as_file} | |
70 | 70 | |
71 | 71 | See [`stream:get_body_as_file(timeout)`](#stream:get_body_as_file) |
72 | 72 | |
73 | 73 | |
74 | ### `h2_stream:unget(str)` <!-- --> {#h2_stream:unget} | |
74 | ### `h2_stream:unget(str)` <!-- --> {#http.h2_stream:unget} | |
75 | 75 | |
76 | 76 | See [`stream:unget(str)`](#stream:unget) |
77 | 77 | |
78 | 78 | |
79 | ### `h2_stream:write_chunk(chunk, end_stream, timeout)` <!-- --> {#h2_stream:write_chunk} | |
79 | ### `h2_stream:write_chunk(chunk, end_stream, timeout)` <!-- --> {#http.h2_stream:write_chunk} | |
80 | 80 | |
81 | 81 | See [`stream:write_chunk(chunk, end_stream, timeout)`](#stream:write_chunk) |
82 | 82 | |
83 | 83 | |
84 | ### `h2_stream:write_body_from_string(str, timeout)` <!-- --> {#h2_stream:write_body_from_string} | |
84 | ### `h2_stream:write_body_from_string(str, timeout)` <!-- --> {#http.h2_stream:write_body_from_string} | |
85 | 85 | |
86 | 86 | See [`stream:write_body_from_string(str, timeout)`](#stream:write_body_from_string) |
87 | 87 | |
88 | 88 | |
89 | ### `h2_stream:write_body_from_file(file, timeout)` <!-- --> {#h2_stream:write_body_from_file} | |
89 | ### `h2_stream:write_body_from_file(options|file, timeout)` <!-- --> {#http.h2_stream:write_body_from_file} | |
90 | 90 | |
91 | See [`stream:write_body_from_file(file, timeout)`](#stream:write_body_from_file) | |
91 | See [`stream:write_body_from_file(options|file, timeout)`](#stream:write_body_from_file) | |
92 | 92 | |
93 | 93 | |
94 | ### `h2_stream:shutdown()` <!-- --> {#h2_stream:shutdown} | |
94 | ### `h2_stream:shutdown()` <!-- --> {#http.h2_stream:shutdown} | |
95 | 95 | |
96 | 96 | See [`stream:shutdown()`](#stream:shutdown) |
97 | ||
98 | ||
99 | ### `h2_stream:pick_id(id)` <!-- --> {#http.h2_stream:pick_id} | |
97 | 100 | |
98 | 101 | |
99 | 102 | ### `h2_stream:set_state(new)` <!-- --> {#http.h2_stream:set_state} |
102 | 105 | ### `h2_stream:reprioritise(child, exclusive)` <!-- --> {#http.h2_stream:reprioritise} |
103 | 106 | |
104 | 107 | |
105 | ### `h2_stream:write_http2_frame(typ, flags, payload, timeout)` <!-- --> {#http.h2_stream:write_http2_frame} | |
108 | ### `h2_stream:write_http2_frame(typ, flags, payload, timeout, flush)` <!-- --> {#http.h2_stream:write_http2_frame} | |
106 | 109 | |
107 | 110 | Writes a frame with `h2_stream`'s stream id. |
108 | 111 | |
109 | See [`h2_connection:write_http2_frame(typ, flags, streamid, payload, timeout)`](#http.h2_connection:write_http2_frame) | |
112 | See [`h2_connection:write_http2_frame(typ, flags, streamid, payload, timeout, flush)`](#http.h2_connection:write_http2_frame) | |
110 | 113 | |
111 | 114 | |
112 | ### `h2_stream:write_data_frame(payload, end_stream, padded, timeout)` <!-- --> {#http.h2_stream:write_data_frame} | |
115 | ### `h2_stream:write_data_frame(payload, end_stream, padded, timeout, flush)` <!-- --> {#http.h2_stream:write_data_frame} | |
113 | 116 | |
114 | 117 | |
115 | ### `h2_stream:write_headers_frame(payload, end_stream, end_headers, padded, exclusive, stream_dep, weight, timeout)` <!-- --> {#http.h2_stream:write_headers_frame} | |
118 | ### `h2_stream:write_headers_frame(payload, end_stream, end_headers, padded, exclusive, stream_dep, weight, timeout, flush)` <!-- --> {#http.h2_stream:write_headers_frame} | |
116 | 119 | |
117 | 120 | |
118 | ### `h2_stream:write_priority_frame(exclusive, stream_dep, weight, timeout)` <!-- --> {#http.h2_stream:write_priority_frame} | |
121 | ### `h2_stream:write_priority_frame(exclusive, stream_dep, weight, timeout, flush)` <!-- --> {#http.h2_stream:write_priority_frame} | |
119 | 122 | |
120 | 123 | |
121 | ### `h2_stream:write_rst_stream(err_code, timeout)` <!-- --> {#http.h2_stream:write_rst_stream} | |
124 | ### `h2_stream:write_rst_stream_frame(err_code, timeout, flush)` <!-- --> {#http.h2_stream:write_rst_stream} | |
122 | 125 | |
123 | 126 | |
124 | ### `h2_stream:write_settings_frame(ACK, settings, timeout)` <!-- --> {#http.h2_stream:write_settings_frame} | |
127 | ### `h2_stream:rst_stream(err, timeout)` <!-- --> {#http.h2_stream:rst_stream} | |
125 | 128 | |
126 | 129 | |
127 | ### `h2_stream:write_push_promise_frame(promised_stream_id, payload, end_headers, padded, timeout)` <!-- --> {#http.h2_stream:write_push_promise_frame} | |
130 | ### `h2_stream:write_settings_frame(ACK, settings, timeout, flush)` <!-- --> {#http.h2_stream:write_settings_frame} | |
131 | ||
132 | ||
133 | ### `h2_stream:write_push_promise_frame(promised_stream_id, payload, end_headers, padded, timeout, flush)` <!-- --> {#http.h2_stream:write_push_promise_frame} | |
128 | 134 | |
129 | 135 | |
130 | 136 | ### `h2_stream:push_promise(headers, timeout)` <!-- --> {#http.h2_stream:push_promise} |
134 | 140 | Returns the new stream as a [h2_stream](#http.h2_stream). |
135 | 141 | |
136 | 142 | |
137 | ### `h2_stream:write_ping_frame(ACK, payload, timeout)` <!-- --> {#http.h2_stream:write_ping_frame} | |
143 | ### `h2_stream:write_ping_frame(ACK, payload, timeout, flush)` <!-- --> {#http.h2_stream:write_ping_frame} | |
138 | 144 | |
139 | 145 | |
140 | ### `h2_stream:write_goaway_frame(last_streamid, err_code, debug_msg, timeout)` <!-- --> {#http.h2_stream:write_goaway_frame} | |
146 | ### `h2_stream:write_goaway_frame(last_streamid, err_code, debug_msg, timeout, flush)` <!-- --> {#http.h2_stream:write_goaway_frame} | |
141 | 147 | |
142 | 148 | |
143 | ### `h2_stream:write_window_update_frame(inc, timeout)` <!-- --> {#http.h2_stream:write_window_update_frame} | |
149 | ### `h2_stream:write_window_update_frame(inc, timeout, flush)` <!-- --> {#http.h2_stream:write_window_update_frame} | |
144 | 150 | |
145 | 151 | |
146 | 152 | ### `h2_stream:write_window_update(inc, timeout)` <!-- --> {#http.h2_stream:write_window_update} |
147 | 153 | |
148 | 154 | |
149 | ### `h2_stream:write_continuation_frame(payload, end_headers, timeout)` <!-- --> {#http.h2_stream:write_continuation_frame} | |
155 | ### `h2_stream:write_continuation_frame(payload, end_headers, timeout, flush)` <!-- --> {#http.h2_stream:write_continuation_frame} |
38 | 38 | ### `hpack_context:lookup_name_index(name)` <!-- --> {#http.hpack:lookup_name_index} |
39 | 39 | |
40 | 40 | |
41 | ### `hpack_context:lookup_index(index, allow_single)` <!-- --> {#http.hpack:lookup_index} | |
41 | ### `hpack_context:lookup_index(index)` <!-- --> {#http.hpack:lookup_index} | |
42 | 42 | |
43 | 43 | |
44 | 44 | ### `hpack_context:add_header_indexed(name, value, huffman)` <!-- --> {#http.hpack:add_header_indexed} |
4 | 4 | ### `new_store()` <!-- --> {#http.hsts.new_store} |
5 | 5 | |
6 | 6 | Creates and returns a new HSTS store. |
7 | ||
8 | ||
9 | ### `hsts_store.max_items` <!-- --> {#http.hsts.max_items} | |
10 | ||
11 | The maximum number of items allowed in the store. | |
12 | Decreasing this value will only prevent new items from being added, it will not remove old items. | |
13 | ||
14 | Defaults to infinity (any number of items is allowed). | |
7 | 15 | |
8 | 16 | |
9 | 17 | ### `hsts_store:clone()` <!-- --> {#http.hsts:clone} |
15 | 23 | |
16 | 24 | Add new directives to the store about the given `host`. `directives` should be a table of directives, which *must* include the key `"max-age"`. |
17 | 25 | |
26 | Returns a boolean indicating if the item was accepted. | |
27 | ||
28 | ||
29 | ### `hsts_store:remove(host)` <!-- --> {#http.hsts:remove} | |
30 | ||
31 | Removes the entry for `host` from the store (if it exists). | |
32 | ||
18 | 33 | |
19 | 34 | ### `hsts_store:check(host)` <!-- --> {#http.hsts:check} |
20 | 35 | |
21 | 36 | Returns a boolean indicating if the given `host` is a known HSTS host. |
22 | 37 | |
23 | 38 | |
39 | ### `hsts_store:clean_due()` <!-- --> {#http.hsts:clean_due} | |
40 | ||
41 | Returns the number of seconds until the next item in the store expires. | |
42 | ||
43 | ||
24 | 44 | ### `hsts_store:clean()` <!-- --> {#http.hsts:clean} |
25 | 45 | |
26 | 46 | Removes expired entries from the store. |
19 | 19 | ### `request.port` <!-- --> {#http.request.port} |
20 | 20 | |
21 | 21 | The port this request should be sent to. |
22 | ||
23 | ||
24 | ### `request.bind` <!-- --> {#http.request.bind} | |
25 | ||
26 | The local outgoing address and optionally port to bind in the form of `"address[:port]"`. Default is to allow the kernel to choose an address+port. | |
27 | ||
28 | IPv6 addresses may be specified via square bracket notation. e.g. `"127.0.0.1"`, `"127.0.0.1:50000"`, `"[::1]:30000"`. | |
29 | ||
30 | This option is rarely needed. Supplying an address can be used to manually select the network interface to make the request from, while supplying a port is only really used to interoperate with firewalls or devices that demand use of a certain port. | |
22 | 31 | |
23 | 32 | |
24 | 33 | ### `request.tls` <!-- --> {#http.request.tls} |
51 | 60 | ### `request.headers` <!-- --> {#http.request.headers} |
52 | 61 | |
53 | 62 | A [*http.headers*](#http.headers) object of headers that will be sent in the request. |
63 | ||
64 | ||
65 | ### `request.hsts` <!-- --> {#http.request.hsts} | |
66 | ||
67 | The [*http.hsts*](#http.hsts) store that will be used to enforce HTTP strict transport security. | |
68 | An attempt will be made to add strict transport headers from a response to the store. | |
69 | ||
70 | Defaults to a shared store. | |
71 | ||
72 | ||
73 | ### `request.proxies` <!-- --> {#http.request.proxies} | |
74 | ||
75 | The [*http.proxies*](#http.proxies) object used to select a proxy for the request. | |
76 | Only consulted if `request.proxy` is `nil`. | |
77 | ||
78 | ||
79 | ### `request.cookie_store` <!-- --> {#http.request.cookie_store} | |
80 | ||
81 | The [*http.cookie.store*](#http.cookie.store) that will be used to find cookies for the request. | |
82 | An attempt will be made to add cookies from a response to the store. | |
83 | ||
84 | Defaults to a shared store. | |
85 | ||
86 | ||
87 | ### `request.is_top_level` <!-- --> {#http.request.is_top_level} | |
88 | ||
89 | A boolean flag indicating if this request is a "top level" request (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)) | |
90 | ||
91 | Defaults to `true` | |
92 | ||
93 | ||
94 | ### `request.site_for_cookies` <!-- --> {#http.request.site_for_cookies} | |
95 | ||
96 | A string containing the host that should be considered as the "site for cookies" (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)), can be `nil` if unknown. | |
97 | ||
98 | Defaults to `nil`. | |
54 | 99 | |
55 | 100 | |
56 | 101 | ### `request.follow_redirects` <!-- --> {#http.request.follow_redirects} |
0 | 0 | ## http.server |
1 | 1 | |
2 | *http.server* objects are used to encapsulate the accept() and dispatch of http clients. Each client request triggers `onstream` which is called from an independent cqueue, providing an independent process for each request. `onstream` can also be used for testing and upgrading a request, with HTTP 1.1 to WebSockets being the notable example. | |
2 | *http.server* objects are used to encapsulate the `accept()` and dispatch of http clients. Each new client request will invoke the `onstream` callback in a new cqueues managed coroutine. In addition to constructing and returning a HTTP response, an `onstream` handler may decide to take ownership of the connection for other purposes, e.g. upgrade from a HTTP 1.1 connection to a WebSocket connection. | |
3 | 3 | |
4 | 4 | For examples of how to use the server library, please see the [examples directory](https://github.com/daurnimator/lua-http/tree/master/examples) in the source tree. |
5 | 5 | |
15 | 15 | - `true`: Allows tls connections only |
16 | 16 | - `false`: Allows non-tls connections only |
17 | 17 | - `.ctx` (*context object*): An `openssl.ssl.context` object to use for tls connections. If `nil` is passed, a self-signed context will be generated. |
18 | - `.client_timeout` (*number*): Timeout (in seconds) to wait for client to send first bytes and/or complete TLS handshake. Default is 10 seconds. | |
18 | - `.connection_setup_timeout` (*number*): Timeout (in seconds) to wait for client to send first bytes and/or complete TLS handshake. Default is 10 seconds. | |
19 | - `.intra_stream_timeout` (*number*): Timeout (in seconds) to wait for a new [*stream*](#stream) on an idle connection before giving up and closing the connection | |
19 | 20 | - `.version` (*number*): The http version to allow to connect (default: any) |
20 | 21 | - `.cq` (*cqueue*): A cqueues controller to use as a main loop. The default is a new controller for the server. |
21 | 22 | - `.max_concurrent` (*number*): Maximum number of connections to allow live at a time. Default is infinity. |
115 | 116 | - Another cqueues thread with some other master socket. |
116 | 117 | - From inetd for start on demand daemons. |
117 | 118 | - A Unix socket with `SCM_RIGHTS`. |
119 | ||
120 | ||
121 | ### `server:add_stream(stream)` <!-- --> {#http.server:add_stream} | |
122 | ||
123 | Add an existing stream to the server for processing. |
10 | 10 | |
11 | 11 | ### `fdopen(socket)` <!-- --> {#http.socks.fdopen} |
12 | 12 | |
13 | This function takes an existing cqueues.socket as a parameter and returns a *http.socks* object with `socket` as it's base. | |
13 | This function takes an existing cqueues.socket as a parameter and returns a *http.socks* object with `socket` as its base. | |
14 | 14 | |
15 | 15 | |
16 | 16 | ### `socks.needs_resolve` <!-- --> {#http.socks.needs_resolve} |
23 | 23 | The [Mozilla "Intermediate" cipher list](https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29) as a colon separated list, ready to pass to OpenSSL |
24 | 24 | |
25 | 25 | |
26 | ### `old_cipher_list` <!-- --> {#http.tls.old_cipher_list} | |
27 | ||
28 | The [Mozilla "Old" cipher list](https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility) as a colon separated list, ready to pass to OpenSSL | |
29 | ||
30 | ||
26 | 31 | ### `banned_ciphers` <!-- --> {#http.tls.banned_ciphers} |
27 | 32 | |
28 | 33 | A set (table with string keys and values of `true`) of the [ciphers banned in HTTP 2](https://http2.github.io/http2-spec/#BadCipherSuites) where the keys are OpenSSL cipher names. |
44 | 44 | ### `resolve_relative_path(orig_path, relative_path)` <!-- --> {#http.util.resolve_relative_path} |
45 | 45 | |
46 | 46 | |
47 | ### `is_safe_method(method)` <!-- --> {#http.util.is_safe_method} | |
48 | ||
49 | Returns a boolean indicating if the passed string `method` is a "safe" method. | |
50 | See [RFC 7231 section 4.2.1](https://tools.ietf.org/html/rfc7231#section-4.2.1) for more information. | |
51 | ||
52 | ||
53 | ### `is_ip(str)` <!-- --> {#http.util.is_ip} | |
54 | ||
55 | Returns a boolean indicating if the passed string `str` is a valid IP. | |
56 | ||
57 | ||
47 | 58 | ### `scheme_to_port` <!-- --> {#http.util.scheme_to_port} |
48 | 59 | |
49 | 60 | Map from schemes (as strings) to default ports (as integers). |
63 | 63 | Send the given `data` as a data frame. |
64 | 64 | |
65 | 65 | - `data` should be a string |
66 | - `opcode` can be a numeric opcode, `"text"` or `"binary"`. If `nil`, defaults to a text frame | |
66 | - `opcode` can be a numeric opcode, `"text"` or `"binary"`. If `nil`, defaults to a text frame. | |
67 | Note this `opcode` is the websocket frame opcode, not an application specific opcode. The opcode should be one from the [IANA registry](https://www.iana.org/assignments/websocket/websocket.xhtml#opcode). | |
67 | 68 | |
68 | 69 | |
69 | 70 | ### `websocket:send_ping(data, timeout)` <!-- --> {#http.websocket:send_ping} |
95 | 95 | element.className = 'collapsible'; |
96 | 96 | element.setAttribute('state', 'collapsed'); |
97 | 97 | element.onclick = function(event) { |
98 | if (event.toElement != this) return; | |
98 | if (event.target != this) return; | |
99 | 99 | if (this.getAttribute('state') == 'collapsed') { |
100 | 100 | this.removeAttribute('state'); |
101 | 101 | } else { |
0 | #!/usr/bin/env lua | |
0 | 1 | --[[ |
1 | 2 | Makes a request to an HTTP2 endpoint that has an infinite length response. |
2 | 3 |
0 | #!/usr/bin/env lua | |
0 | 1 | --[=[ |
1 | 2 | This example serves a file/directory browser |
2 | 3 | It defaults to serving the current directory. |
90 | 91 | res_headers:append(":status", nil) |
91 | 92 | res_headers:append("server", default_server) |
92 | 93 | res_headers:append("date", http_util.imf_date()) |
94 | ||
95 | if req_method ~= "GET" and req_method ~= "HEAD" then | |
96 | res_headers:upsert(":status", "405") | |
97 | assert(stream:write_headers(res_headers, true)) | |
98 | return | |
99 | end | |
93 | 100 | |
94 | 101 | local path = req_headers:get(":path") |
95 | 102 | local uri_t = assert(uri_reference:match(path), "invalid path") |
144 | 151 | </tr></thead> |
145 | 152 | <tbody> |
146 | 153 | ]], xml_escape(path), xml_escape(path)), false)) |
147 | -- lfs doesn't provide a way to get an errno for attempting to open a directory https://github.com/keplerproject/luafilesystem/issues/87 | |
154 | -- lfs doesn't provide a way to get an errno for attempting to open a directory | |
155 | -- See https://github.com/keplerproject/luafilesystem/issues/87 | |
148 | 156 | for filename in lfs.dir(real_path) do |
149 | 157 | if not (filename == ".." and path == "/") then -- Exclude parent directory entry listing from top level |
150 | 158 | local stats = lfs.attributes(real_path .. "/" .. filename) |
0 | #!/usr/bin/env lua | |
0 | 1 | --[[ |
1 | 2 | A server that responds with an infinite server-side-events format. |
2 | 3 | https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format |
20 | 21 | |
21 | 22 | -- Build response headers |
22 | 23 | local res_headers = http_headers.new() |
24 | if req_method ~= "GET" and req_method ~= "HEAD" then | |
25 | res_headers:upsert(":status", "405") | |
26 | assert(stream:write_headers(res_headers, true)) | |
27 | return | |
28 | end | |
23 | 29 | if req_headers:get ":path" == "/" then |
24 | 30 | res_headers:append(":status", "200") |
25 | 31 | res_headers:append("content-type", "text/html") |
0 | #!/usr/bin/env lua | |
0 | 1 | --[[ |
1 | 2 | Verbosely fetches an HTTP resource |
2 | 3 | If a body is given, use a POST request |
0 | #!/usr/bin/env lua | |
0 | 1 | --[[ |
1 | 2 | Example of websocket client usage |
2 | 3 | |
3 | - Connects to the coinbase feed. | |
4 | Documentation of feed: https://docs.exchange.coinbase.com/#websocket-feed | |
4 | - Connects to the gdax market data feed. | |
5 | Documentation of feed: https://docs.gdax.com/#websocket-feed | |
5 | 6 | - Sends a subscribe message |
6 | 7 | - Prints off 5 messages |
7 | 8 | - Close the socket and clean up. |
9 | 10 | |
10 | 11 | local websocket = require "http.websocket" |
11 | 12 | |
12 | local ws = websocket.new_from_uri("wss://ws-feed.exchange.coinbase.com") | |
13 | local ws = websocket.new_from_uri("wss://ws-feed.gdax.com") | |
13 | 14 | assert(ws:connect()) |
14 | 15 | assert(ws:send([[{"type": "subscribe", "product_id": "BTC-USD"}]])) |
15 | 16 | for _=1, 5 do |
0 | band: (integer, integer) -> (integer) | |
1 | bor: (integer, integer) -> (integer) | |
2 | bxor: (integer, integer) -> (integer) |
0 | 0 | local ca = require "cqueues.auxlib" |
1 | 1 | local cs = require "cqueues.socket" |
2 | 2 | local http_tls = require "http.tls" |
3 | local http_util = require "http.util" | |
3 | 4 | local connection_common = require "http.connection_common" |
4 | 5 | local onerror = connection_common.onerror |
5 | 6 | local new_h1_connection = require "http.h1_connection".new |
7 | 8 | local openssl_ssl = require "openssl.ssl" |
8 | 9 | local openssl_ctx = require "openssl.ssl.context" |
9 | 10 | local openssl_verify_param = require "openssl.x509.verify_param" |
10 | ||
11 | local EOF = require "lpeg".P(-1) | |
12 | local IPv4address = require "lpeg_patterns.IPv4".IPv4address | |
13 | local IPv6addrz = require "lpeg_patterns.IPv6".IPv6addrz | |
14 | local IPaddress = (IPv4address + IPv6addrz) * EOF | |
15 | 11 | |
16 | 12 | -- Create a shared 'default' TLS context |
17 | 13 | local default_ctx = http_tls.new_client_context() |
23 | 19 | if tls then |
24 | 20 | local ctx = options.ctx or default_ctx |
25 | 21 | local ssl = openssl_ssl.new(ctx) |
26 | local ip = options.host and IPaddress:match(options.host) | |
27 | if options.sendname ~= nil then | |
28 | if options.sendname then -- false indicates no sendname wanted | |
29 | ssl:setHostName(options.sendname) | |
30 | end | |
31 | elseif options.host and not ip then | |
32 | ssl:setHostName(options.host) | |
22 | local host = options.host | |
23 | local host_is_ip = host and http_util.is_ip(host) | |
24 | local sendname = options.sendname | |
25 | if sendname == nil and not host_is_ip and host then | |
26 | sendname = host | |
27 | end | |
28 | if sendname then -- false indicates no sendname wanted | |
29 | ssl:setHostName(sendname) | |
33 | 30 | end |
34 | 31 | if http_tls.has_alpn then |
35 | 32 | if version == nil then |
43 | 40 | if version == 2 then |
44 | 41 | ssl:setOptions(openssl_ctx.OP_NO_TLSv1 + openssl_ctx.OP_NO_TLSv1_1) |
45 | 42 | end |
46 | if options.host and http_tls.has_hostname_validation then | |
43 | if host and http_tls.has_hostname_validation then | |
47 | 44 | local params = openssl_verify_param.new() |
48 | if ip then | |
49 | params:setIP(options.host) | |
45 | if host_is_ip then | |
46 | params:setIP(host) | |
50 | 47 | else |
51 | params:setHost(options.host) | |
48 | params:setHost(host) | |
52 | 49 | end |
53 | 50 | -- Allow user defined params to override |
54 | 51 | local old = ssl:getParam() |
83 | 80 | end |
84 | 81 | |
85 | 82 | local function connect(options, timeout) |
83 | local bind = options.bind | |
84 | if bind ~= nil then | |
85 | assert(type(bind) == "string") | |
86 | local bind_address, bind_port = bind:match("^(.-):(%d+)$") | |
87 | if bind_address then | |
88 | bind_port = tonumber(bind_port, 10) | |
89 | else | |
90 | bind_address = bind | |
91 | end | |
92 | local ipv6 = bind_address:match("^%[([:%x]+)%]$") | |
93 | if ipv6 then | |
94 | bind_address = ipv6 | |
95 | end | |
96 | bind = { | |
97 | address = bind_address; | |
98 | port = bind_port; | |
99 | } | |
100 | end | |
86 | 101 | local s, err, errno = ca.fileresult(cs.connect { |
87 | 102 | family = options.family; |
88 | 103 | host = options.host; |
89 | 104 | port = options.port; |
90 | 105 | path = options.path; |
106 | bind = bind; | |
91 | 107 | sendname = false; |
92 | 108 | v6only = options.v6only; |
93 | 109 | nodelay = true; |
22 | 22 | end |
23 | 23 | end |
24 | 24 | return err, why |
25 | end | |
26 | ||
--- Return the pollable descriptor of the underlying socket,
-- or nil once the socket has been closed/detached.
function connection_methods:pollfd()
	local sock = self.socket
	if sock == nil then
		return nil
	end
	return sock:pollfd()
end
33 | ||
--- Return the cqueues event set of the underlying socket,
-- or nil once the socket has been closed/detached.
function connection_methods:events()
	local sock = self.socket
	if sock == nil then
		return nil
	end
	return sock:events()
end
40 | ||
--- Return the poll timeout of the underlying socket,
-- or nil once the socket has been closed/detached.
function connection_methods:timeout()
	local sock = self.socket
	if sock == nil then
		return nil
	end
	return sock:timeout()
end
26 | 47 | |
27 | 48 | function connection_methods:onidle_() -- luacheck: ignore 212 |
-- Typedlua type declaration for the common lua-http connection interface
-- (implemented by both the HTTP/1 and HTTP/2 connection objects).
interface connection
	-- implements cqueues polling interface
	const pollfd: (self) -> (nil)|(integer) -- TODO: cqueues condition
	const events: (self) -> (nil)|(string|integer)
	const timeout: (self) -> (nil)|(number)

	const checktls: (self) -> (nil)|(any) -- TODO: luaossl SSL object
	const localname: (self) -> (integer, string, integer?)|(nil)|(nil, string, number)
	const peername: (self) -> (integer, string, integer?)|(nil)|(nil, string, number)
	const onidle: (self, (connection)->()) -> ((connection)->())
	const connect: (self) -> (true)|(nil)|(nil, string, number)
	const flush: (self, number) -> (true)|(nil, string, number)
	const close: (self) -> (true)

	-- Not in connection_common.lua
	const version: integer
	-- XXX: needs circular require https://github.com/andremm/typedlua/issues/120
	-- const new_stream: (self) -> (stream)|(nil) -- Note: in http2 this takes optional id argument
	-- const get_next_incoming_stream: (self, number?) -> (stream)|(nil)|(nil, string, number)
	const shutdown: (self) -> (true)
end
0 | --[[ | |
1 | Data structures useful for Cookies | |
2 | RFC 6265 | |
3 | ]] | |
4 | ||
5 | local http_patts = require "lpeg_patterns.http" | |
6 | local binaryheap = require "binaryheap" | |
7 | local http_util = require "http.util" | |
8 | local has_psl, psl = pcall(require, "psl") | |
9 | ||
10 | local EOF = require "lpeg".P(-1) | |
11 | local sane_cookie_date = http_patts.IMF_fixdate * EOF | |
12 | local Cookie = http_patts.Cookie * EOF | |
13 | local Set_Cookie = http_patts.Set_Cookie * EOF | |
14 | ||
--- Serialize a Set-Cookie header value from its components.
-- `expiry_time` of +inf (or nil) produces a session cookie; -inf emits
-- `Max-Age=0` (a deletion request). `same_site` must be "strict" or "lax"
-- when given. All pieces are gathered and joined with a single concat.
local function bake(name, value, expiry_time, domain, path, secure_only, http_only, same_site)
	local parts = { name, "=", value }
	local function push(s)
		parts[#parts + 1] = s
	end
	if expiry_time and expiry_time ~= (1e999) then
		-- Prefer Expires over Max-age unless it is a deletion request
		if expiry_time == (-1e999) then
			push("; Max-Age=0")
		else
			push("; Expires=")
			push(http_util.imf_date(expiry_time))
		end
	end
	if domain then
		push("; Domain=")
		push(domain)
	end
	if path then
		push("; Path=")
		push(http_util.encodeURI(path))
	end
	if secure_only then
		push("; Secure")
	end
	if http_only then
		push("; HttpOnly")
	end
	-- https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2
	if same_site then
		if same_site == "strict" then
			push("; SameSite=Strict")
		elseif same_site == "lax" then
			push("; SameSite=Lax")
		else
			error('invalid value for same_site, expected "strict" or "lax"')
		end
	end
	return table.concat(parts)
end
63 | ||
--- Parse a Cookie request header value.
-- Returns a table of name=value pairs, or nil on parse failure.
local function parse_cookie(header)
	return Cookie:match(header)
end
67 | ||
--- Parse and merge all "cookie" headers of a request.
-- Later headers override earlier ones on name clashes.
-- Always returns a table (possibly empty).
local function parse_cookies(req_headers)
	local headers = req_headers:get_as_sequence("cookie")
	local merged
	for i = 1, headers.n do
		local parsed = parse_cookie(headers[i])
		if parsed then
			if merged == nil then
				-- adopt the first successfully parsed table as the result
				merged = parsed
			else
				for k, v in pairs(parsed) do
					merged[k] = v
				end
			end
		end
	end
	return merged or {}
end
85 | ||
--- Parse a Set-Cookie response header value.
-- Returns name, value, params-table on success; nil on parse failure.
local function parse_setcookie(header)
	return Set_Cookie:match(header)
end
89 | ||
-- Lower-case a host name for comparisons.
-- With lua-psl available this is UTF-8 aware; the fallback only accepts
-- printable ASCII (punctuation/alphanumerics) and returns nil otherwise.
local canonicalise_host
if has_psl then
	canonicalise_host = psl.str_to_utf8lower
else
	canonicalise_host = function(str)
		-- fail on non-ascii chars
		if str:match("^[%p%w]*$") then
			return str:lower()
		end
		return nil
	end
end
102 | ||
103 | --[[ | |
104 | A string domain-matches a given domain string if at least one of the following | |
105 | conditions hold: | |
106 | - The domain string and the string are identical. (Note that both the domain | |
107 | string and the string will have been canonicalized to lower case at this point.) | |
108 | - All of the following conditions hold: | |
109 | - The domain string is a suffix of the string. | |
110 | - The last character of the string that is not included in the domain string | |
111 | is a %x2E (".") character. | |
112 | - The string is a host name (i.e., not an IP address). | |
113 | ]] | |
-- RFC 6265 Section 5.1.3: does `str` domain-match `domain_string`?
-- Both inputs are assumed already canonicalized to lower case.
local function domain_match(domain_string, str)
	if str == domain_string then
		return true
	end
	local len = #domain_string
	return str:sub(-len) == domain_string
		and str:sub(-len - 1, -len - 1) == "."
		and not http_util.is_ip(str)
end
121 | ||
122 | --[[ A request-path path-matches a given cookie-path if at least one of the following conditions holds: | |
123 | - The cookie-path and the request-path are identical. | |
124 | - The cookie-path is a prefix of the request-path, and the last | |
125 | character of the cookie-path is %x2F ("/"). | |
126 | - The cookie-path is a prefix of the request-path, and the first | |
127 | character of the request-path that is not included in the cookie-path is a %x2F ("/") character. | |
128 | ]] | |
-- RFC 6265 Section 5.1.4: does `req_path` path-match cookie-path `path`?
-- True when identical, or when `path` is a prefix ending in "/" or
-- followed by "/" in the request path.
local function path_match(path, req_path)
	if path == req_path then
		return true
	end
	if req_path:sub(1, #path) ~= path then
		return false
	end
	return path:sub(-1, -1) == "/"
		or req_path:sub(#path + 1, #path + 1) == "/"
end
141 | ||
-- Methods and metatable shared by individual cookie objects.
local cookie_methods = {}
local cookie_mt = {
	__name = "http.cookie.cookie";
	__index = cookie_methods;
}
147 | ||
--- Serialize this cookie as one line of curl/Netscape cookie-jar format.
function cookie_methods:netscape_format()
	-- clamp the expiry into the non-negative 32-bit range the format uses
	local expiry = self.expiry_time
	if expiry < 0 then
		expiry = 0
	elseif expiry > 2147483647 then
		expiry = 2147483647
	end
	return string.format("%s%s\t%s\t%s\t%s\t%d\t%s\t%s\n",
		self.http_only and "#HttpOnly_" or "",
		self.domain or "unknown",
		self.host_only and "TRUE" or "FALSE",
		self.path,
		self.secure_only and "TRUE" or "FALSE",
		expiry,
		self.name,
		self.value)
end
159 | ||
160 | ||
-- Pick a default public-suffix-list database when lua-psl is available:
-- prefer the freshest list, falling back to the builtin one.
local default_psl
if has_psl and psl.latest then
	default_psl = psl.latest()
elseif has_psl then
	default_psl = psl.builtin()
end
-- Per-store defaults; each field may be overridden on a store instance.
local store_methods = {
	psl = default_psl;
	time = function() return os.time() end; -- overridable clock (unix timestamp)
	max_cookie_length = (1e999); -- limits are infinite by default
	max_cookies = (1e999);
	max_cookies_per_domain = (1e999);
}

local store_mt = {
	__name = "http.cookie.store";
	__index = store_methods;
}
179 | ||
--- Create an empty cookie store.
local function new_store()
	local store = {
		domains = {}; -- domain -> path -> name -> cookie
		expiry_heap = binaryheap.minUnique(); -- cookies keyed by expiry_time
		n_cookies = 0;
		n_cookies_per_domain = {};
	}
	return setmetatable(store, store_mt)
end
188 | ||
-- Insert `cookie` into the store (or, when it has already expired,
-- delete the matching stored cookie), enforcing the store's size limits.
-- Returns true when the store was modified, false when the cookie was
-- rejected (too long, over limits, or http-only conflict).
local function add_to_store(self, cookie, req_is_http, now)
	if cookie.expiry_time < now then
		-- This was all just a trigger to delete the old cookie
		self:remove(cookie.domain, cookie.path, cookie.name)
	else
		local name = cookie.name
		local cookie_length = #name + 1 + #cookie.value
		if cookie_length > self.max_cookie_length then
			return false
		end

		local domain = cookie.domain
		local domain_cookies = self.domains[domain]
		local path_cookies
		local old_cookie
		if domain_cookies ~= nil then
			path_cookies = domain_cookies[cookie.path]
			if path_cookies ~= nil then
				old_cookie = path_cookies[name]
			end
		end

		-- If the cookie store contains a cookie with the same name,
		-- domain, and path as the newly created cookie:
		if old_cookie then
			-- If the newly created cookie was received from a "non-HTTP"
			-- API and the old-cookie's http-only-flag is set, abort these
			-- steps and ignore the newly created cookie entirely.
			if not req_is_http and old_cookie.http_only then
				return false
			end

			-- Update the creation-time of the newly created cookie to
			-- match the creation-time of the old-cookie.
			cookie.creation_time = old_cookie.creation_time

			-- Remove the old-cookie from the cookie store.
			self.expiry_heap:remove(old_cookie)
		else
			if self.n_cookies >= self.max_cookies or self.max_cookies_per_domain < 1 then
				return false
			end

			-- Cookie will be added
			if domain_cookies == nil then
				path_cookies = {}
				domain_cookies = {
					[cookie.path] = path_cookies;
				}
				self.domains[domain] = domain_cookies
				self.n_cookies_per_domain[domain] = 1
			else
				local n_cookies_per_domain = self.n_cookies_per_domain[domain]
				if n_cookies_per_domain >= self.max_cookies_per_domain then
					return false
				end
				path_cookies = domain_cookies[cookie.path]
				if path_cookies == nil then
					path_cookies = {}
					domain_cookies[cookie.path] = path_cookies
				end
				-- BUG FIX: increment the per-domain count; previously the
				-- old value was written back unchanged, so the count never
				-- rose above 1 and decrements in :remove()/:clean() would
				-- drive it out of sync (even negative).
				self.n_cookies_per_domain[domain] = n_cookies_per_domain + 1
			end

			self.n_cookies = self.n_cookies + 1
		end

		path_cookies[name] = cookie
		self.expiry_heap:insert(cookie.expiry_time, cookie)
	end

	return true
end
262 | ||
-- Receive and validate one cookie for storage, following RFC 6265
-- Section 5.3 plus the 6265bis SameSite / __Secure- / __Host- rules.
-- `params` is the attribute table produced by parse_setcookie.
-- Returns true when stored (or an old cookie deleted), false when rejected.
function store_methods:store(req_domain, req_path, req_is_http, req_is_secure, req_site_for_cookies, name, value, params)
	assert(type(req_domain) == "string")
	assert(type(req_path) == "string")
	assert(type(name) == "string")
	assert(type(value) == "string")
	assert(type(params) == "table")

	local now = self.time()

	req_domain = assert(canonicalise_host(req_domain), "invalid request domain")

	-- Clean now so that we can assume there are no expired cookies in store
	self:clean()

	-- RFC 6265 Section 5.3
	local cookie = setmetatable({
		name = name;
		value = value;
		expiry_time = (1e999); -- +inf: never expires unless an attribute says so
		domain = req_domain;
		path = nil;
		creation_time = now;
		last_access_time = now;
		persistent = false;
		host_only = true;
		secure_only = not not params.secure;
		http_only = not not params.httponly;
		same_site = nil;
	}, cookie_mt)

	-- If a cookie has both the Max-Age and the Expires attribute, the Max-
	-- Age attribute has precedence and controls the expiration date of the
	-- cookie.
	local max_age = params["max-age"]
	if max_age and max_age:find("^%-?[0-9]+$") then
		max_age = tonumber(max_age, 10)
		cookie.persistent = true
		if max_age <= 0 then
			cookie.expiry_time = (-1e999) -- -inf: already expired, i.e. a deletion request
		else
			cookie.expiry_time = now + max_age
		end
	elseif params.expires then
		local date = sane_cookie_date:match(params.expires)
		if date then
			cookie.persistent = true
			cookie.expiry_time = os.time(date)
		end
	end

	local domain = params.domain or "";

	-- If the first character of the attribute-value string is %x2E ("."):
	-- Let cookie-domain be the attribute-value without the leading %x2E (".") character.
	if domain:sub(1, 1) == "." then
		domain = domain:sub(2)
	end

	-- Convert the cookie-domain to lower case.
	domain = canonicalise_host(domain)
	if not domain then
		return false
	end

	-- If the user agent is configured to reject "public suffixes" and
	-- the domain-attribute is a public suffix:
	if domain ~= "" and self.psl and self.psl:is_public_suffix(domain) then
		-- If the domain-attribute is identical to the canonicalized request-host:
		if domain == req_domain then
			-- Let the domain-attribute be the empty string.
			domain = ""
		else
			-- Ignore the cookie entirely and abort these steps.
			return false
		end
	end

	-- If the domain-attribute is non-empty:
	if domain ~= "" then
		-- If the canonicalized request-host does not domain-match the
		-- domain-attribute:
		if not domain_match(domain, req_domain) then
			-- Ignore the cookie entirely and abort these steps.
			return false
		else
			-- Set the cookie's host-only-flag to false.
			cookie.host_only = false
			-- Set the cookie's domain to the domain-attribute.
			cookie.domain = domain
		end
	end

	-- RFC 6265 Section 5.2.4
	-- If the attribute-value is empty or if the first character of the
	-- attribute-value is not %x2F ("/")
	local path = params.path or ""
	if path:sub(1, 1) ~= "/" then
		-- Let cookie-path be the default-path.
		local default_path
		-- RFC 6265 Section 5.1.4
		-- Let uri-path be the path portion of the request-uri if such a
		-- portion exists (and empty otherwise). For example, if the
		-- request-uri contains just a path (and optional query string),
		-- then the uri-path is that path (without the %x3F ("?") character
		-- or query string), and if the request-uri contains a full
		-- absoluteURI, the uri-path is the path component of that URI.

		-- If the uri-path is empty or if the first character of the uri-
		-- path is not a %x2F ("/") character, output %x2F ("/") and skip
		-- the remaining steps.
		-- If the uri-path contains no more than one %x2F ("/") character,
		-- output %x2F ("/") and skip the remaining step.
		if req_path:sub(1, 1) ~= "/" or not req_path:find("/", 2, true) then
			default_path = "/"
		else
			-- Output the characters of the uri-path from the first character up
			-- to, but not including, the right-most %x2F ("/").
			default_path = req_path:match("^([^?]*)/")
		end
		cookie.path = default_path
	else
		cookie.path = path
	end

	-- If the scheme component of the request-uri does not denote a
	-- "secure" protocol (as defined by the user agent), and the
	-- cookie's secure-only-flag is true, then abort these steps and
	-- ignore the cookie entirely.
	if not req_is_secure and cookie.secure_only then
		return false
	end

	-- If the cookie was received from a "non-HTTP" API and the
	-- cookie's http-only-flag is set, abort these steps and ignore the
	-- cookie entirely.
	if not req_is_http and cookie.http_only then
		return false
	end

	-- If the cookie's secure-only-flag is not set, and the scheme
	-- component of request-uri does not denote a "secure" protocol,
	if not req_is_secure and not cookie.secure_only then
		-- then abort these steps and ignore the cookie entirely if the
		-- cookie store contains one or more cookies that meet all of the
		-- following criteria:
		for d, domain_cookies in pairs(self.domains) do
			-- See '3' below
			if domain_match(cookie.domain, d) or domain_match(d, cookie.domain) then
				for p, path_cookies in pairs(domain_cookies) do
					local cmp_cookie = path_cookies[name]
					-- 1. Their name matches the name of the newly-created cookie.
					if cmp_cookie
						-- 2. Their secure-only-flag is true.
						and cmp_cookie.secure_only
						-- 3. Their domain domain-matches the domain of the newly-created
						-- cookie, or vice-versa.
						-- Note: already checked above in domain_match
						-- 4. The path of the newly-created cookie path-matches the path
						-- of the existing cookie.
						and path_match(p, cookie.path)
					then
						return false
					end
				end
			end
		end
	end

	-- If the cookie-attribute-list contains an attribute with an
	-- attribute-name of "SameSite", set the cookie's same-site-flag to
	-- attribute-value (i.e. either "Strict" or "Lax"). Otherwise, set
	-- the cookie's same-site-flag to "None".
	local same_site = params.samesite
	if same_site then
		same_site = same_site:lower()
		if same_site == "lax" or same_site == "strict" then
			-- If the cookie's "same-site-flag" is not "None", and the cookie
			-- is being set from a context whose "site for cookies" is not an
			-- exact match for request-uri's host's registered domain, then
			-- abort these steps and ignore the newly created cookie entirely.
			if req_domain ~= req_site_for_cookies then
				return false
			end

			cookie.same_site = same_site
		end
	end

	-- If the cookie-name begins with a case-sensitive match for the
	-- string "__Secure-", abort these steps and ignore the cookie
	-- entirely unless the cookie's secure-only-flag is true.
	if not cookie.secure_only and name:sub(1, 9) == "__Secure-" then
		return false
	end

	-- If the cookie-name begins with a case-sensitive match for the
	-- string "__Host-", abort these steps and ignore the cookie
	-- entirely unless the cookie meets all the following criteria:
	-- 1. The cookie's secure-only-flag is true.
	-- 2. The cookie's host-only-flag is true.
	-- 3. The cookie-attribute-list contains an attribute with an
	-- attribute-name of "Path", and the cookie's path is "/".
	if not (cookie.secure_only and cookie.host_only and cookie.path == "/") and name:sub(1, 7) == "__Host-" then
		return false
	end

	return add_to_store(self, cookie, req_is_http, now)
end
471 | ||
--- Store every Set-Cookie header from a response to the given request.
-- Always returns true; individual invalid cookies are silently skipped.
function store_methods:store_from_request(req_headers, resp_headers, req_host, req_site_for_cookies)
	local set_cookies = resp_headers:get_as_sequence("set-cookie")
	if set_cookies.n == 0 then
		return true
	end

	local scheme = req_headers:get(":scheme")
	local authority = req_headers:get(":authority")
	local domain
	if authority then
		domain = http_util.split_authority(authority, scheme)
	else
		-- :authority can be missing for HTTP/1.0 requests; fall back to req_host
		domain = req_host
	end
	local path = req_headers:get(":path")
	local is_secure = scheme == "https"

	for i = 1, set_cookies.n do
		local name, value, params = parse_setcookie(set_cookies[i])
		if name then
			self:store(domain, path, true, is_secure, req_site_for_cookies, name, value, params)
		end
	end
	return true
end
498 | ||
--- Look up a single cookie's value by exact domain, path and name.
-- Returns the value string, or nil when absent/expired.
function store_methods:get(domain, path, name)
	assert(type(domain) == "string")
	assert(type(path) == "string")
	assert(type(name) == "string")

	-- Clean now so that we can assume there are no expired cookies in store
	self:clean()

	local domain_cookies = self.domains[domain]
	local path_cookies = domain_cookies and domain_cookies[path]
	local cookie = path_cookies and path_cookies[name]
	if cookie then
		return cookie.value
	end
	return nil
end
519 | ||
-- Remove cookies from the store, keeping all counters in sync:
--   remove(domain)              -- every cookie for the domain
--   remove(domain, path)        -- every cookie at that path
--   remove(domain, path, name)  -- one specific cookie
function store_methods:remove(domain, path, name)
	assert(type(domain) == "string")
	assert(type(path) == "string" or (path == nil and name == nil))
	assert(type(name) == "string" or name == nil)
	local domain_cookies = self.domains[domain]
	if not domain_cookies then
		return
	end
	local n_cookies = self.n_cookies
	if path == nil then
		-- Delete whole domain
		for _, path_cookies in pairs(domain_cookies) do
			for _, cookie in pairs(path_cookies) do
				self.expiry_heap:remove(cookie)
				n_cookies = n_cookies - 1
			end
		end
		self.domains[domain] = nil
		self.n_cookies_per_domain[domain] = nil
	else
		local path_cookies = domain_cookies[path]
		if path_cookies then
			if name == nil then
				-- Delete all names at path
				-- NOTE(review): despite the name, this counts *cookies* deleted
				local domains_deleted = 0
				for _, cookie in pairs(path_cookies) do
					self.expiry_heap:remove(cookie)
					domains_deleted = domains_deleted + 1
				end
				domain_cookies[path] = nil
				n_cookies = n_cookies - domains_deleted
				if next(domain_cookies) == nil then
					-- last path for this domain: drop the domain bookkeeping
					self.domains[domain] = nil
					self.n_cookies_per_domain[domain] = nil
				else
					self.n_cookies_per_domain[domain] = self.n_cookies_per_domain[domain] - domains_deleted
				end
			else
				-- Delete singular cookie
				local cookie = path_cookies[name]
				if cookie then
					self.expiry_heap:remove(cookie)
					n_cookies = n_cookies - 1
					self.n_cookies_per_domain[domain] = self.n_cookies_per_domain[domain] - 1
					path_cookies[name] = nil
					-- prune empty path/domain tables
					if next(path_cookies) == nil then
						domain_cookies[path] = nil
						if next(domain_cookies) == nil then
							self.domains[domain] = nil
							self.n_cookies_per_domain[domain] = nil
						end
					end
				end
			end
		end
	end
	self.n_cookies = n_cookies
end
578 | ||
579 | --[[ The user agent SHOULD sort the cookie-list in the following order: | |
580 | - Cookies with longer paths are listed before cookies with shorter paths. | |
581 | - Among cookies that have equal-length path fields, cookies with earlier | |
582 | creation-times are listed before cookies with later creation-times. | |
583 | ]] | |
-- Strict ordering for table.sort (RFC 6265 Section 5.4 step 2):
-- longer paths first, then earlier creation times; domain and name are
-- used last only to make the order total (required by table.sort).
local function cookie_cmp(a, b)
	local a_path_len, b_path_len = #a.path, #b.path
	if a_path_len ~= b_path_len then
		return a_path_len > b_path_len
	elseif a.creation_time ~= b.creation_time then
		return a.creation_time < b.creation_time
	elseif a.domain ~= b.domain then
		return a.domain < b.domain
	else
		return a.name < b.name
	end
end
598 | ||
-- RFC 6265 Section 5.4: should `cookie` be sent with this request?
-- The caller has already applied domain_match and path_match.
local function cookie_match(cookie, req_domain, req_is_http, req_is_secure, req_is_safe_method, req_site_for_cookies, req_is_top_level)
	-- req_domain should be already canonicalized

	if cookie.host_only then -- Either:
		-- The cookie's host-only-flag is true and the canonicalized
		-- request-host is identical to the cookie's domain.
		if cookie.domain ~= req_domain then
			return false
		end
	end
	-- Or:
	-- The cookie's host-only-flag is false and the canonicalized
	-- request-host domain-matches the cookie's domain.

	-- already done domain_match and path_match

	-- If the cookie's http-only-flag is true, then exclude the
	-- cookie if the cookie-string is being generated for a "non-
	-- HTTP" API (as defined by the user agent).
	if cookie.http_only and not req_is_http then
		return false
	end

	-- Secure cookies are only sent over secure (e.g. https) transports.
	if cookie.secure_only and not req_is_secure then
		return false
	end

	-- If the cookie's same-site-flag is not "None", and the HTTP
	-- request is cross-site (as defined in Section 5.2) then exclude
	-- the cookie unless all of the following statements hold:
	if cookie.same_site and req_site_for_cookies ~= req_domain and not (
		-- 1. The same-site-flag is "Lax"
		cookie.same_site == "lax"
		-- 2. The HTTP request's method is "safe".
		and req_is_safe_method
		-- 3. The HTTP request's target browsing context is a top-level browsing context.
		and req_is_top_level
	) then
		return false
	end

	return true
end
642 | ||
--- Build a "Cookie" header value for a request.
-- Collects all matching, unexpired cookies, sorts them per RFC 6265
-- Section 5.4, and serializes as many as fit in `max_cookie_length`
-- (defaults to the store's limit). Returns a (possibly empty) string.
function store_methods:lookup(req_domain, req_path, req_is_http, req_is_secure, req_is_safe_method, req_site_for_cookies, req_is_top_level, max_cookie_length)
	req_domain = assert(type(req_domain) == "string" and canonicalise_host(req_domain), "invalid request domain")
	assert(type(req_path) == "string")
	if max_cookie_length ~= nil then
		assert(type(max_cookie_length) == "number")
	else
		max_cookie_length = self.max_cookie_length
	end

	local now = self.time()

	-- Clean now so that we can assume there are no expired cookies in store
	self:clean()

	local list = {}
	local n = 0
	for domain, domain_cookies in pairs(self.domains) do
		if domain_match(domain, req_domain) then
			for path, path_cookies in pairs(domain_cookies) do
				if path_match(path, req_path) then
					for _, cookie in pairs(path_cookies) do
						if cookie_match(cookie, req_domain, req_is_http, req_is_secure, req_is_safe_method, req_site_for_cookies, req_is_top_level) then
							cookie.last_access_time = now
							n = n + 1
							list[n] = cookie
						end
					end
				end
			end
		end
	end
	table.sort(list, cookie_cmp)
	local m = 0 -- number of entries converted to "name=value" strings
	local cookie_length = -2 -- length of separator ("; "); cancels the first pair's separator
	for i=1, n do
		local cookie = list[i]
		-- TODO: validate?
		local cookie_pair = cookie.name .. "=" .. cookie.value
		local new_length = cookie_length + #cookie_pair + 2
		if new_length > max_cookie_length then
			break
		end
		list[i] = cookie_pair
		m = i
		cookie_length = new_length
	end
	-- BUG FIX: concatenate only the `m` serialized entries; this previously
	-- used `n`, so whenever the length limit truncated the list,
	-- table.concat hit a cookie *table* and raised an error.
	return table.concat(list, "; ", 1, m)
end
689 | ||
--- Convenience wrapper around :lookup() taking a request headers object.
-- CONNECT requests never carry cookies and yield the empty string.
function store_methods:lookup_for_request(req_headers, req_host, req_site_for_cookies, req_is_top_level, max_cookie_length)
	local method = req_headers:get(":method")
	if method == "CONNECT" then
		return ""
	end
	local scheme = req_headers:get(":scheme")
	local authority = req_headers:get(":authority")
	local domain
	if authority then
		domain = http_util.split_authority(authority, scheme)
	else
		-- :authority can be missing for HTTP/1.0 requests; fall back to req_host
		domain = req_host
	end
	local path = req_headers:get(":path")
	local is_secure = scheme == "https"
	local is_safe = http_util.is_safe_method(method)
	return self:lookup(domain, path, true, is_secure, is_safe, req_site_for_cookies, req_is_top_level, max_cookie_length)
end
708 | ||
--- Timestamp at which the next stored cookie expires
-- (+inf when the store holds no cookies).
function store_methods:clean_due()
	local soonest = self.expiry_heap:peek()
	if soonest == nil then
		return (1e999)
	end
	return soonest.expiry_time
end
716 | ||
-- Evict every expired cookie, keeping counters and the
-- domain/path/name index in sync with the expiry heap.
function store_methods:clean()
	local now = self.time()
	while self:clean_due() < now do
		local cookie = self.expiry_heap:pop()
		self.n_cookies = self.n_cookies - 1
		local domain = cookie.domain
		local domain_cookies = self.domains[domain]
		if domain_cookies then
			self.n_cookies_per_domain[domain] = self.n_cookies_per_domain[domain] - 1
			local path_cookies = domain_cookies[cookie.path]
			if path_cookies then
				path_cookies[cookie.name] = nil
				-- prune empty path/domain tables
				if next(path_cookies) == nil then
					domain_cookies[cookie.path] = nil
					if next(domain_cookies) == nil then
						self.domains[domain] = nil
						self.n_cookies_per_domain[domain] = nil
					end
				end
			end
		end
	end
	return true
end
741 | ||
742 | -- Files in 'netscape format' | |
743 | -- curl's lib/cookie.c is best reference for the format | |
-- Parse one line of a 'netscape format' cookie jar.
-- curl's lib/cookie.c is best reference for the format.
-- Returns a cookie object, or nothing when the line is blank, a comment,
-- or malformed.
local function parse_netscape_format(line, now)
	if line == "" then
		return
	end
	local i = 1
	local http_only = false
	if line:sub(1, 1) == "#" then
		-- Comment line, unless it carries curl's "#HttpOnly_" prefix extension
		if line:sub(1, 10) == "#HttpOnly_" then
			http_only = true
			i = 11
		else
			return
		end
	end

	-- domain \t host_only \t path \t secure \t expiry \t name \t value
	local domain, host_only, path, secure_only, expiry, name, value =
		line:match("^%.?([^\t]+)\t([^\t]+)\t([^\t]+)\t([^\t]+)\t(%d+)\t([^\t]+)\t(.+)", i)
	if not domain then
		return
	end
	domain = canonicalise_host(domain)
	if domain == nil then
		return
	end

	if host_only == "TRUE" then
		host_only = true
	elseif host_only == "FALSE" then
		host_only = false
	else
		return
	end

	if secure_only == "TRUE" then
		secure_only = true
	elseif secure_only == "FALSE" then
		secure_only = false
	else
		return
	end

	expiry = tonumber(expiry, 10)

	return setmetatable({
		name = name;
		value = value;
		expiry_time = expiry;
		domain = domain;
		path = path;
		creation_time = now;
		last_access_time = now;
		-- BUG FIX: an expiry of 0 marks a *session* (non-persistent)
		-- cookie in this format; the flag was previously inverted
		-- (`expiry == 0`).
		persistent = expiry ~= 0;
		host_only = host_only;
		secure_only = secure_only;
		http_only = http_only;
		same_site = nil;
	}, cookie_mt)
end
802 | ||
803 | function store_methods:load_from_file(file) | |
804 | local now = self.time() | |
805 | ||
806 | -- Clean now so that we don't hit storage limits | |
807 | self:clean() | |
808 | ||
809 | local cookies = {} | |
810 | local n = 0 | |
811 | while true do | |
812 | local line, err, errno = file:read() | |
813 | if not line then | |
814 | if err ~= nil then | |
815 | return nil, err, errno | |
816 | end | |
817 | break | |
818 | end | |
819 | local cookie = parse_netscape_format(line, now) | |
820 | if cookie then | |
821 | n = n + 1 | |
822 | cookies[n] = cookie | |
823 | end | |
824 | end | |
825 | for i=1, n do | |
826 | local cookie = cookies[i] | |
827 | add_to_store(self, cookie, cookie.http_only, now) | |
828 | end | |
829 | return true | |
830 | end | |
831 | ||
832 | function store_methods:save_to_file(file) | |
833 | do -- write a preamble | |
834 | local ok, err, errno = file:write [[ | |
835 | # Netscape HTTP Cookie File | |
836 | # This file was generated by lua-http | |
837 | ||
838 | ]] | |
839 | if not ok then | |
840 | return nil, err, errno | |
841 | end | |
842 | end | |
843 | for _, domain_cookies in pairs(self.domains) do | |
844 | for _, path_cookies in pairs(domain_cookies) do | |
845 | for _, cookie in pairs(path_cookies) do | |
846 | local ok, err, errno = file:write(cookie:netscape_format()) | |
847 | if not ok then | |
848 | return nil, err, errno | |
849 | end | |
850 | end | |
851 | end | |
852 | end | |
853 | return true | |
854 | end | |
855 | ||
856 | return { | |
857 | bake = bake; | |
858 | ||
859 | parse_cookie = parse_cookie; | |
860 | parse_cookies = parse_cookies; | |
861 | parse_setcookie = parse_setcookie; | |
862 | ||
863 | new_store = new_store; | |
864 | store_mt = store_mt; | |
865 | store_methods = store_methods; | |
866 | } |
0 | require "http.headers" | |
1 | ||
2 | bake: (string, string, number?, string?, string?, true?, true?, string?) -> (string) | |
3 | ||
4 | parse_cookie: (string) -> ({string:string}) | |
5 | parse_cookies: (headers) -> ({{string:string}}) | |
6 | parse_setcookie: (string) -> (string, string, {string:string}) | |
7 | ||
8 | interface cookie_store | |
9 | psl: any|false -- TODO: use psl type | |
10 | time: () -> (number) | |
11 | max_cookie_length: number | |
12 | max_cookies: number | |
13 | max_cookies_per_domain: number | |
14 | ||
15 | const store: (self, string, string, boolean, boolean, string?, string, string, {string:string}) -> (boolean) | |
16 | const store_from_request: (self, headers, headers, string, string?) -> (boolean) | |
17 | const get: (self, string, string, string) -> (string) | |
18 | const remove: (self, string, string?, string?) -> () | |
19 | const lookup: (self, string, string, boolean?, boolean?, boolean?, string?, boolean?, integer?) -> () | |
20 | const lookup_for_request: (self, headers, string, string?, boolean?, integer?) -> () | |
21 | const clean_due: (self) -> (number) | |
22 | const clean: (self) -> (boolean) | |
23 | const load_from_file: (self, file) -> (true) | (nil, string, integer) | |
24 | const save_to_file: (self, file) -> (true) | (nil, string, integer) | |
25 | end | |
26 | ||
27 | new_store: () -> (cookie_store) |
49 | 49 | -- A function that will be called if the connection becomes idle |
50 | 50 | onidle_ = nil; |
51 | 51 | }, connection_mt) |
52 | socket:setvbuf("full", math.huge) -- 'infinite' buffering; no write locks needed | |
52 | 53 | socket:setmode("b", "bf") |
53 | 54 | socket:onerror(onerror) |
54 | 55 | return self |
56 | end | |
57 | ||
58 | function connection_methods:setmaxline(read_length) | |
59 | if self.socket == nil then | |
60 | return nil | |
61 | end | |
62 | self.socket:setmaxline(read_length) | |
63 | return true | |
55 | 64 | end |
56 | 65 | |
57 | 66 | function connection_methods:clearerr(...) |
108 | 117 | -- this function *should never throw* |
109 | 118 | function connection_methods:get_next_incoming_stream(timeout) |
110 | 119 | assert(self.type == "server") |
111 | local deadline = timeout and (monotime()+timeout) | |
112 | 120 | -- Make sure we don't try and read before the previous request has been fully read |
113 | repeat | |
114 | -- Wait until previous requests have been fully read | |
115 | if self.req_locked then | |
116 | if not self.req_cond:wait(deadline and deadline - monotime()) then | |
117 | return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT | |
118 | end | |
119 | assert(self.req_locked == nil) | |
120 | end | |
121 | if self.socket == nil then | |
122 | return nil | |
123 | end | |
124 | -- Wait for at least one byte | |
125 | local ok, err, errno = self.socket:fill(1, deadline and deadline-monotime()) | |
126 | if not ok then | |
127 | return nil, err, errno | |
128 | end | |
129 | until not self.req_locked | |
121 | if self.req_locked then | |
122 | local deadline = timeout and monotime()+timeout | |
123 | assert(cqueues.running(), "cannot wait for condition if not within a cqueues coroutine") | |
124 | if cqueues.poll(self.req_cond, timeout) == timeout then | |
125 | return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT | |
126 | end | |
127 | timeout = deadline and deadline-monotime() | |
128 | assert(self.req_locked == nil) | |
129 | end | |
130 | if self.socket == nil then | |
131 | return nil | |
132 | end | |
133 | -- Wait for at least one byte | |
134 | local ok, err, errno = self.socket:fill(1, 0) | |
135 | if not ok then | |
136 | if errno == ce.ETIMEDOUT then | |
137 | local deadline = timeout and monotime()+timeout | |
138 | if cqueues.poll(self.socket, timeout) ~= timeout then | |
139 | return self:get_next_incoming_stream(deadline and deadline-monotime()) | |
140 | end | |
141 | end | |
142 | return nil, err, errno | |
143 | end | |
130 | 144 | local stream = h1_stream.new(self) |
131 | 145 | self.pipeline:push(stream) |
132 | 146 | self.req_locked = stream |
144 | 158 | line, err, errno = self.socket:xread("*L", deadline and (deadline-monotime())) |
145 | 159 | end |
146 | 160 | if line == nil then |
147 | if err == nil and self.socket:pending() > 0 then | |
148 | self.socket:seterror("r", ce.EILSEQ) | |
149 | if preline then | |
150 | local ok, errno2 = self.socket:unget(preline) | |
151 | if not ok then | |
152 | return nil, onerror(self.socket, "unget", errno2) | |
153 | end | |
154 | end | |
155 | return nil, onerror(self.socket, "read_request_line", ce.EILSEQ) | |
156 | end | |
157 | return nil, err, errno | |
158 | end | |
159 | local method, path, httpversion = line:match("^(%w+) (%S+) HTTP/(1%.[01])\r\n$") | |
161 | if preline then | |
162 | local ok, errno2 = self.socket:unget(preline) | |
163 | if not ok then | |
164 | return nil, onerror(self.socket, "unget", errno2) | |
165 | end | |
166 | end | |
167 | return nil, err, errno | |
168 | end | |
169 | local method, target, httpversion = line:match("^(%w+) (%S+) HTTP/(1%.[01])\r\n$") | |
160 | 170 | if not method then |
161 | 171 | self.socket:seterror("r", ce.EILSEQ) |
162 | 172 | local ok, errno2 = self.socket:unget(line) |
172 | 182 | return nil, onerror(self.socket, "read_request_line", ce.EILSEQ) |
173 | 183 | end |
174 | 184 | httpversion = httpversion == "1.0" and 1.0 or 1.1 -- Avoid tonumber() due to locale issues |
175 | return method, path, httpversion | |
185 | return method, target, httpversion | |
176 | 186 | end |
177 | 187 | |
178 | 188 | function connection_methods:read_status_line(timeout) |
179 | 189 | local line, err, errno = self.socket:xread("*L", timeout) |
180 | 190 | if line == nil then |
181 | if err == nil and self.socket:pending() > 0 then | |
182 | self.socket:seterror("r", ce.EILSEQ) | |
183 | return nil, onerror(self.socket, "read_status_line", ce.EILSEQ) | |
184 | end | |
185 | 191 | return nil, err, errno |
186 | 192 | end |
187 | 193 | local httpversion, status_code, reason_phrase = line:match("^HTTP/(1%.[01]) (%d%d%d) (.*)\r\n$") |
273 | 279 | local deadline = timeout and (monotime()+timeout) |
274 | 280 | local chunk_header, err, errno = self.socket:xread("*L", timeout) |
275 | 281 | if chunk_header == nil then |
276 | if err == nil and self.socket:pending() > 0 then | |
277 | self.socket:seterror("r", ce.EILSEQ) | |
278 | return nil, onerror(self.socket, "read_body_chunk", ce.EILSEQ) | |
279 | end | |
280 | 282 | return nil, err, errno |
281 | 283 | end |
282 | 284 | local chunk_size, chunk_ext = chunk_header:match("^(%x+) *(.-)\r\n") |
299 | 301 | -- you MUST read trailers after this! |
300 | 302 | return false, chunk_ext |
301 | 303 | else |
302 | local ok, err2, errno2 = self.socket:fill(chunk_size+2, deadline and deadline-monotime()) | |
304 | local ok, err2, errno2 = self.socket:fill(chunk_size+2, 0) | |
303 | 305 | if not ok then |
304 | 306 | local unget_ok1, unget_errno1 = self.socket:unget(chunk_header) |
305 | 307 | if not unget_ok1 then |
306 | 308 | return nil, onerror(self.socket, "unget", unget_errno1) |
309 | end | |
310 | if errno2 == ce.ETIMEDOUT then | |
311 | timeout = deadline and deadline-monotime() | |
312 | if cqueues.poll(self.socket, timeout) ~= timeout then | |
313 | -- retry | |
314 | return self:read_body_chunk(deadline and deadline-monotime()) | |
315 | end | |
316 | elseif err2 == nil then | |
317 | self.socket:seterror("r", ce.EILSEQ) | |
318 | return nil, onerror(self.socket, "read_body_chunk", ce.EILSEQ) | |
307 | 319 | end |
308 | 320 | return nil, err2, errno2 |
309 | 321 | end |
331 | 343 | end |
332 | 344 | end |
333 | 345 | |
334 | function connection_methods:write_request_line(method, path, httpversion, timeout) | |
346 | function connection_methods:write_request_line(method, target, httpversion, timeout) | |
335 | 347 | assert(method:match("^[^ \r\n]+$")) |
336 | assert(path:match("^[^ \r\n]+$")) | |
348 | assert(target:match("^[^ \r\n]+$")) | |
337 | 349 | assert(httpversion == 1.0 or httpversion == 1.1) |
338 | local line = string.format("%s %s HTTP/%1.1f\r\n", method, path, httpversion) | |
350 | local line = string.format("%s %s HTTP/%s\r\n", method, target, httpversion == 1.0 and "1.0" or "1.1") | |
339 | 351 | local ok, err, errno = self.socket:xwrite(line, "f", timeout) |
340 | 352 | if not ok then |
341 | 353 | return nil, err, errno |
347 | 359 | assert(httpversion == 1.0 or httpversion == 1.1) |
348 | 360 | assert(status_code:match("^[1-9]%d%d$"), "invalid status code") |
349 | 361 | assert(type(reason_phrase) == "string" and reason_phrase:match("^[^\r\n]*$"), "invalid reason phrase") |
350 | local line = string.format("HTTP/%1.1f %s %s\r\n", httpversion, status_code, reason_phrase) | |
362 | local line = string.format("HTTP/%s %s %s\r\n", httpversion == 1.0 and "1.0" or "1.1", status_code, reason_phrase) | |
351 | 363 | local ok, err, errno = self.socket:xwrite(line, "f", timeout) |
352 | 364 | if not ok then |
353 | 365 | return nil, err, errno |
4 | 4 | ["100"] = "Continue"; |
5 | 5 | ["101"] = "Switching Protocols"; |
6 | 6 | ["102"] = "Processing"; |
7 | ["103"] = "Early Hints"; | |
7 | 8 | |
8 | 9 | ["200"] = "OK"; |
9 | 10 | ["201"] = "Created"; |
0 | reason_phrases: {string:string} |
45 | 45 | |
46 | 46 | local stream_methods = { |
47 | 47 | use_zlib = has_zlib; |
48 | max_header_lines = 100; | |
48 | 49 | } |
49 | 50 | for k,v in pairs(stream_common.methods) do |
50 | 51 | stream_methods[k] = v |
72 | 73 | |
73 | 74 | req_method = nil; -- string |
74 | 75 | peer_version = nil; -- 1.0 or 1.1 |
76 | has_main_headers = false; | |
77 | headers_in_progress = nil; | |
75 | 78 | headers_fifo = new_fifo(); |
76 | 79 | headers_cond = cc.new(); |
77 | body_buffer = nil; | |
80 | chunk_fifo = new_fifo(); | |
81 | chunk_cond = cc.new(); | |
78 | 82 | body_write_type = nil; -- "closed", "chunked", "length" or "missing" |
79 | 83 | body_write_left = nil; -- integer: only set when body_write_type == "length" |
80 | 84 | body_write_deflate_encoding = nil; |
159 | 163 | local server_error_headers = new_headers() |
160 | 164 | server_error_headers:append(":status", "503") |
161 | 165 | function stream_methods:shutdown() |
162 | if self.type == "server" and (self.state == "open" or self.state == "half closed (remote)") then | |
163 | -- Make sure we're at the front of the pipeline | |
164 | if self.connection.pipeline:peek() ~= self then | |
165 | self.pipeline_cond:wait() -- wait without a timeout should never fail | |
166 | assert(self.connection.pipeline:peek() == self) | |
167 | end | |
168 | if not self.body_write_type then | |
169 | -- Can send an automatic error response | |
170 | local error_headers | |
171 | if self.connection:error("r") == ce.EILSEQ then | |
172 | error_headers = bad_request_headers | |
173 | else | |
174 | error_headers = server_error_headers | |
175 | end | |
176 | self:write_headers(error_headers, true) | |
177 | end | |
178 | end | |
179 | if self.state == "half closed (local)" then | |
180 | -- we'd like to finish reading any remaining response so that we get out of the way | |
181 | local start = self.stats_recv | |
182 | repeat | |
183 | -- don't bother continuing if we're reading until connection is closed | |
184 | if self.body_read_type == "close" then | |
185 | break | |
186 | end | |
187 | if self:get_next_chunk(0) == nil then | |
188 | break -- ignore errors | |
189 | end | |
190 | until (self.stats_recv - start) >= clean_shutdown_limit | |
191 | -- state may still be "half closed (local)" (but hopefully moved on to "closed") | |
192 | end | |
193 | 166 | if self.state == "idle" then |
194 | 167 | self:set_state("closed") |
195 | elseif self.state ~= "closed" then | |
196 | -- This is a bad situation: we are trying to shutdown a connection that has the body partially sent | |
197 | -- Especially in the case of Connection: close, where closing indicates EOF, | |
198 | -- this will result in a client only getting a partial response. | |
199 | -- Could also end up here if a client sending headers fails. | |
200 | if self.connection.socket then | |
201 | self.connection.socket:shutdown() | |
202 | end | |
203 | self:set_state("closed") | |
168 | else | |
169 | if self.type == "server" and (self.state == "open" or self.state == "half closed (remote)") then | |
170 | -- Make sure we're at the front of the pipeline | |
171 | if self.connection.pipeline:peek() ~= self then | |
172 | -- FIXME: shouldn't have time-taking operation here | |
173 | self.pipeline_cond:wait() -- wait without a timeout should never fail | |
174 | assert(self.connection.pipeline:peek() == self) | |
175 | end | |
176 | if not self.body_write_type then | |
177 | -- Can send an automatic error response | |
178 | local error_headers | |
179 | if self.connection:error("r") == ce.EILSEQ then | |
180 | error_headers = bad_request_headers | |
181 | else | |
182 | error_headers = server_error_headers | |
183 | end | |
184 | self:write_headers(error_headers, true, 0) | |
185 | end | |
186 | end | |
187 | -- read any remaining available response and get out of the way | |
188 | local start = self.stats_recv | |
189 | while (self.state == "open" or self.state == "half closed (local)") and (self.stats_recv - start) < clean_shutdown_limit do | |
190 | if not self:step(0) then | |
191 | break | |
192 | end | |
193 | end | |
194 | ||
195 | if self.state ~= "closed" then | |
196 | -- This is a bad situation: we are trying to shutdown a connection that has the body partially sent | |
197 | -- Especially in the case of Connection: close, where closing indicates EOF, | |
198 | -- this will result in a client only getting a partial response. | |
199 | -- Could also end up here if a client sending headers fails. | |
200 | if self.connection.socket then | |
201 | self.connection.socket:shutdown() | |
202 | end | |
203 | self:set_state("closed") | |
204 | end | |
205 | end | |
206 | return true | |
207 | end | |
208 | ||
209 | function stream_methods:step(timeout) | |
210 | if self.state == "open" or self.state == "half closed (local)" or (self.state == "idle" and self.type == "server") then | |
211 | if self.connection.socket == nil then | |
212 | return nil, ce.strerror(ce.EPIPE), ce.EPIPE | |
213 | end | |
214 | if not self.has_main_headers then | |
215 | local headers, err, errno = self:read_headers(timeout) | |
216 | if headers == nil then | |
217 | return nil, err, errno | |
218 | end | |
219 | self.headers_fifo:push(headers) | |
220 | self.headers_cond:signal(1) | |
221 | return true | |
222 | end | |
223 | if self.body_read_left ~= 0 then | |
224 | local chunk, err, errno = self:read_next_chunk(timeout) | |
225 | if chunk == nil then | |
226 | if err == nil then | |
227 | return true | |
228 | end | |
229 | return nil, err, errno | |
230 | end | |
231 | self.chunk_fifo:push(chunk) | |
232 | self.chunk_cond:signal() | |
233 | return true | |
234 | end | |
235 | if self.body_read_type == "chunked" then | |
236 | local trailers, err, errno = self:read_headers(timeout) | |
237 | if trailers == nil then | |
238 | return nil, err, errno | |
239 | end | |
240 | self.headers_fifo:push(trailers) | |
241 | self.headers_cond:signal(1) | |
242 | return true | |
243 | end | |
244 | end | |
245 | if self.state == "half closed (remote)" then | |
246 | return nil, ce.strerror(ce.EIO), ce.EIO | |
204 | 247 | end |
205 | 248 | return true |
206 | 249 | end |
213 | 256 | if self.state == "closed" or self.state == "half closed (remote)" then |
214 | 257 | return nil |
215 | 258 | end |
216 | local headers = new_headers() | |
217 | 259 | local status_code |
218 | 260 | local is_trailers = self.body_read_type == "chunked" |
219 | if is_trailers then -- luacheck: ignore 542 | |
220 | elseif self.type == "server" then | |
221 | if self.state == "half closed (local)" then | |
222 | return nil | |
223 | end | |
224 | local method, path, httpversion = | |
225 | self.connection:read_request_line(deadline and (deadline-monotime())) | |
226 | if method == nil then | |
227 | return nil, path, httpversion | |
228 | end | |
229 | self.req_method = method | |
230 | self.peer_version = httpversion | |
231 | headers:append(":method", method) | |
232 | if method == "CONNECT" then | |
233 | headers:append(":authority", path) | |
234 | else | |
235 | headers:append(":path", path) | |
236 | end | |
237 | headers:append(":scheme", self:checktls() and "https" or "http") | |
238 | self:set_state("open") | |
239 | else -- client | |
240 | -- Make sure we're at front of connection pipeline | |
241 | if self.connection.pipeline:peek() ~= self then | |
242 | if not self.pipeline_cond:wait(deadline and (deadline-monotime)) then | |
243 | return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT | |
244 | end | |
245 | assert(self.connection.pipeline:peek() == self) | |
246 | end | |
247 | local httpversion, reason_phrase | |
248 | httpversion, status_code, reason_phrase = | |
249 | self.connection:read_status_line(deadline and (deadline-monotime())) | |
250 | if httpversion == nil then | |
251 | if status_code == nil then | |
252 | return nil, ce.strerror(ce.EPIPE), ce.EPIPE | |
253 | end | |
254 | return nil, status_code, reason_phrase | |
255 | end | |
256 | self.peer_version = httpversion | |
257 | headers:append(":status", status_code) | |
258 | -- reason phrase intentionally does not exist in HTTP2; discard for consistency | |
259 | end | |
261 | local headers = self.headers_in_progress | |
262 | if not headers then | |
263 | if is_trailers then | |
264 | headers = new_headers() | |
265 | elseif self.type == "server" then | |
266 | if self.state == "half closed (local)" then | |
267 | return nil | |
268 | end | |
269 | local method, target, httpversion = self.connection:read_request_line(0) | |
270 | if method == nil then | |
271 | if httpversion == ce.ETIMEDOUT then | |
272 | timeout = deadline and deadline-monotime() | |
273 | if cqueues.poll(self.connection.socket, timeout) ~= timeout then | |
274 | return self:read_headers(deadline and deadline-monotime()) | |
275 | end | |
276 | end | |
277 | return nil, target, httpversion | |
278 | end | |
279 | self.req_method = method | |
280 | self.peer_version = httpversion | |
281 | headers = new_headers() | |
282 | headers:append(":method", method) | |
283 | if method == "CONNECT" then | |
284 | headers:append(":authority", target) | |
285 | else | |
286 | headers:append(":path", target) | |
287 | end | |
288 | headers:append(":scheme", self:checktls() and "https" or "http") | |
289 | self:set_state("open") | |
290 | else -- client | |
291 | -- Make sure we're at front of connection pipeline | |
292 | if self.connection.pipeline:peek() ~= self then | |
293 | assert(cqueues.running(), "cannot wait for condition if not within a cqueues coroutine") | |
294 | if cqueues.poll(self.pipeline_cond, timeout) == timeout then | |
295 | return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT | |
296 | end | |
297 | assert(self.connection.pipeline:peek() == self) | |
298 | end | |
299 | local httpversion, reason_phrase | |
300 | httpversion, status_code, reason_phrase = self.connection:read_status_line(0) | |
301 | if httpversion == nil then | |
302 | if reason_phrase == ce.ETIMEDOUT then | |
303 | timeout = deadline and deadline-monotime() | |
304 | if cqueues.poll(self.connection.socket, timeout) ~= timeout then | |
305 | return self:read_headers(deadline and deadline-monotime()) | |
306 | end | |
307 | elseif status_code == nil then | |
308 | return nil, ce.strerror(ce.EPIPE), ce.EPIPE | |
309 | end | |
310 | return nil, status_code, reason_phrase | |
311 | end | |
312 | self.peer_version = httpversion | |
313 | headers = new_headers() | |
314 | headers:append(":status", status_code) | |
315 | -- reason phrase intentionally does not exist in HTTP2; discard for consistency | |
316 | end | |
317 | self.headers_in_progress = headers | |
318 | else | |
319 | if not is_trailers and self.type == "client" then | |
320 | status_code = headers:get(":status") | |
321 | end | |
322 | end | |
323 | ||
260 | 324 | -- Use while loop for lua 5.1 compatibility |
261 | 325 | while true do |
262 | local k, v, errno = self.connection:read_header(deadline and (deadline-monotime())) | |
326 | if headers:len() >= self.max_header_lines then | |
327 | return nil, ce.strerror(ce.E2BIG), ce.E2BIG | |
328 | end | |
329 | local k, v, errno = self.connection:read_header(0) | |
263 | 330 | if k == nil then |
264 | 331 | if v ~= nil then |
332 | if errno == ce.ETIMEDOUT then | |
333 | timeout = deadline and deadline-monotime() | |
334 | if cqueues.poll(self.connection.socket, timeout) ~= timeout then | |
335 | return self:read_headers(deadline and deadline-monotime()) | |
336 | end | |
337 | end | |
265 | 338 | return nil, v, errno |
266 | 339 | end |
267 | 340 | break -- Success: End of headers. |
274 | 347 | end |
275 | 348 | |
276 | 349 | do |
277 | local ok, err, errno = self.connection:read_headers_done(deadline and (deadline-monotime())) | |
350 | local ok, err, errno = self.connection:read_headers_done(0) | |
278 | 351 | if ok == nil then |
279 | if err == nil then | |
352 | if errno == ce.ETIMEDOUT then | |
353 | timeout = deadline and deadline-monotime() | |
354 | if cqueues.poll(self.connection.socket, timeout) ~= timeout then | |
355 | return self:read_headers(deadline and deadline-monotime()) | |
356 | end | |
357 | elseif err == nil then | |
280 | 358 | return nil, ce.strerror(ce.EPIPE), ce.EPIPE |
281 | 359 | end |
282 | 360 | return nil, err, errno |
283 | 361 | end |
362 | self.headers_in_progress = nil | |
363 | self.has_main_headers = status_code == nil or status_code:sub(1,1) ~= "1" or status_code == "101" | |
284 | 364 | end |
285 | 365 | |
286 | 366 | do -- if client sends `Connection: close`, server knows it can close at end of response
382 | 462 | function stream_methods:get_headers(timeout) |
383 | 463 | if self.headers_fifo:length() > 0 then |
384 | 464 | return self.headers_fifo:pop() |
385 | end | |
386 | if self.body_read_type == "chunked" then | |
387 | -- wait for signal from trailers | |
388 | -- XXX: what if nothing is reading body? | |
389 | local deadline = timeout and monotime() + timeout | |
390 | repeat | |
391 | if self.state == "closed" or self.state == "half closed (remote)" then | |
392 | return nil | |
393 | end | |
394 | if not self.headers_cond:wait(timeout) then | |
395 | return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT | |
396 | end | |
397 | timeout = deadline and deadline-monotime() | |
398 | until self.headers_fifo:length() > 0 | |
399 | return self.headers_fifo:pop() | |
400 | end | |
401 | -- TODO: locking? | |
402 | return self:read_headers(timeout) | |
465 | else | |
466 | if self.state == "closed" or self.state == "half closed (remote)" then | |
467 | return nil | |
468 | end | |
469 | local deadline = timeout and monotime()+timeout | |
470 | local ok, err, errno = self:step(timeout) | |
471 | if not ok then | |
472 | return nil, err, errno | |
473 | end | |
474 | return self:get_headers(deadline and deadline-monotime()) | |
475 | end | |
403 | 476 | end |
404 | 477 | |
405 | 478 | local ignore_fields = { |
408 | 481 | [":path"] = true; |
409 | 482 | [":scheme"] = true; |
410 | 483 | [":status"] = true; |
484 | [":protocol"] = true; -- from RFC 8441 | |
411 | 485 | -- fields written manually in :write_headers |
412 | 486 | ["connection"] = true; |
413 | 487 | ["content-length"] = true; |
454 | 528 | if self.body_write_type == "chunked" then |
455 | 529 | -- we are writing trailers; close off body |
456 | 530 | is_trailers = true |
457 | local ok, err, errno = self.connection:write_body_last_chunk(nil, deadline and deadline-monotime()) | |
531 | local ok, err, errno = self.connection:write_body_last_chunk(nil, 0) | |
458 | 532 | if not ok then |
459 | 533 | return nil, err, errno |
460 | 534 | end |
470 | 544 | end |
471 | 545 | -- Make sure we're at the front of the pipeline |
472 | 546 | if self.connection.pipeline:peek() ~= self then |
473 | if not self.pipeline_cond:wait(deadline and (deadline-monotime)) then | |
547 | assert(cqueues.running(), "cannot wait for condition if not within a cqueues coroutine") | |
548 | headers = headers:clone() -- don't want user to edit it and send wrong headers | |
549 | if cqueues.poll(self.pipeline_cond, timeout) == timeout then | |
474 | 550 | return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT |
475 | 551 | end |
476 | 552 | assert(self.connection.pipeline:peek() == self) |
479 | 555 | -- Should send status line |
480 | 556 | local reason_phrase = reason_phrases[status_code] |
481 | 557 | local version = math.min(self.connection.version, self.peer_version) |
482 | local ok, err, errno = self.connection:write_status_line(version, status_code, reason_phrase, deadline and deadline-monotime()) | |
558 | local ok, err, errno = self.connection:write_status_line(version, status_code, reason_phrase, 0) | |
483 | 559 | if not ok then |
484 | 560 | return nil, err, errno |
485 | 561 | end |
488 | 564 | if self.state == "idle" then |
489 | 565 | method = assert(headers:get(":method"), "missing method") |
490 | 566 | self.req_method = method |
491 | local path | |
567 | local target | |
492 | 568 | if method == "CONNECT" then |
493 | path = assert(headers:get(":authority"), "missing authority") | |
569 | target = assert(headers:get(":authority"), "missing authority") | |
494 | 570 | assert(not headers:has(":path"), "CONNECT requests should not have a path") |
495 | 571 | else |
496 | 572 | -- RFC 7230 Section 5.4: A client MUST send a Host header field in all HTTP/1.1 request messages. |
497 | 573 | assert(self.connection.version < 1.1 or headers:has(":authority"), "missing authority") |
498 | path = assert(headers:get(":path"), "missing path") | |
499 | end | |
500 | if self.req_locked then | |
501 | -- Wait until previous responses have been fully written | |
502 | if not self.connection.req_cond:wait(deadline and (deadline-monotime())) then | |
574 | target = assert(headers:get(":path"), "missing path") | |
575 | end | |
576 | if self.connection.req_locked then | |
577 | -- Wait until previous request has been fully written | |
578 | assert(cqueues.running(), "cannot wait for condition if not within a cqueues coroutine") | |
579 | headers = headers:clone() -- don't want user to edit it and send wrong headers | |
580 | if cqueues.poll(self.connection.req_cond, timeout) == timeout then | |
503 | 581 | return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT |
504 | 582 | end |
505 | assert(self.req_locked == nil) | |
583 | assert(self.connection.req_locked == nil) | |
506 | 584 | end |
507 | 585 | self.connection.pipeline:push(self) |
508 | 586 | self.connection.req_locked = self |
509 | 587 | -- write request line |
510 | local ok, err, errno = self.connection:write_request_line(method, path, self.connection.version, deadline and (deadline-monotime())) | |
588 | local ok, err, errno = self.connection:write_request_line(method, target, self.connection.version, 0) | |
511 | 589 | if not ok then |
512 | 590 | return nil, err, errno |
513 | 591 | end |
531 | 609 | end |
532 | 610 | elseif self.type == "server" and status_code and status_code:sub(1, 1) == "1" then |
533 | 611 | assert(not end_stream, "cannot end stream directly after 1xx status code") |
534 | -- A server MUST NOT send a Content-Length header field in any response with a status code of 1xx (Informational) or 204 (No Content) | |
612 | -- A server MUST NOT send a Content-Length header field in any response | |
613 | -- with a status code of 1xx (Informational) or 204 (No Content) | |
535 | 614 | if cl then |
536 | 615 | error("Content-Length not allowed in response with 1xx status code") |
537 | 616 | end |
548 | 627 | end |
549 | 628 | if cl then |
550 | 629 | -- RFC 7230 Section 3.3.2: |
551 | -- A sender MUST NOT send a Content-Length header field in any message that contains a Transfer-Encoding header field. | |
630 | -- A sender MUST NOT send a Content-Length header field in any | |
631 | -- message that contains a Transfer-Encoding header field. | |
552 | 632 | if transfer_encoding_header then |
553 | 633 | error("Content-Length not allowed in message with a transfer-encoding") |
554 | 634 | elseif self.type == "server" then |
555 | -- A server MUST NOT send a Content-Length header field in any response with a status code of 1xx (Informational) or 204 (No Content) | |
635 | -- A server MUST NOT send a Content-Length header field in any response | |
636 | -- with a status code of 1xx (Informational) or 204 (No Content) | |
556 | 637 | if status_code == "204" then |
557 | 638 | error("Content-Length not allowed in response with 204 status code") |
558 | 639 | end |
655 | 736 | |
656 | 737 | for name, value in headers:each() do |
657 | 738 | if not ignore_fields[name] then |
658 | local ok, err, errno = self.connection:write_header(name, value, deadline and (deadline-monotime())) | |
739 | local ok, err, errno = self.connection:write_header(name, value, 0) | |
659 | 740 | if not ok then |
660 | 741 | return nil, err, errno |
661 | 742 | end |
663 | 744 | -- for CONNECT requests, :authority is the path |
664 | 745 | if self.req_method ~= "CONNECT" then |
665 | 746 | -- otherwise it's the Host header |
666 | local ok, err, errno = self.connection:write_header("host", value, deadline and (deadline-monotime())) | |
747 | local ok, err, errno = self.connection:write_header("host", value, 0) | |
667 | 748 | if not ok then |
668 | 749 | return nil, err, errno |
669 | 750 | end |
676 | 757 | if not has(connection_header, "te") then |
677 | 758 | table.insert(connection_header, "te") |
678 | 759 | end |
679 | local ok, err, errno = self.connection:write_header("te", "gzip, deflate", deadline and deadline-monotime()) | |
760 | local ok, err, errno = self.connection:write_header("te", "gzip, deflate", 0) | |
680 | 761 | if not ok then |
681 | 762 | return nil, err, errno |
682 | 763 | end |
698 | 779 | value[i] = table.concat(params, ";") |
699 | 780 | end |
700 | 781 | value = table.concat(value, ",") |
701 | local ok, err, errno = self.connection:write_header("transfer-encoding", value, deadline and (deadline-monotime())) | |
782 | local ok, err, errno = self.connection:write_header("transfer-encoding", value, 0) | |
702 | 783 | if not ok then |
703 | 784 | return nil, err, errno |
704 | 785 | end |
705 | 786 | elseif cl then |
706 | local ok, err, errno = self.connection:write_header("content-length", cl, deadline and (deadline-monotime())) | |
787 | local ok, err, errno = self.connection:write_header("content-length", cl, 0) | |
707 | 788 | if not ok then |
708 | 789 | return nil, err, errno |
709 | 790 | end |
710 | 791 | end |
711 | 792 | if connection_header and connection_header[1] then |
712 | 793 | local value = table.concat(connection_header, ",") |
713 | local ok, err, errno = self.connection:write_header("connection", value, deadline and (deadline-monotime())) | |
794 | local ok, err, errno = self.connection:write_header("connection", value, 0) | |
714 | 795 | if not ok then |
715 | 796 | return nil, err, errno |
716 | 797 | end |
741 | 822 | return true |
742 | 823 | end |
743 | 824 | |
744 | function stream_methods:get_next_chunk(timeout) | |
745 | local chunk = self.body_buffer | |
746 | if chunk then | |
747 | self.body_buffer = nil | |
748 | return chunk | |
749 | end | |
825 | function stream_methods:read_next_chunk(timeout) | |
750 | 826 | if self.state == "closed" or self.state == "half closed (remote)" then |
751 | 827 | return nil |
752 | 828 | end |
753 | 829 | local end_stream |
754 | local err, errno | |
830 | local chunk, err, errno | |
755 | 831 | if self.body_read_type == "chunked" then |
756 | 832 | local deadline = timeout and (monotime()+timeout) |
757 | chunk, err, errno = self.connection:read_body_chunk(timeout) | |
833 | if self.body_read_left == 0 then | |
834 | chunk = false | |
835 | else | |
836 | chunk, err, errno = self.connection:read_body_chunk(timeout) | |
837 | end | |
758 | 838 | if chunk == false then |
759 | -- read trailers | |
760 | local trailers | |
761 | trailers, err, errno = self:read_headers(deadline and (deadline-monotime())) | |
762 | if not trailers then | |
763 | return nil, err, errno | |
764 | end | |
765 | self.headers_fifo:push(trailers) | |
766 | self.headers_cond:signal(1) | |
767 | -- :read_headers has already closed connection; return immediately | |
839 | -- last chunk, :read_headers should be called to get trailers | |
840 | self.body_read_left = 0 | |
841 | -- for API compat: attempt to read trailers | |
842 | local ok | |
843 | ok, err, errno = self:step(deadline and deadline-monotime()) | |
844 | if not ok then | |
845 | return nil, err, errno | |
846 | end | |
768 | 847 | return nil |
769 | 848 | else |
770 | 849 | end_stream = false |
822 | 901 | return chunk, err, errno |
823 | 902 | end |
824 | 903 | |
904 | function stream_methods:get_next_chunk(timeout) | |
905 | if self.chunk_fifo:length() > 0 then | |
906 | return self.chunk_fifo:pop() | |
907 | end | |
908 | return self:read_next_chunk(timeout) | |
909 | end | |
910 | ||
825 | 911 | function stream_methods:unget(str) |
826 | local chunk = self.body_buffer | |
827 | if chunk then | |
828 | self.body_buffer = str .. chunk | |
829 | else | |
830 | self.body_buffer = str | |
831 | end | |
912 | self.chunk_fifo:insert(1, str) | |
913 | self.chunk_cond:signal() | |
832 | 914 | return true |
833 | 915 | end |
834 | 916 |
8 | 8 | local onerror = connection_common.onerror |
9 | 9 | local h2_error = require "http.h2_error" |
10 | 10 | local h2_stream = require "http.h2_stream" |
11 | local known_settings = h2_stream.known_settings | |
11 | 12 | local hpack = require "http.hpack" |
12 | 13 | local h2_banned_ciphers = require "http.tls".banned_ciphers |
13 | local spack = string.pack or require "compat53.string".pack | |
14 | local sunpack = string.unpack or require "compat53.string".unpack | |
14 | local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143 | |
15 | local sunpack = string.unpack or require "compat53.string".unpack -- luacheck: ignore 143 | |
15 | 16 | |
16 | 17 | local assert = assert |
17 | 18 | if _VERSION:match("%d+%.?%d*") < "5.3" then |
25 | 26 | local preface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" |
26 | 27 | |
27 | 28 | local default_settings = { |
28 | [0x1] = 4096; -- HEADER_TABLE_SIZE | |
29 | [0x2] = true; -- ENABLE_PUSH | |
30 | [0x3] = math.huge; -- MAX_CONCURRENT_STREAMS | |
31 | [0x4] = 65535; -- INITIAL_WINDOW_SIZE | |
32 | [0x5] = 16384; -- MAX_FRAME_SIZE | |
33 | [0x6] = math.huge; -- MAX_HEADER_LIST_SIZE | |
29 | [known_settings.HEADER_TABLE_SIZE] = 4096; | |
30 | [known_settings.ENABLE_PUSH] = true; | |
31 | [known_settings.MAX_CONCURRENT_STREAMS] = math.huge; | |
32 | [known_settings.INITIAL_WINDOW_SIZE] = 65535; | |
33 | [known_settings.MAX_FRAME_SIZE] = 16384; | |
34 | [known_settings.MAX_HEADER_LIST_SIZE] = math.huge; | |
35 | [known_settings.SETTINGS_ENABLE_CONNECT_PROTOCOL] = false; | |
36 | [known_settings.TLS_RENEG_PERMITTED] = 0; | |
34 | 37 | } |
35 | 38 | |
36 | local function merge_settings(new, old) | |
37 | return { | |
38 | [0x1] = new[0x1] or old[0x1]; | |
39 | [0x2] = new[0x2] or old[0x2]; | |
40 | [0x3] = new[0x3] or old[0x3]; | |
41 | [0x4] = new[0x4] or old[0x4]; | |
42 | [0x5] = new[0x5] or old[0x5]; | |
43 | [0x6] = new[0x6] or old[0x6]; | |
44 | } | |
39 | local function merge_settings(tbl, new) | |
40 | for i=0x1, 0x6 do | |
41 | local v = new[i] | |
42 | if v ~= nil then | |
43 | tbl[i] = v | |
44 | end | |
45 | end | |
45 | 46 | end |
46 | 47 | |
47 | 48 | local connection_methods = {} |
99 | 100 | error('invalid connection type. must be "client" or "server"') |
100 | 101 | end |
101 | 102 | |
102 | socket:setvbuf("full", math.huge) -- 'infinite' buffering; no write locks needed | |
103 | socket:setmode("b", "bf") -- full buffering for now; will be set to no buffering after settings sent | |
104 | socket:onerror(onerror) | |
105 | ||
106 | 103 | local ssl = socket:checktls() |
107 | 104 | if ssl then |
108 | 105 | local cipher = ssl:getCipherInfo() |
127 | 124 | |
128 | 125 | -- For continuations |
129 | 126 | need_continuation = nil; -- stream |
127 | promised_stream = nil; -- stream | |
128 | recv_headers_end_stream = nil; | |
130 | 129 | recv_headers_buffer = nil; |
131 | 130 | recv_headers_buffer_pos = nil; |
132 | 131 | recv_headers_buffer_pad_len = nil; |
134 | 133 | recv_headers_buffer_length = nil; |
135 | 134 | |
136 | 135 | highest_odd_stream = -1; |
136 | highest_odd_non_idle_stream = -1; | |
137 | 137 | highest_even_stream = -2; |
138 | highest_even_non_idle_stream = -2; | |
138 | 139 | send_goaway_lowest = nil; |
139 | 140 | recv_goaway_lowest = nil; |
140 | 141 | recv_goaway = cc.new(); |
141 | 142 | new_streams = new_fifo(); |
142 | 143 | new_streams_cond = cc.new(); |
143 | peer_settings = default_settings; | |
144 | peer_settings = {}; | |
144 | 145 | peer_settings_cond = cc.new(); -- signaled when the peer has changed their settings |
145 | acked_settings = default_settings; | |
146 | acked_settings = {}; | |
146 | 147 | send_settings = {n = 0}; |
147 | 148 | send_settings_ack_cond = cc.new(); -- for when server ACKs our settings |
148 | 149 | send_settings_acked = 0; |
149 | 150 | peer_flow_credits = 65535; -- 5.2.1 |
150 | peer_flow_credits_increase = cc.new(); | |
151 | peer_flow_credits_change = cc.new(); | |
151 | 152 | encoding_context = nil; |
152 | 153 | decoding_context = nil; |
153 | 154 | pongs = {}; -- pending pings we've sent. keyed by opaque 8 byte payload |
154 | 155 | }, connection_mt) |
155 | 156 | self:new_stream(0) |
156 | self.encoding_context = hpack.new(default_settings[0x1]) | |
157 | self.decoding_context = hpack.new(default_settings[0x1]) | |
158 | ||
157 | merge_settings(self.peer_settings, default_settings) | |
158 | merge_settings(self.acked_settings, default_settings) | |
159 | self.encoding_context = hpack.new(default_settings[known_settings.HEADER_TABLE_SIZE]) | |
160 | self.decoding_context = hpack.new(default_settings[known_settings.HEADER_TABLE_SIZE]) | |
161 | ||
162 | socket:setvbuf("full", math.huge) -- 'infinite' buffering; no write locks needed | |
163 | socket:setmode("b", "bna") -- writes that don't explicitly buffer will now flush the buffer. autoflush on | |
164 | socket:onerror(onerror) | |
159 | 165 | if self.type == "client" then |
160 | -- fully buffered write; will be flushed when sending settings | |
161 | 166 | assert(socket:xwrite(preface, "f", 0)) |
162 | 167 | end |
163 | assert(self.stream0:write_settings_frame(false, settings or {}, 0)) | |
164 | socket:setmode("b", "bna") -- writes that don't explicitly buffer will now flush the buffer. autoflush on | |
168 | assert(self.stream0:write_settings_frame(false, settings or {}, 0, "f")) | |
165 | 169 | -- note that the buffer is *not* flushed right now |
166 | 170 | |
167 | 171 | return self |
168 | end | |
169 | ||
170 | function connection_methods:pollfd() | |
171 | return self.socket:pollfd() | |
172 | end | |
173 | ||
174 | function connection_methods:events() | |
175 | return self.socket:events() | |
176 | 172 | end |
177 | 173 | |
178 | 174 | function connection_methods:timeout() |
179 | 175 | if not self.had_eagain then |
180 | 176 | return 0 |
181 | 177 | end |
182 | return self.socket:timeout() | |
178 | return connection_common.methods.timeout(self) | |
183 | 179 | end |
184 | 180 | |
185 | 181 | local function handle_frame(self, typ, flag, streamid, payload, deadline) |
191 | 187 | -- Implementations MUST ignore and discard any frame that has a type that is unknown. |
192 | 188 | if handler then |
193 | 189 | local stream = self.streams[streamid] |
194 | if stream == nil and (not self.recv_goaway_lowest or streamid < self.recv_goaway_lowest) then | |
190 | if stream == nil then | |
195 | 191 | if xor(streamid % 2 == 1, self.type == "client") then |
196 | 192 | return nil, h2_error.errors.PROTOCOL_ERROR:new_traceback("Streams initiated by a client MUST use odd-numbered stream identifiers; those initiated by the server MUST use even-numbered stream identifiers"), ce.EILSEQ |
197 | 193 | end |
198 | 194 | -- TODO: check MAX_CONCURRENT_STREAMS |
199 | 195 | stream = self:new_stream(streamid) |
200 | self.new_streams:push(stream) | |
201 | self.new_streams_cond:signal(1) | |
196 | --[[ http2 spec section 6.8 | |
197 | the sender will ignore frames sent on streams initiated by | |
198 | the receiver if the stream has an identifier higher than the included | |
199 | last stream identifier | |
200 | ... | |
201 | After sending a GOAWAY frame, the sender can discard frames for | |
202 | streams initiated by the receiver with identifiers higher than the | |
203 | identified last stream. However, any frames that alter connection | |
204 | state cannot be completely ignored. For instance, HEADERS, | |
205 | PUSH_PROMISE, and CONTINUATION frames MUST be minimally processed to | |
206 | ensure the state maintained for header compression is consistent (see | |
207 | Section 4.3); similarly, DATA frames MUST be counted toward the | |
208 | connection flow-control window. Failure to process these frames can | |
209 | cause flow control or header compression state to become | |
210 | unsynchronized.]] | |
211 | -- If we haven't seen this stream before, and we should be discarding frames from it, | |
212 | -- then don't push it into the new_streams fifo | |
213 | if self.send_goaway_lowest == nil or streamid <= self.send_goaway_lowest then | |
214 | self.new_streams:push(stream) | |
215 | self.new_streams_cond:signal(1) | |
216 | end | |
202 | 217 | end |
203 | 218 | local ok, err, errno = handler(stream, flag, payload, deadline) |
204 | 219 | if not ok then |
205 | 220 | if h2_error.is(err) and err.stream_error and streamid ~= 0 and stream.state ~= "idle" then |
206 | local ok2, err2, errno2 = stream:write_rst_stream(err.code, deadline and deadline-monotime()) | |
221 | local ok2, err2, errno2 = stream:rst_stream(err, deadline and deadline-monotime()) | |
207 | 222 | if not ok2 then |
208 | 223 | return nil, err2, errno2 |
209 | 224 | end |
219 | 234 | local deadline = timeout and monotime()+timeout |
220 | 235 | if not self.has_confirmed_preface and self.type == "server" then |
221 | 236 | local ok, err, errno = socket_has_preface(self.socket, false, timeout) |
237 | self.had_eagain = false | |
222 | 238 | if ok == nil then |
223 | 239 | if errno == ce.ETIMEDOUT then |
240 | self.had_eagain = true | |
224 | 241 | return true |
225 | 242 | end |
226 | 243 | return nil, err, errno |
299 | 316 | end |
300 | 317 | |
301 | 318 | function connection_methods:new_stream(id) |
319 | if id and self.streams[id] ~= nil then | |
320 | error("stream id already in use") | |
321 | end | |
322 | local stream = h2_stream.new(self) | |
302 | 323 | if id then |
303 | assert(id % 1 == 0) | |
304 | else | |
305 | if self.recv_goaway_lowest then | |
306 | h2_error.errors.PROTOCOL_ERROR("Receivers of a GOAWAY frame MUST NOT open additional streams on the connection") | |
307 | end | |
308 | if self.type == "client" then | |
309 | -- Pick next free odd number | |
310 | id = self.highest_odd_stream + 2 | |
311 | else | |
312 | -- Pick next free odd number | |
313 | id = self.highest_even_stream + 2 | |
314 | end | |
315 | -- TODO: check MAX_CONCURRENT_STREAMS | |
316 | end | |
317 | assert(self.streams[id] == nil, "stream id already in use") | |
318 | assert(id < 2^32, "stream id too large") | |
319 | if id % 2 == 0 then | |
320 | if id > self.highest_even_stream then | |
321 | self.highest_even_stream = id | |
322 | end | |
323 | else | |
324 | if id > self.highest_odd_stream then | |
325 | self.highest_odd_stream = id | |
326 | end | |
327 | end | |
328 | local stream = h2_stream.new(self, id) | |
329 | if id == 0 then | |
330 | self.stream0 = stream | |
331 | else | |
332 | -- Add dependency on stream 0. http2 spec, 5.3.1 | |
333 | self.stream0:reprioritise(stream) | |
334 | end | |
335 | self.streams[id] = stream | |
324 | stream:pick_id(id) | |
325 | end | |
336 | 326 | return stream |
337 | 327 | end |
338 | 328 | |
384 | 374 | end |
385 | 375 | end |
386 | 376 | local size, typ, flags, streamid = sunpack(">I3 B B I4", frame_header) |
387 | if size > self.acked_settings[0x5] then | |
377 | if size > self.acked_settings[known_settings.MAX_FRAME_SIZE] then | |
378 | local ok, errno2 = self.socket:unget(frame_header) | |
379 | if not ok then | |
380 | return nil, onerror(self.socket, "unget", errno2, 2) | |
381 | end | |
388 | 382 | return nil, h2_error.errors.FRAME_SIZE_ERROR:new_traceback("frame too large"), ce.E2BIG |
389 | 383 | end |
390 | local payload, err2, errno2 = self.socket:xread(size, deadline and (deadline-monotime())) | |
384 | local payload, err2, errno2 = self.socket:xread(size, 0) | |
391 | 385 | self.had_eagain = false |
392 | 386 | if payload and #payload < size then -- hit EOF |
393 | 387 | local ok, errno4 = self.socket:unget(payload) |
397 | 391 | payload = nil |
398 | 392 | end |
399 | 393 | if payload == nil then |
394 | -- put frame header back into socket so a retry will work | |
395 | local ok, errno3 = self.socket:unget(frame_header) | |
396 | if not ok then | |
397 | return nil, onerror(self.socket, "unget", errno3, 2) | |
398 | end | |
400 | 399 | if errno2 == ce.ETIMEDOUT then |
401 | 400 | self.had_eagain = true |
402 | end | |
403 | -- put frame header back into socket so a retry will work | |
404 | local ok, errno3 = self.socket:unget(frame_header) | |
405 | if not ok then | |
406 | return nil, onerror(self.socket, "unget", errno3, 2) | |
407 | end | |
408 | if err2 == nil then | |
401 | timeout = deadline and deadline-monotime() | |
402 | if cqueues.poll(self.socket, timeout) ~= timeout then | |
403 | return self:read_http2_frame(deadline and deadline-monotime()) | |
404 | end | |
405 | elseif err2 == nil then | |
409 | 406 | self.socket:seterror("r", ce.EILSEQ) |
410 | 407 | return nil, onerror(self.socket, "read_http2_frame", ce.EILSEQ) |
411 | 408 | end |
419 | 416 | -- If this times out, it was the flushing; not the write itself |
420 | 417 | -- hence it's not always total failure. |
421 | 418 | -- It's up to the caller to take some action (e.g. closing) rather than doing it here |
422 | function connection_methods:write_http2_frame(typ, flags, streamid, payload, timeout) | |
423 | local deadline = timeout and monotime()+timeout | |
424 | if #payload > self.peer_settings[0x5] then | |
419 | function connection_methods:write_http2_frame(typ, flags, streamid, payload, timeout, flush) | |
420 | if #payload > self.peer_settings[known_settings.MAX_FRAME_SIZE] then | |
425 | 421 | return nil, h2_error.errors.FRAME_SIZE_ERROR:new_traceback("frame too large"), ce.E2BIG |
426 | 422 | end |
427 | 423 | local header = spack(">I3 B B I4", #payload, typ, flags, streamid) |
428 | local ok, err, errno = self.socket:xwrite(header, "f", timeout) | |
424 | local ok, err, errno = self.socket:xwrite(header, "f", 0) | |
429 | 425 | if not ok then |
430 | 426 | return nil, err, errno |
431 | 427 | end |
432 | return self.socket:xwrite(payload, deadline and deadline-monotime()) | |
428 | return self.socket:xwrite(payload, flush, timeout) | |
433 | 429 | end |
434 | 430 | |
435 | 431 | function connection_methods:ping(timeout) |
469 | 465 | end |
470 | 466 | |
471 | 467 | function connection_methods:set_peer_settings(peer_settings) |
472 | self.peer_settings = merge_settings(peer_settings, self.peer_settings) | |
468 | --[[ 6.9.2: | |
469 | In addition to changing the flow-control window for streams that are | |
470 | not yet active, a SETTINGS frame can alter the initial flow-control | |
471 | window size for streams with active flow-control windows (that is, | |
472 | streams in the "open" or "half-closed (remote)" state). When the | |
473 | value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust | |
474 | the size of all stream flow-control windows that it maintains by the | |
475 | difference between the new value and the old value. | |
476 | ||
477 | A change to SETTINGS_INITIAL_WINDOW_SIZE can cause the available | |
478 | space in a flow-control window to become negative. A sender MUST | |
479 | track the negative flow-control window and MUST NOT send new flow- | |
480 | controlled frames until it receives WINDOW_UPDATE frames that cause | |
481 | the flow-control window to become positive.]] | |
482 | local new_window_size = peer_settings[known_settings.INITIAL_WINDOW_SIZE] | |
483 | if new_window_size then | |
484 | local old_windows_size = self.peer_settings[known_settings.INITIAL_WINDOW_SIZE] | |
485 | local delta = new_window_size - old_windows_size | |
486 | if delta ~= 0 then | |
487 | for _, stream in pairs(self.streams) do | |
488 | stream.peer_flow_credits = stream.peer_flow_credits + delta | |
489 | stream.peer_flow_credits_change:signal() | |
490 | end | |
491 | end | |
492 | end | |
493 | ||
494 | merge_settings(self.peer_settings, peer_settings) | |
473 | 495 | self.peer_settings_cond:signal() |
474 | 496 | end |
475 | 497 | |
479 | 501 | local acked_settings = self.send_settings[n] |
480 | 502 | if acked_settings then |
481 | 503 | self.send_settings[n] = nil |
482 | self.acked_settings = merge_settings(acked_settings, self.acked_settings) | |
504 | merge_settings(self.acked_settings, acked_settings) | |
483 | 505 | end |
484 | 506 | self.send_settings_ack_cond:signal() |
485 | 507 | end |
0 | interface h2_error | |
1 | const new: (self, { | |
2 | "name": string?, | |
3 | "code": integer?, | |
4 | "description": string?, | |
5 | "message": string?, | |
6 | "traceback": string?, | |
7 | "stream_error": boolean? | |
8 | }) -> (h2_error) | |
9 | const new_traceback: (self, string, boolean, integer?) -> (h2_error) | |
10 | const error: (self, string, boolean, integer?) -> (void) | |
11 | end | |
12 | ||
13 | errors: {any:h2_error} | |
14 | is: (any) -> (boolean) |
4 | 4 | local new_fifo = require "fifo" |
5 | 5 | local band = require "http.bit".band |
6 | 6 | local bor = require "http.bit".bor |
7 | local h2_errors = require "http.h2_error".errors | |
7 | local h2_error = require "http.h2_error" | |
8 | local h2_errors = h2_error.errors | |
8 | 9 | local stream_common = require "http.stream_common" |
9 | local spack = string.pack or require "compat53.string".pack | |
10 | local sunpack = string.unpack or require "compat53.string".unpack | |
11 | local unpack = table.unpack or unpack -- luacheck: ignore 113 | |
10 | local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143 | |
11 | local sunpack = string.unpack or require "compat53.string".unpack -- luacheck: ignore 143 | |
12 | local unpack = table.unpack or unpack -- luacheck: ignore 113 143 | |
12 | 13 | |
13 | 14 | local assert = assert |
14 | 15 | if _VERSION:match("%d+%.?%d*") < "5.3" then |
16 | 17 | end |
17 | 18 | |
18 | 19 | local MAX_HEADER_BUFFER_SIZE = 400*1024 -- 400 KB is max size in h2o |
20 | ||
21 | local known_settings = {} | |
22 | for i, s in pairs({ | |
23 | [0x1] = "HEADER_TABLE_SIZE"; | |
24 | [0x2] = "ENABLE_PUSH"; | |
25 | [0x3] = "MAX_CONCURRENT_STREAMS"; | |
26 | [0x4] = "INITIAL_WINDOW_SIZE"; | |
27 | [0x5] = "MAX_FRAME_SIZE"; | |
28 | [0x6] = "MAX_HEADER_LIST_SIZE"; | |
29 | [0x8] = "SETTINGS_ENABLE_CONNECT_PROTOCOL"; | |
30 | [0x10] = "TLS_RENEG_PERMITTED"; | |
31 | }) do | |
32 | known_settings[i] = s | |
33 | known_settings[s] = i | |
34 | end | |
35 | ||
36 | local frame_types = { | |
37 | [0x0] = "DATA"; | |
38 | [0x1] = "HEADERS"; | |
39 | [0x2] = "PRIORITY"; | |
40 | [0x3] = "RST_STREAM"; | |
41 | [0x4] = "SETTINGS"; |
42 | [0x5] = "PUSH_PROMISE"; | |
43 | [0x6] = "PING"; | |
44 | [0x7] = "GOAWAY"; | |
45 | [0x8] = "WINDOW_UPDATE"; | |
46 | [0x9] = "CONTINUATION"; | |
47 | [0xa] = "ALTSVC"; | |
48 | [0xc] = "ORIGIN"; | |
49 | } | |
50 | for i=0x0, 0x9 do | |
51 | frame_types[frame_types[i]] = i | |
52 | end | |
19 | 53 | |
20 | 54 | local frame_handlers = {} |
21 | 55 | |
35 | 69 | end |
36 | 70 | table.sort(dependee_list) |
37 | 71 | dependee_list = table.concat(dependee_list, ",") |
38 | return string.format("http.h2_stream{connection=%s;id=%d;state=%q;parent=%s;dependees={%s}}", | |
39 | tostring(self.connection), self.id, self.state, | |
72 | return string.format("http.h2_stream{connection=%s;id=%s;state=%q;parent=%s;dependees={%s}}", | |
73 | tostring(self.connection), tostring(self.id), self.state, | |
40 | 74 | (self.parent and tostring(self.parent.id) or "nil"), dependee_list) |
41 | 75 | end |
42 | 76 | |
43 | local function new_stream(connection, id) | |
44 | assert(type(id) == "number" and id >= 0 and id <= 0x7fffffff, "invalid stream id") | |
77 | local function new_stream(connection) | |
45 | 78 | local self = setmetatable({ |
46 | 79 | connection = connection; |
47 | 80 | type = connection.type; |
48 | 81 | |
49 | 82 | state = "idle"; |
50 | 83 | |
51 | id = id; | |
52 | peer_flow_credits = id ~= 0 and connection.peer_settings[0x4]; | |
53 | peer_flow_credits_increase = cc.new(); | |
84 | id = nil; | |
85 | peer_flow_credits = 0; | |
86 | peer_flow_credits_change = cc.new(); | |
54 | 87 | parent = nil; |
55 | 88 | dependees = setmetatable({}, {__mode="kv"}); |
56 | 89 | weight = 16; -- http2 spec, section 5.3.5 |
67 | 100 | |
68 | 101 | chunk_fifo = new_fifo(); |
69 | 102 | chunk_cond = cc.new(); |
103 | ||
104 | end_stream_after_continuation = nil; | |
105 | content_length = nil; | |
70 | 106 | }, stream_mt) |
71 | 107 | return self |
108 | end | |
109 | ||
110 | function stream_methods:pick_id(id) | |
111 | assert(self.id == nil) | |
112 | if id == nil then | |
113 | if self.connection.recv_goaway_lowest then | |
114 | h2_error.errors.PROTOCOL_ERROR("Receivers of a GOAWAY frame MUST NOT open additional streams on the connection") | |
115 | end | |
116 | if self.type == "client" then | |
117 | -- Pick next free odd number | |
118 | id = self.connection.highest_odd_stream + 2 | |
119 | self.connection.highest_odd_stream = id | |
120 | else | |
121 | -- Pick next free even number | |
122 | id = self.connection.highest_even_stream + 2 | |
123 | self.connection.highest_even_stream = id | |
124 | end | |
125 | self.id = id | |
126 | else | |
127 | assert(type(id) == "number" and id >= 0 and id <= 0x7fffffff and id % 1 == 0, "invalid stream id") | |
128 | assert(self.connection.streams[id] == nil) | |
129 | self.id = id | |
130 | if id % 2 == 0 then | |
131 | if id > self.connection.highest_even_stream then | |
132 | self.connection.highest_even_stream = id | |
133 | end | |
134 | -- stream 'already' existed but was possibly collected. see http2 spec 5.1.1 | |
135 | if id <= self.connection.highest_even_non_idle_stream then | |
136 | self:set_state("closed") | |
137 | end | |
138 | else | |
139 | if id > self.connection.highest_odd_stream then | |
140 | self.connection.highest_odd_stream = id | |
141 | end | |
142 | -- stream 'already' existed but was possibly collected. see http2 spec 5.1.1 | |
143 | if id <= self.connection.highest_odd_non_idle_stream then | |
144 | self:set_state("closed") | |
145 | end | |
146 | end | |
147 | end | |
148 | -- TODO: check MAX_CONCURRENT_STREAMS | |
149 | self.connection.streams[id] = self | |
150 | if id == 0 then | |
151 | self.connection.stream0 = self | |
152 | else | |
153 | self.peer_flow_credits = self.connection.peer_settings[known_settings.INITIAL_WINDOW_SIZE] | |
154 | self.peer_flow_credits_change:signal() | |
155 | -- Add dependency on stream 0. http2 spec, 5.3.1 | |
156 | self.connection.stream0:reprioritise(self) | |
157 | end | |
158 | return true | |
72 | 159 | end |
73 | 160 | |
74 | 161 | local valid_states = { |
86 | 173 | if new_order <= valid_states[old] then |
87 | 174 | error("invalid state progression ('"..old.."' to '"..new.."')") |
88 | 175 | end |
176 | if new ~= "closed" then | |
177 | assert(self.id) | |
178 | end | |
89 | 179 | self.state = new |
180 | if new == "closed" or new == "half closed (remote)" then | |
181 | self.recv_headers_cond:signal() | |
182 | self.chunk_cond:signal() | |
183 | end | |
184 | if old == "idle" then | |
185 | if self.id % 2 == 0 then | |
186 | if self.id > self.connection.highest_even_non_idle_stream then | |
187 | self.connection.highest_even_non_idle_stream = self.id | |
188 | end | |
189 | else | |
190 | if self.id > self.connection.highest_odd_non_idle_stream then | |
191 | self.connection.highest_odd_non_idle_stream = self.id | |
192 | end | |
193 | end | |
194 | end | |
90 | 195 | if old == "idle" and new ~= "closed" then |
91 | 196 | self.connection.n_active_streams = self.connection.n_active_streams + 1 |
92 | 197 | elseif old ~= "idle" and new == "closed" then |
98 | 203 | end |
99 | 204 | end |
100 | 205 | |
101 | function stream_methods:write_http2_frame(typ, flags, payload, timeout) | |
102 | return self.connection:write_http2_frame(typ, flags, self.id, payload, timeout) | |
206 | function stream_methods:write_http2_frame(typ, flags, payload, timeout, flush) | |
207 | local stream_id = assert(self.id, "stream has unset id") | |
208 | return self.connection:write_http2_frame(typ, flags, stream_id, payload, timeout, flush) | |
103 | 209 | end |
104 | 210 | |
105 | 211 | function stream_methods:reprioritise(child, exclusive) |
106 | 212 | assert(child) |
213 | assert(child.id) | |
107 | 214 | assert(child.id ~= 0) -- cannot reprioritise stream 0 |
108 | 215 | if self == child then |
109 | 216 | -- http2 spec, section 5.3.1 |
148 | 255 | __index = chunk_methods; |
149 | 256 | } |
150 | 257 | |
151 | local function new_chunk(stream, original_length, data) | |
258 | local function new_chunk(original_length, data) | |
152 | 259 | return setmetatable({ |
153 | stream = stream; | |
154 | 260 | original_length = original_length; |
261 | acked = false; | |
155 | 262 | data = data; |
156 | acked = false; | |
157 | 263 | }, chunk_mt) |
158 | 264 | end |
159 | 265 | |
160 | function chunk_methods:ack(no_window_update) | |
266 | function chunk_methods:ack() | |
161 | 267 | if self.acked then |
162 | return | |
163 | end | |
164 | self.acked = true | |
165 | local len = self.original_length | |
166 | if len > 0 and not no_window_update then | |
167 | -- ignore errors | |
168 | self.stream:write_window_update(len, 0) | |
169 | self.stream.connection:write_window_update(len, 0) | |
170 | end | |
171 | end | |
172 | ||
173 | -- DATA | |
174 | frame_handlers[0x0] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 | |
268 | return 0 | |
269 | else | |
270 | self.acked = true | |
271 | return self.original_length | |
272 | end | |
273 | end | |
274 | ||
275 | frame_handlers[frame_types.DATA] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 | |
175 | 276 | if stream.id == 0 then |
176 | 277 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'DATA' frames MUST be associated with a stream"), ce.EILSEQ |
177 | 278 | end |
178 | 279 | if stream.state == "idle" or stream.state == "reserved (remote)" then |
179 | 280 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'DATA' frames not allowed in 'idle' state"), ce.EILSEQ |
180 | 281 | elseif stream.state ~= "open" and stream.state ~= "half closed (local)" then |
181 | return nil, h2_errors.STREAM_CLOSED:new_traceback("'DATA' frames not allowed in '" .. stream.state .. "' state", true), ce.EILSEQ | |
282 | return nil, h2_errors.STREAM_CLOSED:new_traceback("'DATA' frames not allowed in '" .. stream.state .. "' state"), ce.EILSEQ | |
182 | 283 | end |
183 | 284 | |
184 | 285 | local end_stream = band(flags, 0x1) ~= 0 |
191 | 292 | if pad_len >= #payload then -- >= will take care of the pad_len itself |
192 | 293 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("length of the padding is the length of the frame payload or greater"), ce.EILSEQ |
193 | 294 | elseif payload:match("[^%z]", -pad_len) then |
295 | -- 6.1: A receiver is not obligated to verify padding but MAY treat non-zero padding as a connection error of type PROTOCOL_ERROR. | |
194 | 296 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("padding not null bytes"), ce.EILSEQ |
195 | 297 | end |
196 | 298 | payload = payload:sub(2, -pad_len-1) |
197 | 299 | end |
198 | 300 | |
199 | local chunk = new_chunk(stream, original_length, payload) | |
301 | local stats_recv = stream.stats_recv + #payload | |
302 | if stream.content_length and stats_recv > stream.content_length then | |
303 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("content-length exceeded", true), ce.EILSEQ | |
304 | end | |
305 | ||
306 | local chunk = new_chunk(original_length, payload) | |
200 | 307 | stream.chunk_fifo:push(chunk) |
201 | stream.stats_recv = stream.stats_recv + #payload | |
308 | stream.stats_recv = stats_recv | |
309 | ||
202 | 310 | if end_stream then |
203 | 311 | stream.chunk_fifo:push(nil) |
204 | end | |
205 | stream.chunk_cond:signal() | |
206 | ||
207 | if end_stream then | |
312 | -- chunk_cond gets signaled by :set_state | |
208 | 313 | if stream.state == "half closed (local)" then |
209 | 314 | stream:set_state("closed") |
210 | 315 | else |
211 | 316 | stream:set_state("half closed (remote)") |
212 | 317 | end |
318 | else | |
319 | stream.chunk_cond:signal() | |
213 | 320 | end |
214 | 321 | |
215 | 322 | return true |
216 | 323 | end |
217 | 324 | |
218 | function stream_methods:write_data_frame(payload, end_stream, padded, timeout) | |
325 | function stream_methods:write_data_frame(payload, end_stream, padded, timeout, flush) | |
219 | 326 | if self.id == 0 then |
220 | 327 | h2_errors.PROTOCOL_ERROR("'DATA' frames MUST be associated with a stream") |
221 | 328 | end |
222 | 329 | if self.state ~= "open" and self.state ~= "half closed (remote)" then |
223 | h2_errors.STREAM_CLOSED("'DATA' frame not allowed in '" .. self.state .. "' state", true) | |
330 | h2_errors.STREAM_CLOSED("'DATA' frame not allowed in '" .. self.state .. "' state") | |
224 | 331 | end |
225 | 332 | local pad_len, padding = "", "" |
226 | 333 | local flags = 0 |
240 | 347 | if new_stream_peer_flow_credits < 0 or new_connection_peer_flow_credits < 0 then |
241 | 348 | h2_errors.FLOW_CONTROL_ERROR("not enough flow credits") |
242 | 349 | end |
243 | local ok, err, errno = self:write_http2_frame(0x0, flags, payload, timeout) | |
350 | local ok, err, errno = self:write_http2_frame(frame_types.DATA, flags, payload, timeout, flush) | |
244 | 351 | if not ok then return nil, err, errno end |
245 | 352 | self.peer_flow_credits = new_stream_peer_flow_credits |
246 | 353 | self.connection.peer_flow_credits = new_connection_peer_flow_credits |
264 | 371 | [":status"] = false; |
265 | 372 | } |
266 | 373 | local function validate_headers(headers, is_request, nth_header, ended_stream) |
267 | do -- Validate that all colon fields are before other ones (section 8.1.2.1) | |
374 | -- Section 8.1.2: A request or response containing uppercase header field names MUST be treated as malformed | |
375 | for name in headers:each() do | |
376 | if name:lower() ~= name then | |
377 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("header field names MUST be lowercase", true), ce.EINVAL | |
378 | end | |
379 | end | |
380 | do -- Section 8.1.2.1: Validate that all colon fields are before other ones | |
268 | 381 | local seen_non_colon = false |
269 | 382 | for name, value in headers:each() do |
270 | 383 | if name:sub(1,1) == ":" then |
274 | 387 | defined for responses MUST NOT appear in requests. |
275 | 388 | Pseudo-header fields MUST NOT appear in trailers. |
276 | 389 | Endpoints MUST treat a request or response that contains |
277 | undefined or invalid pseudo-header fields as malformed | |
278 | (Section 8.1.2.6)]] | |
390 | undefined or invalid pseudo-header fields as malformed]] | |
279 | 391 | if (is_request and nth_header ~= 1) or valid_pseudo_headers[name] ~= is_request then |
280 | 392 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("Pseudo-header fields are only valid in the context in which they are defined", true), ce.EILSEQ |
281 | 393 | end |
339 | 451 | return true |
340 | 452 | end |
341 | 453 | |
342 | local function process_end_headers(stream, end_stream, pad_len, pos, promised_stream_id, payload) | |
454 | local function process_end_headers(stream, end_stream, pad_len, pos, promised_stream, payload) | |
343 | 455 | if pad_len > 0 then |
344 | 456 | if pad_len + pos - 1 > #payload then |
345 | 457 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("length of the padding is the length of the frame payload or greater"), ce.EILSEQ |
346 | 458 | elseif payload:match("[^%z]", -pad_len) then |
459 | -- 6.2: Padding fields and flags are identical to those defined for DATA frames | |
460 | -- 6.1: A receiver is not obligated to verify padding but MAY treat non-zero padding as a connection error of type PROTOCOL_ERROR. | |
347 | 461 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("padding not null bytes"), ce.EILSEQ |
348 | 462 | end |
349 | 463 | payload = payload:sub(1, -pad_len-1) |
357 | 471 | return nil, h2_errors.COMPRESSION_ERROR:new_traceback("incomplete header fragment"), ce.EILSEQ |
358 | 472 | end |
359 | 473 | |
360 | if not promised_stream_id then | |
474 | if not promised_stream then | |
361 | 475 | stream.stats_recv_headers = stream.stats_recv_headers + 1 |
362 | local validate_ok, validate_err, errno2 = validate_headers(headers, stream.type ~= "client", stream.stats_recv_headers, stream.state == "half closed (remote)" or stream.state == "closed") | |
476 | local validate_ok, validate_err, errno2 = validate_headers(headers, stream.type ~= "client", stream.stats_recv_headers, end_stream) | |
363 | 477 | if not validate_ok then |
364 | 478 | return nil, validate_err, errno2 |
365 | 479 | end |
480 | if headers:has("content-length") then | |
481 | stream.content_length = tonumber(headers:get("content-length"), 10) | |
482 | end | |
366 | 483 | stream.recv_headers_fifo:push(headers) |
367 | stream.recv_headers_cond:signal() | |
368 | 484 | |
369 | 485 | if end_stream then |
370 | 486 | stream.chunk_fifo:push(nil) |
371 | stream.chunk_cond:signal() | |
487 | -- recv_headers_cond and chunk_cond get signaled by :set_state | |
372 | 488 | if stream.state == "half closed (local)" then |
373 | 489 | stream:set_state("closed") |
374 | 490 | else |
375 | 491 | stream:set_state("half closed (remote)") |
376 | 492 | end |
377 | 493 | else |
494 | stream.recv_headers_cond:signal() | |
378 | 495 | if stream.state == "idle" then |
379 | 496 | stream:set_state("open") |
380 | 497 | end |
385 | 502 | return nil, validate_err, errno2 |
386 | 503 | end |
387 | 504 | |
388 | local promised_stream = stream.connection:new_stream(promised_stream_id) | |
389 | stream:reprioritise(promised_stream) | |
390 | 505 | promised_stream:set_state("reserved (remote)") |
391 | 506 | promised_stream.recv_headers_fifo:push(headers) |
392 | stream.connection.new_streams:push(promised_stream) | |
393 | stream.connection.new_streams_cond:signal(1) | |
507 | promised_stream.recv_headers_cond:signal() | |
508 | ||
509 | -- If we haven't seen this stream before, and we should be discarding frames from it | 
510 | -- (because we already sent a GOAWAY excluding it), then don't push it into the new_streams fifo | 
511 | if stream.connection.send_goaway_lowest == nil or promised_stream.id <= stream.connection.send_goaway_lowest then | |
512 | stream.connection.new_streams:push(promised_stream) | |
513 | stream.connection.new_streams_cond:signal(1) | |
514 | end | |
394 | 515 | end |
395 | 516 | return true |
396 | 517 | end |
397 | 518 | |
398 | -- HEADERS | |
399 | frame_handlers[0x1] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 | |
519 | frame_handlers[frame_types.HEADERS] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 | |
400 | 520 | if stream.id == 0 then |
401 | 521 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'HEADERS' frames MUST be associated with a stream"), ce.EILSEQ |
402 | 522 | end |
403 | 523 | if stream.state ~= "idle" and stream.state ~= "open" and stream.state ~= "half closed (local)" and stream.state ~= "reserved (remote)" then |
404 | return nil, h2_errors.STREAM_CLOSED:new_traceback("'HEADERS' frame not allowed in '" .. stream.state .. "' state", true), ce.EILSEQ | |
524 | return nil, h2_errors.STREAM_CLOSED:new_traceback("'HEADERS' frame not allowed in '" .. stream.state .. "' state"), ce.EILSEQ | |
405 | 525 | end |
406 | 526 | |
407 | 527 | local end_stream = band(flags, 0x1) ~= 0 |
452 | 572 | return process_end_headers(stream, end_stream, pad_len, pos, nil, payload) |
453 | 573 | else |
454 | 574 | stream.connection.need_continuation = stream |
575 | stream.connection.recv_headers_end_stream = end_stream | |
455 | 576 | stream.connection.recv_headers_buffer = { payload } |
456 | 577 | stream.connection.recv_headers_buffer_pos = pos |
457 | 578 | stream.connection.recv_headers_buffer_pad_len = pad_len |
461 | 582 | end |
462 | 583 | end |
463 | 584 | |
464 | function stream_methods:write_headers_frame(payload, end_stream, end_headers, padded, exclusive, stream_dep, weight, timeout) | |
585 | function stream_methods:write_headers_frame(payload, end_stream, end_headers, padded, exclusive, stream_dep, weight, timeout, flush) | |
465 | 586 | assert(self.state ~= "closed" and self.state ~= "half closed (local)") |
587 | if self.id == nil then | |
588 | self:pick_id() | |
589 | end | |
466 | 590 | local pad_len, pri, padding = "", "", "" |
467 | 591 | local flags = 0 |
468 | 592 | if end_stream then |
478 | 602 | end |
479 | 603 | if weight or stream_dep then |
480 | 604 | flags = bor(flags, 0x20) |
481 | assert(stream_dep < 0x80000000) | |
605 | assert(stream_dep <= 0x7fffffff) | |
482 | 606 | local tmp = stream_dep |
483 | 607 | if exclusive then |
484 | 608 | tmp = bor(tmp, 0x80000000) |
487 | 611 | pri = spack("> I4 B", tmp, weight) |
488 | 612 | end |
489 | 613 | payload = pad_len .. pri .. payload .. padding |
490 | local ok, err, errno = self:write_http2_frame(0x1, flags, payload, timeout) | |
491 | if ok == nil then return nil, err, errno end | |
614 | local ok, err, errno = self:write_http2_frame(frame_types.HEADERS, flags, payload, timeout, flush) | |
615 | if ok == nil then | |
616 | return nil, err, errno | |
617 | end | |
492 | 618 | self.stats_sent_headers = self.stats_sent_headers + 1 |
493 | if end_stream then | |
494 | if self.state == "half closed (remote)" then | |
495 | self:set_state("closed") | |
619 | if end_headers then | |
620 | if end_stream then | |
621 | if self.state == "half closed (remote)" or self.state == "reserved (local)" then | |
622 | self:set_state("closed") | |
623 | else | |
624 | self:set_state("half closed (local)") | |
625 | end | |
496 | 626 | else |
497 | self:set_state("half closed (local)") | |
498 | end | |
499 | else | |
500 | if self.state == "reserved (local)" then | |
501 | self:set_state("half closed (remote)") | |
502 | elseif self.state == "idle" then | |
503 | self:set_state("open") | |
504 | end | |
627 | if self.state == "idle" then | |
628 | self:set_state("open") | |
629 | elseif self.state == "reserved (local)" then | |
630 | self:set_state("half closed (remote)") | |
631 | end | |
632 | end | |
633 | else | |
634 | self.end_stream_after_continuation = end_stream | |
505 | 635 | end |
506 | 636 | return ok |
507 | 637 | end |
508 | 638 | |
509 | -- PRIORITY | |
510 | frame_handlers[0x2] = function(stream, flags, payload) -- luacheck: ignore 212 | |
639 | frame_handlers[frame_types.PRIORITY] = function(stream, flags, payload) -- luacheck: ignore 212 | |
511 | 640 | if stream.id == 0 then |
512 | 641 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'PRIORITY' frames MUST be associated with a stream"), ce.EILSEQ |
513 | 642 | end |
522 | 651 | exclusive = band(tmp, 0x80000000) ~= 0 |
523 | 652 | stream_dep = band(tmp, 0x7fffffff) |
524 | 653 | |
654 | -- 5.3.1. Stream Dependencies | |
655 | -- A dependency on a stream that is not currently in the tree | |
656 | -- results in that stream being given a default priority | |
525 | 657 | local new_parent = stream.connection.streams[stream_dep] |
526 | local ok, err, errno = new_parent:reprioritise(stream, exclusive) | |
527 | if not ok then | |
528 | return nil, err, errno | |
529 | end | |
530 | stream.weight = weight | |
658 | if new_parent then | |
659 | local ok, err, errno = new_parent:reprioritise(stream, exclusive) | |
660 | if not ok then | |
661 | return nil, err, errno | |
662 | end | |
663 | stream.weight = weight | |
664 | end | |
531 | 665 | |
532 | 666 | return true |
533 | 667 | end |
534 | 668 | |
535 | function stream_methods:write_priority_frame(exclusive, stream_dep, weight, timeout) | |
536 | assert(stream_dep < 0x80000000) | |
669 | function stream_methods:write_priority_frame(exclusive, stream_dep, weight, timeout, flush) | |
670 | assert(stream_dep <= 0x7fffffff) | |
671 | if self.id == nil then | |
672 | self:pick_id() | |
673 | end | |
537 | 674 | local tmp = stream_dep |
538 | 675 | if exclusive then |
539 | 676 | tmp = bor(tmp, 0x80000000) |
540 | 677 | end |
541 | 678 | weight = weight and weight - 1 or 0 |
542 | 679 | local payload = spack("> I4 B", tmp, weight) |
543 | return self:write_http2_frame(0x2, 0, payload, timeout) | |
544 | end | |
545 | ||
546 | -- RST_STREAM | |
547 | frame_handlers[0x3] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 | |
680 | return self:write_http2_frame(frame_types.PRIORITY, 0, payload, timeout, flush) | |
681 | end | |
682 | ||
683 | frame_handlers[frame_types.RST_STREAM] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 | |
548 | 684 | if stream.id == 0 then |
549 | 685 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'RST_STREAM' frames MUST be associated with a stream"), ce.EILSEQ |
550 | 686 | end |
553 | 689 | end |
554 | 690 | if stream.state == "idle" then |
555 | 691 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'RST_STREAM' frames MUST NOT be sent for a stream in the 'idle' state"), ce.EILSEQ |
692 | elseif stream.state == "closed" then | |
693 | -- probably a delayed RST_STREAM, ignore | |
694 | return true | |
556 | 695 | end |
557 | 696 | |
558 | 697 | local err_code = sunpack(">I4", payload) |
559 | 698 | |
560 | 699 | stream.rst_stream_error = (h2_errors[err_code] or h2_errors.INTERNAL_ERROR):new { |
561 | 700 | message = string.format("'RST_STREAM' on stream #%d (code=0x%x)", stream.id, err_code); |
701 | stream_error = true; | |
562 | 702 | } |
563 | 703 | |
564 | 704 | stream:set_state("closed") |
565 | stream.recv_headers_cond:signal() | |
566 | stream.chunk_cond:signal() | |
567 | 705 | |
568 | 706 | return true |
569 | 707 | end |
570 | 708 | |
571 | function stream_methods:write_rst_stream(err_code, timeout) | |
709 | function stream_methods:write_rst_stream_frame(err_code, timeout, flush) | |
572 | 710 | if self.id == 0 then |
573 | 711 | h2_errors.PROTOCOL_ERROR("'RST_STREAM' frames MUST be associated with a stream") |
574 | 712 | end |
577 | 715 | end |
578 | 716 | local flags = 0 |
579 | 717 | local payload = spack(">I4", err_code) |
580 | local ok, err, errno = self:write_http2_frame(0x3, flags, payload, timeout) | |
718 | local ok, err, errno = self:write_http2_frame(frame_types.RST_STREAM, flags, payload, timeout, flush) | |
581 | 719 | if not ok then return nil, err, errno end |
582 | 720 | if self.state ~= "closed" then |
583 | 721 | self:set_state("closed") |
586 | 724 | return ok |
587 | 725 | end |
588 | 726 | |
589 | -- SETTING | |
590 | frame_handlers[0x4] = function(stream, flags, payload, deadline) | |
727 | function stream_methods:rst_stream(err, timeout) | |
728 | local code | |
729 | if err == nil then | |
730 | code = 0 | |
731 | elseif h2_error.is(err) then | |
732 | code = err.code | |
733 | else | |
734 | err = h2_errors.INTERNAL_ERROR:new { | |
735 | message = tostring(err); | |
736 | stream_error = true; | |
737 | } | |
738 | code = err.code | |
739 | end | |
740 | if self.rst_stream_error == nil then | |
741 | self.rst_stream_error = err | |
742 | end | |
743 | return self:write_rst_stream_frame(code, timeout) | |
744 | end | |
745 | ||
746 | frame_handlers[frame_types.SETTING] = function(stream, flags, payload, deadline) | |
591 | 747 | if stream.id ~= 0 then |
592 | 748 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("stream identifier for a 'SETTINGS' frame MUST be zero"), ce.EILSEQ |
593 | 749 | end |
606 | 762 | local peer_settings = {} |
607 | 763 | for i=1, #payload, 6 do |
608 | 764 | local id, val = sunpack(">I2 I4", payload, i) |
609 | if id == 0x1 then | |
765 | if id == known_settings.HEADER_TABLE_SIZE then | |
610 | 766 | stream.connection.encoding_context:set_max_dynamic_table_size(val) |
611 | 767 | -- Add a 'max size' element to the next outgoing header |
612 | 768 | stream.connection.encoding_context:encode_max_size(val) |
613 | elseif id == 0x2 then | |
769 | elseif id == known_settings.ENABLE_PUSH then | |
614 | 770 | -- Convert to boolean |
615 | 771 | if val == 0 then |
616 | 772 | val = false |
625 | 781 | -- error of type PROTOCOL_ERROR. |
626 | 782 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("SETTINGS_ENABLE_PUSH not allowed for clients"), ce.EILSEQ |
627 | 783 | end |
628 | elseif id == 0x4 then | |
784 | elseif id == known_settings.INITIAL_WINDOW_SIZE then | |
629 | 785 | if val >= 2^31 then |
630 | 786 | return nil, h2_errors.FLOW_CONTROL_ERROR:new_traceback("SETTINGS_INITIAL_WINDOW_SIZE must be less than 2^31"), ce.EILSEQ |
631 | 787 | end |
632 | elseif id == 0x5 then | |
788 | elseif id == known_settings.MAX_FRAME_SIZE then | |
633 | 789 | if val < 16384 then |
634 | 790 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("SETTINGS_MAX_FRAME_SIZE must be greater than or equal to 16384"), ce.EILSEQ |
635 | 791 | elseif val >= 2^24 then |
640 | 796 | end |
641 | 797 | stream.connection:set_peer_settings(peer_settings) |
642 | 798 | -- Ack server's settings |
643 | -- XXX: This shouldn't ignore all errors (it probably should not flush) | |
644 | stream:write_settings_frame(true, nil, deadline and deadline-monotime()) | |
799 | local ok, err, errno = stream:write_settings_frame(true, nil, 0, "f") | |
800 | if not ok then | |
801 | return ok, err, errno | |
802 | end | |
803 | -- ignore :flush failure | |
804 | stream.connection:flush(deadline and deadline-monotime()) | |
645 | 805 | return true |
646 | 806 | end |
647 | 807 | end |
655 | 815 | i = i + 1 |
656 | 816 | end |
657 | 817 | local HEADER_TABLE_SIZE = settings[0x1] |
818 | if HEADER_TABLE_SIZE == nil then | |
819 | HEADER_TABLE_SIZE = settings.HEADER_TABLE_SIZE | |
820 | end | |
658 | 821 | if HEADER_TABLE_SIZE ~= nil then |
659 | 822 | append(0x1, HEADER_TABLE_SIZE) |
660 | 823 | end |
661 | 824 | local ENABLE_PUSH = settings[0x2] |
825 | if ENABLE_PUSH == nil then | |
826 | ENABLE_PUSH = settings.ENABLE_PUSH | |
827 | end | |
662 | 828 | if ENABLE_PUSH ~= nil then |
663 | 829 | if type(ENABLE_PUSH) == "boolean" then |
664 | 830 | ENABLE_PUSH = ENABLE_PUSH and 1 or 0 |
665 | 831 | end |
666 | 832 | append(0x2, ENABLE_PUSH) |
833 | ENABLE_PUSH = ENABLE_PUSH ~= 0 | |
667 | 834 | end |
668 | 835 | local MAX_CONCURRENT_STREAMS = settings[0x3] |
836 | if MAX_CONCURRENT_STREAMS == nil then | |
837 | MAX_CONCURRENT_STREAMS = settings.MAX_CONCURRENT_STREAMS | |
838 | end | |
669 | 839 | if MAX_CONCURRENT_STREAMS ~= nil then |
670 | 840 | append(0x3, MAX_CONCURRENT_STREAMS) |
671 | 841 | end |
672 | 842 | local INITIAL_WINDOW_SIZE = settings[0x4] |
843 | if INITIAL_WINDOW_SIZE == nil then | |
844 | INITIAL_WINDOW_SIZE = settings.INITIAL_WINDOW_SIZE | |
845 | end | |
673 | 846 | if INITIAL_WINDOW_SIZE ~= nil then |
674 | 847 | if INITIAL_WINDOW_SIZE >= 2^31 then |
675 | 848 | h2_errors.FLOW_CONTROL_ERROR("SETTINGS_INITIAL_WINDOW_SIZE must be less than 2^31") |
677 | 850 | append(0x4, INITIAL_WINDOW_SIZE) |
678 | 851 | end |
679 | 852 | local MAX_FRAME_SIZE = settings[0x5] |
853 | if MAX_FRAME_SIZE == nil then | |
854 | MAX_FRAME_SIZE = settings.MAX_FRAME_SIZE | |
855 | end | |
680 | 856 | if MAX_FRAME_SIZE ~= nil then |
681 | 857 | if MAX_FRAME_SIZE < 16384 then |
682 | 858 | h2_errors.PROTOCOL_ERROR("SETTINGS_MAX_FRAME_SIZE must be greater than or equal to 16384") |
686 | 862 | append(0x5, MAX_FRAME_SIZE) |
687 | 863 | end |
688 | 864 | local MAX_HEADER_LIST_SIZE = settings[0x6] |
865 | if MAX_HEADER_LIST_SIZE == nil then | |
866 | MAX_HEADER_LIST_SIZE = settings.MAX_HEADER_LIST_SIZE | |
867 | end | |
689 | 868 | if MAX_HEADER_LIST_SIZE ~= nil then |
690 | 869 | append(0x6, MAX_HEADER_LIST_SIZE) |
691 | 870 | end |
692 | return spack(">" .. ("I2 I4"):rep(i), unpack(a, 1, i*2)) | |
693 | end | |
694 | ||
695 | function stream_methods:write_settings_frame(ACK, settings, timeout) | |
871 | local settings_to_merge = { | |
872 | HEADER_TABLE_SIZE; | |
873 | ENABLE_PUSH; | |
874 | MAX_CONCURRENT_STREAMS; | |
875 | INITIAL_WINDOW_SIZE; | |
876 | MAX_FRAME_SIZE; | |
877 | MAX_HEADER_LIST_SIZE; | |
878 | } | |
879 | return spack(">" .. ("I2 I4"):rep(i), unpack(a, 1, i*2)), settings_to_merge | |
880 | end | |
881 | ||
882 | function stream_methods:write_settings_frame(ACK, settings, timeout, flush) | |
696 | 883 | if self.id ~= 0 then |
697 | 884 | h2_errors.PROTOCOL_ERROR("'SETTINGS' frames must be on stream id 0") |
698 | 885 | end |
699 | local flags, payload | |
886 | local flags, payload, settings_to_merge | |
700 | 887 | if ACK then |
701 | 888 | if settings ~= nil then |
702 | 889 | h2_errors.PROTOCOL_ERROR("'SETTINGS' ACK cannot have new settings") |
705 | 892 | payload = "" |
706 | 893 | else |
707 | 894 | flags = 0 |
708 | payload = pack_settings_payload(settings) | |
709 | end | |
710 | local ok, err, errno = self:write_http2_frame(0x4, flags, payload, timeout) | |
895 | payload, settings_to_merge = pack_settings_payload(settings) | |
896 | end | |
897 | local ok, err, errno = self:write_http2_frame(frame_types.SETTING, flags, payload, timeout, flush) | |
711 | 898 | if ok and not ACK then |
712 | 899 | local n = self.connection.send_settings.n + 1 |
713 | 900 | self.connection.send_settings.n = n |
714 | self.connection.send_settings[n] = settings | |
901 | self.connection.send_settings[n] = settings_to_merge | |
715 | 902 | ok = n |
716 | 903 | end |
717 | 904 | return ok, err, errno |
718 | 905 | end |
719 | 906 | |
720 | -- PUSH_PROMISE | |
721 | frame_handlers[0x5] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 | |
722 | if not stream.connection.acked_settings[0x2] then | |
907 | frame_handlers[frame_types.PUSH_PROMISE] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 | |
908 | if not stream.connection.acked_settings[known_settings.ENABLE_PUSH] then | |
723 | 909 | -- An endpoint that has both set this parameter to 0 and had it acknowledged MUST |
724 | 910 | -- treat the receipt of a PUSH_PROMISE frame as a connection error of type PROTOCOL_ERROR. |
725 | 911 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("SETTINGS_ENABLE_PUSH is 0"), ce.EILSEQ |
755 | 941 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("headers too large"), ce.EILSEQ |
756 | 942 | end |
757 | 943 | |
944 | local promised_stream = stream.connection:new_stream(promised_stream_id) | |
945 | stream:reprioritise(promised_stream) | |
946 | ||
758 | 947 | if end_headers then |
759 | return process_end_headers(stream, false, pad_len, pos, promised_stream_id, payload) | |
760 | else | |
948 | return process_end_headers(stream, false, pad_len, pos, promised_stream, payload) | |
949 | else | |
950 | stream.connection.need_continuation = stream | |
951 | stream.connection.promised_stream = promised_stream | |
952 | stream.connection.recv_headers_end_stream = false | |
761 | 953 | stream.connection.recv_headers_buffer = { payload } |
762 | 954 | stream.connection.recv_headers_buffer_pos = pos |
763 | 955 | stream.connection.recv_headers_buffer_pad_len = pad_len |
764 | 956 | stream.connection.recv_headers_buffer_items = 1 |
765 | 957 | stream.connection.recv_headers_buffer_length = len |
766 | stream.connection.promised_steam_id = promised_stream_id | |
767 | 958 | return true |
768 | 959 | end |
769 | 960 | end |
770 | 961 | |
771 | function stream_methods:write_push_promise_frame(promised_stream_id, payload, end_headers, padded, timeout) | |
962 | function stream_methods:write_push_promise_frame(promised_stream_id, payload, end_headers, padded, timeout, flush) | |
772 | 963 | assert(self.state == "open" or self.state == "half closed (remote)") |
773 | 964 | assert(self.id ~= 0) |
965 | local promised_stream = self.connection.streams[promised_stream_id] | |
966 | assert(promised_stream and promised_stream.state == "idle") | |
967 | -- 8.2.1: PUSH_PROMISE frames MUST NOT be sent by the client. | |
968 | assert(self.type == "server" and promised_stream.id % 2 == 0) | |
774 | 969 | local pad_len, padding = "", "" |
775 | 970 | local flags = 0 |
776 | 971 | if end_headers then |
781 | 976 | pad_len = spack("> B", padded) |
782 | 977 | padding = ("\0"):rep(padded) |
783 | 978 | end |
784 | assert(promised_stream_id > 0) | |
785 | assert(promised_stream_id < 0x80000000) | |
786 | assert(promised_stream_id % 2 == 0) | |
787 | -- TODO: promised_stream_id must be valid for sender | |
788 | 979 | promised_stream_id = spack(">I4", promised_stream_id) |
789 | 980 | payload = pad_len .. promised_stream_id .. payload .. padding |
790 | return self:write_http2_frame(0x5, flags, payload, timeout) | |
791 | end | |
792 | ||
793 | -- PING | |
794 | frame_handlers[0x6] = function(stream, flags, payload, deadline) | |
981 | local ok, err, errno = self:write_http2_frame(frame_types.PUSH_PROMISE, flags, payload, 0, "f") | |
982 | if ok == nil then | |
983 | return nil, err, errno | |
984 | end | |
985 | if end_headers then | |
986 | promised_stream:set_state("reserved (local)") | |
987 | else | |
988 | promised_stream.end_stream_after_continuation = false | |
989 | end | |
990 | if flush ~= "f" then | |
991 | return self.connection:flush(timeout) | |
992 | else | |
993 | return true | |
994 | end | |
995 | end | |
996 | ||
997 | frame_handlers[frame_types.PING] = function(stream, flags, payload, deadline) | |
795 | 998 | if stream.id ~= 0 then |
796 | 999 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'PING' must be on stream id 0"), ce.EILSEQ |
797 | 1000 | end |
813 | 1016 | end |
814 | 1017 | end |
815 | 1018 | |
816 | function stream_methods:write_ping_frame(ACK, payload, timeout) | |
1019 | function stream_methods:write_ping_frame(ACK, payload, timeout, flush) | |
817 | 1020 | if self.id ~= 0 then |
818 | 1021 | h2_errors.PROTOCOL_ERROR("'PING' frames must be on stream id 0") |
819 | 1022 | end |
821 | 1024 | h2_errors.FRAME_SIZE_ERROR("'PING' frames must have 8 byte payload") |
822 | 1025 | end |
823 | 1026 | local flags = ACK and 0x1 or 0 |
824 | return self:write_http2_frame(0x6, flags, payload, timeout) | |
825 | end | |
826 | ||
827 | -- GOAWAY | |
828 | frame_handlers[0x7] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 | |
1027 | return self:write_http2_frame(frame_types.PING, flags, payload, timeout, flush) | |
1028 | end | |
1029 | ||
1030 | frame_handlers[frame_types.GOAWAY] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 | |
829 | 1031 | if stream.id ~= 0 then |
830 | 1032 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'GOAWAY' frames must be on stream id 0"), ce.EILSEQ |
831 | 1033 | end |
843 | 1045 | return true |
844 | 1046 | end |
845 | 1047 | |
846 | function stream_methods:write_goaway_frame(last_streamid, err_code, debug_msg, timeout) | |
1048 | function stream_methods:write_goaway_frame(last_streamid, err_code, debug_msg, timeout, flush) | |
847 | 1049 | if self.id ~= 0 then |
848 | 1050 | h2_errors.PROTOCOL_ERROR("'GOAWAY' frames MUST be on stream 0") |
849 | 1051 | end |
850 | assert(last_streamid) | |
1052 | if self.connection.send_goaway_lowest and last_streamid > self.connection.send_goaway_lowest then | |
1053 | h2_errors.PROTOCOL_ERROR("Endpoints MUST NOT increase the value they send in the last stream identifier") | |
1054 | end | |
851 | 1055 | local flags = 0 |
852 | 1056 | local payload = spack(">I4 I4", last_streamid, err_code) |
853 | 1057 | if debug_msg then |
854 | 1058 | payload = payload .. debug_msg |
855 | 1059 | end |
856 | local ok, err, errno = self:write_http2_frame(0x7, flags, payload, timeout) | |
1060 | local ok, err, errno = self:write_http2_frame(frame_types.GOAWAY, flags, payload, 0, "f") | |
857 | 1061 | if not ok then |
858 | 1062 | return nil, err, errno |
859 | 1063 | end |
860 | self.connection.send_goaway_lowest = math.min(last_streamid, self.connection.send_goaway_lowest or math.huge) | |
861 | return true | |
862 | end | |
863 | ||
864 | -- WINDOW_UPDATE | |
865 | frame_handlers[0x8] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 | |
1064 | self.connection.send_goaway_lowest = last_streamid | |
1065 | if flush ~= "f" then | |
1066 | return self.connection:flush(timeout) | |
1067 | else | |
1068 | return true | |
1069 | end | |
1070 | end | |
1071 | ||
1072 | frame_handlers[frame_types.WINDOW_UPDATE] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 | |
866 | 1073 | if #payload ~= 4 then |
867 | 1074 | return nil, h2_errors.FRAME_SIZE_ERROR:new_traceback("'WINDOW_UPDATE' frames must be 4 bytes"), ce.EILSEQ |
868 | 1075 | end |
890 | 1097 | return nil, h2_errors.FLOW_CONTROL_ERROR:new_traceback("A sender MUST NOT allow a flow-control window to exceed 2^31-1 octets", stream.id ~= 0), ce.EILSEQ |
891 | 1098 | end |
892 | 1099 | ob.peer_flow_credits = newval |
893 | ob.peer_flow_credits_increase:signal() | |
1100 | ob.peer_flow_credits_change:signal() | |
894 | 1101 | |
895 | 1102 | return true |
896 | 1103 | end |
897 | 1104 | |
898 | function stream_methods:write_window_update_frame(inc, timeout) | |
1105 | function stream_methods:write_window_update_frame(inc, timeout, flush) | |
899 | 1106 | local flags = 0 |
900 | 1107 | if self.id ~= 0 and self.state == "idle" then |
901 | 1108 | h2_errors.PROTOCOL_ERROR([['WINDOW_UPDATE' frames not allowed in "idle" state]]) |
902 | 1109 | end |
903 | if inc >= 0x80000000 or inc <= 0 then | |
1110 | if inc > 0x7fffffff or inc <= 0 then | |
904 | 1111 | h2_errors.PROTOCOL_ERROR("invalid window update increment", true) |
905 | 1112 | end |
906 | 1113 | local payload = spack(">I4", inc) |
907 | return self:write_http2_frame(0x8, flags, payload, timeout) | |
1114 | return self:write_http2_frame(frame_types.WINDOW_UPDATE, flags, payload, timeout, flush) | |
908 | 1115 | end |
909 | 1116 | |
910 | 1117 | function stream_methods:write_window_update(inc, timeout) |
911 | while inc >= 0x80000000 do | |
912 | local ok, err, errno = self:write_window_update_frame(0x7fffffff, 0) | |
1118 | while inc > 0x7fffffff do | |
1119 | local ok, err, errno = self:write_window_update_frame(0x7fffffff, 0, "f") | |
913 | 1120 | if not ok then |
914 | 1121 | return nil, err, errno |
915 | 1122 | end |
918 | 1125 | return self:write_window_update_frame(inc, timeout) |
919 | 1126 | end |
920 | 1127 | |
921 | -- CONTINUATION | |
922 | frame_handlers[0x9] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 | |
1128 | frame_handlers[frame_types.CONTINUATION] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 | |
923 | 1129 | if stream.id == 0 then |
924 | 1130 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'CONTINUATION' frames MUST be associated with a stream"), ce.EILSEQ |
925 | 1131 | end |
937 | 1143 | stream.connection.recv_headers_buffer_length = len |
938 | 1144 | |
939 | 1145 | if end_headers then |
1146 | local promised_stream = stream.connection.promised_stream | |
940 | 1147 | local pad_len = stream.connection.recv_headers_buffer_pad_len |
941 | 1148 | local pos = stream.connection.recv_headers_buffer_pos |
1149 | local end_stream = stream.connection.recv_headers_end_stream | |
942 | 1150 | payload = table.concat(stream.connection.recv_headers_buffer, "", 1, stream.connection.recv_headers_buffer_items) |
943 | local promised_steam_id = stream.connection.promised_steam_id | |
1151 | stream.connection.recv_headers_end_stream = nil | |
944 | 1152 | stream.connection.recv_headers_buffer = nil |
945 | 1153 | stream.connection.recv_headers_buffer_pos = nil |
946 | 1154 | stream.connection.recv_headers_buffer_pad_len = nil |
947 | 1155 | stream.connection.recv_headers_buffer_items = nil |
948 | 1156 | stream.connection.recv_headers_buffer_length = nil |
949 | stream.connection.promised_steam_id = nil | |
1157 | stream.connection.promised_stream = nil | |
950 | 1158 | stream.connection.need_continuation = nil |
951 | return process_end_headers(stream, false, pad_len, pos, promised_steam_id, payload) | |
1159 | return process_end_headers(stream, end_stream, pad_len, pos, promised_stream, payload) | |
952 | 1160 | else |
953 | 1161 | return true |
954 | 1162 | end |
955 | 1163 | end |
956 | 1164 | |
957 | function stream_methods:write_continuation_frame(payload, end_headers, timeout) | |
958 | assert(self.state == "open" or self.state == "half closed (remote)") | |
1165 | function stream_methods:write_continuation_frame(payload, end_headers, timeout, flush) | |
1166 | assert(self.state ~= "closed" and self.state ~= "half closed (local)") | |
959 | 1167 | local flags = 0 |
960 | 1168 | if end_headers then |
961 | 1169 | flags = bor(flags, 0x4) |
962 | 1170 | end |
963 | return self:write_http2_frame(0x9, flags, payload, timeout) | |
1171 | local ok, err, errno = self:write_http2_frame(frame_types.CONTINUATION, flags, payload, timeout, flush) | |
1172 | if ok == nil then | |
1173 | return nil, err, errno | |
1174 | end | |
1175 | if end_headers then | |
1176 | if self.end_stream_after_continuation then | |
1177 | if self.state == "half closed (remote)" or self.state == "reserved (local)" then | |
1178 | self:set_state("closed") | |
1179 | else | |
1180 | self:set_state("half closed (local)") | |
1181 | end | |
1182 | else | |
1183 | if self.state == "idle" then | |
1184 | self:set_state("open") | |
1185 | elseif self.state == "reserved (local)" then | |
1186 | self:set_state("half closed (remote)") | |
1187 | end | |
1188 | end | |
1189 | else | |
1190 | self.end_stream_after_continuation = nil | |
1191 | end | |
1192 | return ok | |
964 | 1193 | end |
965 | 1194 | |
966 | 1195 | ------------------------------------------- |
967 | 1196 | |
968 | 1197 | function stream_methods:shutdown() |
969 | 1198 | if self.state ~= "idle" and self.state ~= "closed" and self.id ~= 0 then |
970 | self:write_rst_stream(0, 0) -- ignore result | |
1199 | self:rst_stream(nil, 0) -- ignore result | |
971 | 1200 | end |
972 | 1201 | local len = 0 |
973 | 1202 | for i=1, self.chunk_fifo:length() do |
974 | 1203 | local chunk = self.chunk_fifo:peek(i) |
975 | 1204 | if chunk ~= nil then |
976 | chunk:ack(true) | |
977 | len = len + #chunk.data | |
1205 | len = len + chunk:ack() | |
978 | 1206 | end |
979 | 1207 | end |
980 | 1208 | if len > 0 then |
1027 | 1255 | return nil |
1028 | 1256 | else |
1029 | 1257 | local data = chunk.data |
1030 | chunk:ack(false) | |
1258 | local len = chunk:ack() | |
1259 | if len > 0 then | |
1260 | -- if they don't get flushed now they will get flushed on next read or write | |
1261 | self:write_window_update(len, 0) | |
1262 | self.connection:write_window_update(len, 0) | |
1263 | end | |
1031 | 1264 | return data |
1032 | 1265 | end |
1033 | 1266 | end |
1034 | 1267 | |
1035 | 1268 | function stream_methods:unget(str) |
1036 | local chunk = new_chunk(self, 0, str) -- 0 means :ack does nothing | |
1269 | local chunk = new_chunk(0, str) | |
1037 | 1270 | self.chunk_fifo:insert(1, chunk) |
1271 | self.chunk_cond:signal() | |
1038 | 1272 | return true |
1039 | 1273 | end |
1040 | 1274 | |
1041 | local function write_headers(self, func, headers, timeout) | |
1275 | local function write_headers(self, func, headers, extra_frame_data_len, timeout) | |
1042 | 1276 | local deadline = timeout and (monotime()+timeout) |
1277 | ||
1278 | local SETTINGS_MAX_FRAME_SIZE = self.connection.peer_settings[known_settings.MAX_FRAME_SIZE] | |
1279 | local first_frame_max_size = SETTINGS_MAX_FRAME_SIZE - extra_frame_data_len | |
1280 | assert(first_frame_max_size >= 0) | |
1281 | ||
1043 | 1282 | local encoding_context = self.connection.encoding_context |
1044 | 1283 | encoding_context:encode_headers(headers) |
1045 | 1284 | local payload = encoding_context:render_data() |
1046 | 1285 | encoding_context:clear_data() |
1047 | 1286 | |
1048 | local SETTINGS_MAX_FRAME_SIZE = self.connection.peer_settings[0x5] | |
1049 | if #payload <= SETTINGS_MAX_FRAME_SIZE then | |
1287 | if #payload <= first_frame_max_size then | |
1050 | 1288 | local ok, err, errno = func(payload, true, deadline) |
1051 | 1289 | if not ok then |
1052 | 1290 | return ok, err, errno |
1053 | 1291 | end |
1054 | 1292 | else |
1055 | 1293 | do |
1056 | local partial = payload:sub(1, SETTINGS_MAX_FRAME_SIZE) | |
1294 | local partial = payload:sub(1, first_frame_max_size) | |
1057 | 1295 | local ok, err, errno = func(partial, false, deadline) |
1058 | 1296 | if not ok then |
1059 | 1297 | return ok, err, errno |
1060 | 1298 | end |
1061 | 1299 | end |
1062 | local sent = SETTINGS_MAX_FRAME_SIZE | |
1300 | local sent = first_frame_max_size | |
1063 | 1301 | local max = #payload-SETTINGS_MAX_FRAME_SIZE |
1064 | 1302 | while sent < max do |
1065 | 1303 | local partial = payload:sub(sent+1, sent+SETTINGS_MAX_FRAME_SIZE) |
1088 | 1326 | local padded, exclusive, stream_dep, weight = nil, nil, nil, nil |
1089 | 1327 | return write_headers(self, function(payload, end_headers, deadline) |
1090 | 1328 | return self:write_headers_frame(payload, end_stream, end_headers, padded, exclusive, stream_dep, weight, deadline and deadline-monotime()) |
1091 | end, headers, timeout) | |
1329 | end, headers, 0, timeout) | |
1092 | 1330 | end |
1093 | 1331 | |
1094 | 1332 | function stream_methods:push_promise(headers, timeout) |
1095 | 1333 | assert(self.type == "server") |
1096 | 1334 | assert(headers, "missing argument: headers") |
1097 | 1335 | assert(validate_headers(headers, true, 1, false)) |
1098 | assert(headers:has(":authority")) | |
1336 | assert(headers:has(":authority"), "PUSH_PROMISE must have an :authority") | |
1099 | 1337 | |
1100 | 1338 | local promised_stream = self.connection:new_stream() |
1339 | promised_stream:pick_id() | |
1101 | 1340 | self:reprioritise(promised_stream) |
1102 | local promised_stream_id = promised_stream.id | |
1103 | 1341 | |
1104 | 1342 | local padded = nil |
1105 | 1343 | local ok, err, errno = write_headers(self, function(payload, end_headers, deadline) |
1106 | return self:write_push_promise_frame(promised_stream_id, payload, end_headers, padded, deadline) | |
1107 | end, headers, timeout) | |
1344 | return self:write_push_promise_frame(promised_stream.id, payload, end_headers, padded, deadline) | |
1345 | end, headers, 4, timeout) -- 4 is size of promised stream id | |
1108 | 1346 | if not ok then |
1109 | 1347 | return nil, err, errno |
1110 | 1348 | end |
1111 | ||
1112 | promised_stream:set_state("reserved (local)") | |
1349 | promised_stream.recv_headers_fifo:push(headers) | |
1350 | promised_stream.recv_headers_cond:signal() | |
1113 | 1351 | |
1114 | 1352 | return promised_stream |
1115 | 1353 | end |
1118 | 1356 | local deadline = timeout and (monotime()+timeout) |
1119 | 1357 | local sent = 0 |
1120 | 1358 | while true do |
1121 | while self.peer_flow_credits == 0 do | |
1122 | local which = cqueues.poll(self.peer_flow_credits_increase, self.connection, timeout) | |
1359 | while self.peer_flow_credits <= 0 do | |
1360 | local which = cqueues.poll(self.peer_flow_credits_change, self.connection, timeout) | |
1123 | 1361 | if which == self.connection then |
1124 | 1362 | local ok, err, errno = self.connection:step(0) |
1125 | 1363 | if not ok then |
1130 | 1368 | end |
1131 | 1369 | timeout = deadline and (deadline-monotime()) |
1132 | 1370 | end |
1133 | while self.connection.peer_flow_credits == 0 do | |
1134 | local which = cqueues.poll(self.connection.peer_flow_credits_increase, self.connection, timeout) | |
1371 | while self.connection.peer_flow_credits <= 0 do | |
1372 | local which = cqueues.poll(self.connection.peer_flow_credits_change, self.connection, timeout) | |
1135 | 1373 | if which == self.connection then |
1136 | 1374 | local ok, err, errno = self.connection:step(0) |
1137 | 1375 | if not ok then |
1142 | 1380 | end |
1143 | 1381 | timeout = deadline and (deadline-monotime()) |
1144 | 1382 | end |
1145 | local SETTINGS_MAX_FRAME_SIZE = self.connection.peer_settings[0x5] | |
1383 | local SETTINGS_MAX_FRAME_SIZE = self.connection.peer_settings[known_settings.MAX_FRAME_SIZE] | |
1146 | 1384 | local max_available = math.min(self.peer_flow_credits, self.connection.peer_flow_credits, SETTINGS_MAX_FRAME_SIZE) |
1147 | 1385 | if max_available < (#payload - sent) then |
1148 | 1386 | if max_available > 0 then |
1149 | 1387 | -- send partial payload |
1150 | local ok, err, errno = self:write_data_frame(payload:sub(sent+1, sent+max_available), false, timeout) | |
1388 | local ok, err, errno = self:write_data_frame(payload:sub(sent+1, sent+max_available), false, false, timeout) | |
1151 | 1389 | if not ok then |
1152 | 1390 | return nil, err, errno |
1153 | 1391 | end |
1170 | 1408 | methods = stream_methods; |
1171 | 1409 | mt = stream_mt; |
1172 | 1410 | |
1411 | known_settings = known_settings; | |
1412 | frame_types = frame_types; | |
1173 | 1413 | frame_handlers = frame_handlers; |
1174 | 1414 | pack_settings_payload = pack_settings_payload; |
1175 | 1415 | } |
2 | 2 | |
3 | 3 | Design criteria: |
4 | 4 | - the same header field is allowed more than once |
5 | - must be able to fetch seperate occurences (important for some headers e.g. Set-Cookie) | |
6 | - optionally available as comma seperated list | |
5 | - must be able to fetch separate occurences (important for some headers e.g. Set-Cookie) | |
6 | - optionally available as comma separated list | |
7 | 7 | - http2 adds flag to headers that they should never be indexed |
8 | 8 | - header order should be recoverable |
9 | 9 | |
11 | 11 | An index of field name => array indices is kept. |
12 | 12 | ]] |
13 | 13 | |
14 | local unpack = table.unpack or unpack -- luacheck: ignore 113 | |
14 | local unpack = table.unpack or unpack -- luacheck: ignore 113 143 | |
15 | 15 | |
16 | 16 | local entry_methods = {} |
17 | 17 | local entry_mt = { |
0 | 0 | interface headers |
1 | const clone : (self) -> headers | |
2 | const append : (self, string, string, nil|boolean) -> () | |
3 | const each : (self) -> ((self) -> (string, string, boolean)) | |
4 | const has : (self, string) -> (boolean) | |
5 | const delete : (self, string) -> (boolean) | |
6 | const geti : (self, integer) -> (string, string, boolean) | |
7 | const get_as_sequence : (self, string) -> ({"n": integer, integer:string}) | |
8 | const get : (self, string) -> (string*) | |
9 | const get_comma_separated : (self, string) -> (string|nil) | |
10 | const modifyi : (self, integer, string, boolean?) -> () | |
11 | const upsert : (self, string, string, boolean?) -> () | |
12 | const sort : (self) -> () | |
13 | const dump : (self, nil|file, nil|string) -> () | |
1 | const clone: (self) -> (headers) | |
2 | const append: (self, string, string, boolean?) -> () | |
3 | const each: (self) -> ((self) -> (string, string, boolean)) | |
4 | const has: (self, string) -> (boolean) | |
5 | const delete: (self, string) -> (boolean) | |
6 | const geti: (self, integer) -> (string, string, boolean) | |
7 | const get_as_sequence: (self, string) -> ({"n": integer, integer:string}) | |
8 | const get: (self, string) -> (string*) | |
9 | const get_comma_separated: (self, string) -> (string|nil) | |
10 | const modifyi: (self, integer, string, boolean?) -> () | |
11 | const upsert: (self, string, string, boolean?) -> () | |
12 | const sort: (self) -> () | |
13 | const dump: (self, file?, string?) -> () | |
14 | 14 | end |
15 | 15 | |
16 | 16 | new : () -> (headers) |
1 | 1 | -- Reference documentation: https://http2.github.io/http2-spec/compression.html |
2 | 2 | |
3 | 3 | local schar = string.char |
4 | local spack = string.pack or require "compat53.string".pack | |
5 | local sunpack = string.unpack or require "compat53.string".unpack | |
4 | local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143 | |
5 | local sunpack = string.unpack or require "compat53.string".unpack -- luacheck: ignore 143 | |
6 | 6 | local band = require "http.bit".band |
7 | 7 | local bor = require "http.bit".bor |
8 | 8 | local new_headers = require "http.headers".new |
9 | local unpack = table.unpack or unpack -- luacheck: ignore 113 | |
9 | local unpack = table.unpack or unpack -- luacheck: ignore 113 143 | |
10 | 10 | local h2_errors = require "http.h2_error".errors |
11 | 11 | |
12 | 12 | -- Section 5.1 |
352 | 352 | end |
353 | 353 | byte_to_bitstring[string.char(i)] = val |
354 | 354 | end |
355 | local EOS_length = #huffman_codes.EOS | |
355 | 356 | huffman_decode = function(s) |
356 | 357 | local bitstring = s:gsub(".", byte_to_bitstring) |
357 | 358 | local node = huffman_tree |
364 | 365 | node = huffman_tree |
365 | 366 | elseif node == "EOS" then |
366 | 367 | -- 5.2: A Huffman encoded string literal containing the EOS symbol MUST be treated as a decoding error. |
367 | assert(node ~= 256, "invalid huffman code (EOS)") | |
368 | return nil, h2_errors.COMPRESSION_ERROR:new_traceback("invalid huffman code (EOS)") | |
368 | 369 | elseif nt ~= "table" then |
369 | error("invalid huffman code") | |
370 | return nil, h2_errors.COMPRESSION_ERROR:new_traceback("invalid huffman code") | |
370 | 371 | end |
371 | 372 | end |
372 | 373 | --[[ Ensure that any left over bits are all one. |
373 | 374 | Section 5.2: A padding not corresponding to the most significant bits |
374 | 375 | of the code for the EOS symbol MUST be treated as a decoding error]] |
375 | while type(node) == "table" do | |
376 | node = node["1"] | |
377 | end | |
378 | assert(node == "EOS", "invalid huffman padding") | |
376 | if node ~= huffman_tree then | |
377 | -- We check this by continuing through on the '1' branch and ensure that we end up at EOS | |
378 | local n_padding = EOS_length | |
379 | while type(node) == "table" do | |
380 | node = node["1"] | |
381 | n_padding = n_padding - 1 | |
382 | end | |
383 | if node ~= "EOS" then | |
384 | return nil, h2_errors.COMPRESSION_ERROR:new_traceback("invalid huffman padding: expected most significant bits to match EOS") | |
385 | end | |
386 | -- Section 5.2: A padding strictly longer than 7 bits MUST be treated as a decoding error | |
387 | if n_padding < 0 or n_padding >= 8 then | |
388 | return nil, h2_errors.COMPRESSION_ERROR:new_traceback("invalid huffman padding: too much padding") | |
389 | end | |
390 | end | |
379 | 391 | |
380 | 392 | return string.char(unpack(output)) |
381 | 393 | end |
412 | 424 | if newpos > #str+1 then return end |
413 | 425 | local val = str:sub(pos, newpos-1) |
414 | 426 | if huffman then |
415 | return huffman_decode(val), newpos | |
416 | else | |
417 | return val, newpos | |
418 | end | |
427 | local err | |
428 | val, err = huffman_decode(val) | |
429 | if not val then | |
430 | return nil, err | |
431 | end | |
432 | end | |
433 | return val, newpos | |
419 | 434 | end |
420 | 435 | |
421 | 436 | local function compound_key(name, value) |
429 | 444 | return 32 - 8 + #k -- 8 is number of bytes of overhead introduced by compound_key |
430 | 445 | end |
431 | 446 | local static_names_to_index = {} |
432 | local static_index_to_names = {} | |
433 | local static_pairs = {} -- Duplicate writes are okay | |
447 | local static_pairs = {} | |
434 | 448 | local max_static_index |
435 | 449 | do |
436 | 450 | -- We prefer earlier indexes as examples in spec are like that |
437 | local function s(i, name) | |
451 | local function p(i, name, value) | |
438 | 452 | if not static_names_to_index[name] then |
439 | 453 | static_names_to_index[name] = i |
440 | static_index_to_names[i] = name | |
441 | end | |
442 | end | |
443 | local function p(i, name, value) | |
444 | s(i, name) | |
445 | local k = compound_key(name, value) | |
454 | end | |
455 | local k = compound_key(name, value or "") | |
446 | 456 | static_pairs[k] = i |
447 | 457 | static_pairs[i] = k |
448 | 458 | end |
449 | s( 1, ":authority") | |
459 | p( 1, ":authority") | |
450 | 460 | p( 2, ":method", "GET") |
451 | 461 | p( 3, ":method", "POST") |
452 | 462 | p( 4, ":path", "/") |
460 | 470 | p(12, ":status", "400") |
461 | 471 | p(13, ":status", "404") |
462 | 472 | p(14, ":status", "500") |
463 | s(15, "accept-charset") | |
473 | p(15, "accept-charset") | |
464 | 474 | p(16, "accept-encoding", "gzip, deflate") |
465 | s(17, "accept-language") | |
466 | s(18, "accept-ranges") | |
467 | s(19, "accept") | |
468 | s(20, "access-control-allow-origin") | |
469 | s(21, "age") | |
470 | s(22, "allow") | |
471 | s(23, "authorization") | |
472 | s(24, "cache-control") | |
473 | s(25, "content-disposition") | |
474 | s(26, "content-encoding") | |
475 | s(27, "content-language") | |
476 | s(28, "content-length") | |
477 | s(29, "content-location") | |
478 | s(30, "content-range") | |
479 | s(31, "content-type") | |
480 | s(32, "cookie") | |
481 | s(33, "date") | |
482 | s(34, "etag") | |
483 | s(35, "expect") | |
484 | s(36, "expires") | |
485 | s(37, "from") | |
486 | s(38, "host") | |
487 | s(39, "if-match") | |
488 | s(40, "if-modified-since") | |
489 | s(41, "if-none-match") | |
490 | s(42, "if-range") | |
491 | s(43, "if-unmodified-since") | |
492 | s(44, "last-modified") | |
493 | s(45, "link") | |
494 | s(46, "location") | |
495 | s(47, "max-forwards") | |
496 | s(48, "proxy-authenticate") | |
497 | s(49, "proxy-authorization") | |
498 | s(50, "range") | |
499 | s(51, "referer") | |
500 | s(52, "refresh") | |
501 | s(53, "retry-after") | |
502 | s(54, "server") | |
503 | s(55, "set-cookie") | |
504 | s(56, "strict-transport-security") | |
505 | s(57, "transfer-encoding") | |
506 | s(58, "user-agent") | |
507 | s(59, "vary") | |
508 | s(60, "via") | |
509 | s(61, "www-authenticate") | |
475 | p(17, "accept-language") | |
476 | p(18, "accept-ranges") | |
477 | p(19, "accept") | |
478 | p(20, "access-control-allow-origin") | |
479 | p(21, "age") | |
480 | p(22, "allow") | |
481 | p(23, "authorization") | |
482 | p(24, "cache-control") | |
483 | p(25, "content-disposition") | |
484 | p(26, "content-encoding") | |
485 | p(27, "content-language") | |
486 | p(28, "content-length") | |
487 | p(29, "content-location") | |
488 | p(30, "content-range") | |
489 | p(31, "content-type") | |
490 | p(32, "cookie") | |
491 | p(33, "date") | |
492 | p(34, "etag") | |
493 | p(35, "expect") | |
494 | p(36, "expires") | |
495 | p(37, "from") | |
496 | p(38, "host") | |
497 | p(39, "if-match") | |
498 | p(40, "if-modified-since") | |
499 | p(41, "if-none-match") | |
500 | p(42, "if-range") | |
501 | p(43, "if-unmodified-since") | |
502 | p(44, "last-modified") | |
503 | p(45, "link") | |
504 | p(46, "location") | |
505 | p(47, "max-forwards") | |
506 | p(48, "proxy-authenticate") | |
507 | p(49, "proxy-authorization") | |
508 | p(50, "range") | |
509 | p(51, "referer") | |
510 | p(52, "refresh") | |
511 | p(53, "retry-after") | |
512 | p(54, "server") | |
513 | p(55, "set-cookie") | |
514 | p(56, "strict-transport-security") | |
515 | p(57, "transfer-encoding") | |
516 | p(58, "user-agent") | |
517 | p(59, "vary") | |
518 | p(60, "via") | |
519 | p(61, "www-authenticate") | |
510 | 520 | max_static_index = 61 |
511 | 521 | end |
512 | 522 | |
722 | 732 | return nil |
723 | 733 | end |
724 | 734 | |
725 | function methods:lookup_index(index, allow_single) | |
735 | function methods:lookup_index(index) | |
726 | 736 | if index <= max_static_index then |
727 | 737 | local k = static_pairs[index] |
728 | 738 | if k then |
729 | 739 | return uncompound_key(k) |
730 | end | |
731 | if allow_single then | |
732 | local name = static_index_to_names[index] | |
733 | if name then | |
734 | return name, nil | |
735 | end | |
736 | 740 | end |
737 | 741 | else -- Dynamic? |
738 | 742 | local id = self:dynamic_index_to_table_id(index) |
796 | 800 | if name == nil then |
797 | 801 | return name, pos |
798 | 802 | end |
799 | if name:match("%u") then | |
800 | return nil, h2_errors.PROTOCOL_ERROR:new_traceback("malformed: header fields must not be uppercase") | |
801 | end | |
802 | 803 | value, pos = decode_string(payload, pos) |
803 | 804 | if value == nil then |
804 | 805 | return value, pos |
805 | 806 | end |
806 | 807 | else |
807 | name = self:lookup_index(index, true) | |
808 | name = self:lookup_index(index) | |
808 | 809 | if name == nil then |
809 | 810 | return nil, h2_errors.COMPRESSION_ERROR:new_traceback(string.format("index %d not found in table", index)) |
810 | 811 | end |
825 | 826 | local index, newpos = decode_integer(payload, 7, pos) |
826 | 827 | if index == nil then break end |
827 | 828 | pos = newpos |
828 | local name, value = self:lookup_index(index, false) | |
829 | local name, value = self:lookup_index(index) | |
829 | 830 | if name == nil then |
830 | 831 | return nil, h2_errors.COMPRESSION_ERROR:new_traceback(string.format("index %d not found in table", index)) |
831 | 832 | end |
2 | 2 | HSTS is described in RFC 6797 |
3 | 3 | ]] |
4 | 4 | |
5 | local EOF = require "lpeg".P(-1) | |
6 | local IPv4address = require "lpeg_patterns.IPv4".IPv4address | |
7 | local IPv6address = require "lpeg_patterns.IPv6".IPv6address | |
8 | local IPaddress = (IPv4address + IPv6address) * EOF | |
5 | local binaryheap = require "binaryheap" | |
6 | local http_util = require "http.util" | |
9 | 7 | |
10 | 8 | local store_methods = { |
11 | 9 | time = function() return os.time() end; |
10 | max_items = (1e999); | |
12 | 11 | } |
13 | 12 | |
14 | 13 | local store_mt = { |
22 | 21 | __index = store_item_methods; |
23 | 22 | } |
24 | 23 | |
25 | local function host_is_ip(host) | |
26 | if IPaddress:match(host) then | |
27 | return true | |
28 | else | |
29 | return false | |
30 | end | |
31 | end | |
32 | ||
33 | 24 | local function new_store() |
34 | 25 | return setmetatable({ |
35 | 26 | domains = {}; |
27 | expiry_heap = binaryheap.minUnique(); | |
28 | n_items = 0; | |
36 | 29 | }, store_mt) |
37 | 30 | end |
38 | 31 | |
39 | 32 | function store_methods:clone() |
40 | 33 | local r = new_store() |
41 | 34 | r.time = rawget(self, "time") |
35 | r.n_items = rawget(self, "n_items") | |
36 | r.expiry_heap = binaryheap.minUnique() | |
42 | 37 | for host, item in pairs(self.domains) do |
43 | 38 | r.domains[host] = item |
39 | r.expiry_heap:insert(item.expires, item) | |
44 | 40 | end |
45 | 41 | return r |
46 | 42 | end |
55 | 51 | else |
56 | 52 | max_age = tonumber(max_age, 10) |
57 | 53 | end |
58 | if host_is_ip(host) then | |
59 | return false | |
54 | ||
55 | -- Clean now so that we can assume there are no expired items in store | |
56 | self:clean() | |
57 | ||
58 | if max_age == 0 then | |
59 | return self:remove(host) | |
60 | else | |
61 | if http_util.is_ip(host) then | |
62 | return false | |
63 | end | |
64 | -- add to store | |
65 | local old_item = self.domains[host] | |
66 | if old_item then | |
67 | self.expiry_heap:remove(old_item) | |
68 | else | |
69 | local n_items = self.n_items | |
70 | if n_items >= self.max_items then | |
71 | return false | |
72 | end | |
73 | self.n_items = n_items + 1 | |
74 | end | |
75 | local expires = now + max_age | |
76 | local item = setmetatable({ | |
77 | host = host; | |
78 | includeSubdomains = directives.includeSubdomains; | |
79 | expires = expires; | |
80 | }, store_item_mt) | |
81 | self.domains[host] = item | |
82 | self.expiry_heap:insert(expires, item) | |
60 | 83 | end |
61 | if max_age == 0 then | |
62 | -- delete from store | |
84 | return true | |
85 | end | |
86 | ||
87 | function store_methods:remove(host) | |
88 | local item = self.domains[host] | |
89 | if item then | |
90 | self.expiry_heap:remove(item) | |
63 | 91 | self.domains[host] = nil |
64 | else | |
65 | -- add to store | |
66 | self.domains[host] = setmetatable({ | |
67 | includeSubdomains = directives.includeSubdomains; | |
68 | expires = now + max_age; | |
69 | }, store_item_mt) | |
92 | self.n_items = self.n_items - 1 | |
70 | 93 | end |
71 | 94 | return true |
72 | 95 | end |
73 | 96 | |
74 | 97 | function store_methods:check(host) |
75 | if host_is_ip(host) then | |
98 | if http_util.is_ip(host) then | |
76 | 99 | return false |
77 | 100 | end |
78 | local now = self.time() | |
101 | ||
102 | -- Clean now so that we can assume there are no expired items in store | |
103 | self:clean() | |
104 | ||
79 | 105 | local h = host |
80 | 106 | repeat |
81 | 107 | local item = self.domains[h] |
82 | 108 | if item then |
83 | if item.expires < now then | |
84 | self:clean() | |
85 | elseif host == h or item.includeSubdomains then | |
109 | if host == h or item.includeSubdomains then | |
86 | 110 | return true |
87 | 111 | end |
88 | 112 | end |
92 | 116 | return false |
93 | 117 | end |
94 | 118 | |
119 | function store_methods:clean_due() | |
120 | local next_expiring = self.expiry_heap:peek() | |
121 | if not next_expiring then | |
122 | return (1e999) | |
123 | end | |
124 | return next_expiring.expires | |
125 | end | |
126 | ||
95 | 127 | function store_methods:clean() |
96 | 128 | local now = self.time() |
97 | for host, item in pairs(self.domains) do | |
98 | if item.expires < now then | |
99 | self.domains[host] = nil | |
100 | end | |
129 | while self:clean_due() < now do | |
130 | local item = self.expiry_heap:pop() | |
131 | self.domains[item.host] = nil | |
132 | self.n_items = self.n_items - 1 | |
101 | 133 | end |
102 | 134 | return true |
103 | 135 | end |
0 | interface hsts_store | |
1 | time: () -> (number) | |
2 | max_items: number | |
3 | ||
4 | clone: (self) -> (hsts_store) | |
5 | store: (self, string, {string:string}) -> (boolean) | |
6 | remove: (self, string) -> (boolean) | |
7 | check: (self, hsts_store) -> (boolean) | |
8 | const clean_due: (self) -> (number) | |
9 | const clean: (self) -> (boolean) | |
10 | end | |
11 | ||
12 | new_store: () -> (hsts_store) |
56 | 56 | end |
57 | 57 | end |
58 | 58 | end |
59 | if scheme == "http" or scheme == "ws" then | |
59 | if scheme == "http" then | |
60 | 60 | if self.http_proxy then |
61 | 61 | return self.http_proxy |
62 | 62 | end |
63 | elseif scheme == "https" or scheme == "wss" then | |
63 | elseif scheme == "https" then | |
64 | 64 | if self.https_proxy then |
65 | 65 | return self.https_proxy |
66 | 66 | end |
0 | interface proxies | |
1 | const update: (self, (string)->(string?))->(self) | |
2 | const choose: (self, string, string)->(string?) | |
3 | end | |
4 | ||
5 | new: proxies |
3 | 3 | local basexx = require "basexx" |
4 | 4 | local client = require "http.client" |
5 | 5 | local new_headers = require "http.headers".new |
6 | local http_cookie = require "http.cookie" | |
6 | 7 | local http_hsts = require "http.hsts" |
7 | 8 | local http_socks = require "http.socks" |
8 | 9 | local http_proxies = require "http.proxies" |
14 | 15 | local default_user_agent = string.format("%s/%s", http_version.name, http_version.version) |
15 | 16 | local default_hsts_store = http_hsts.new_store() |
16 | 17 | local default_proxies = http_proxies.new():update() |
18 | local default_cookie_store = http_cookie.new_store() | |
19 | ||
20 | local default_h2_settings = { | |
21 | ENABLE_PUSH = false; | |
22 | } | |
17 | 23 | |
18 | 24 | local request_methods = { |
19 | 25 | hsts = default_hsts_store; |
20 | 26 | proxies = default_proxies; |
27 | cookie_store = default_cookie_store; | |
28 | is_top_level = true; | |
29 | site_for_cookies = nil; | |
21 | 30 | expect_100_timeout = 1; |
22 | 31 | follow_redirects = true; |
23 | 32 | max_redirects = 5; |
31 | 40 | } |
32 | 41 | |
33 | 42 | local EOF = lpeg.P(-1) |
34 | local sts_patt = lpeg.Cf(lpeg.Ct(true) * http_patts.Strict_Transport_Security, rawset) * EOF | |
43 | local sts_patt = http_patts.Strict_Transport_Security * EOF | |
35 | 44 | local uri_patt = uri_patts.uri * EOF |
36 | 45 | local uri_ref = uri_patts.uri_reference * EOF |
37 | 46 | |
43 | 52 | end |
44 | 53 | local scheme = assert(uri_t.scheme, "URI missing scheme") |
45 | 54 | assert(scheme == "https" or scheme == "http" or scheme == "ws" or scheme == "wss", "scheme not valid") |
46 | local host = tostring(assert(uri_t.host, "URI must include a host")) -- tostring required to e.g. convert lpeg_patterns IPv6 objects | |
55 | local host = assert(uri_t.host, "URI must include a host") | |
47 | 56 | local port = uri_t.port or http_util.scheme_to_port[scheme] |
48 | 57 | local is_connect -- CONNECT requests are a bit special, see http2 spec section 8.3 |
49 | 58 | if headers == nil then |
67 | 76 | path = path .. "?" .. uri_t.query |
68 | 77 | end |
69 | 78 | headers:upsert(":path", path) |
79 | if scheme == "wss" then | |
80 | scheme = "https" | |
81 | elseif scheme == "ws" then | |
82 | scheme = "http" | |
83 | end | |
70 | 84 | headers:upsert(":scheme", scheme) |
71 | 85 | end |
72 | 86 | if uri_t.userinfo then |
76 | 90 | else |
77 | 91 | field = "authorization" |
78 | 92 | end |
79 | local userinfo = http_util.decodeURIComponent(uri_t.userinfo) -- XXX: this doesn't seem right, but it's same behaviour as curl | |
80 | headers:append(field, "basic " .. basexx.to_base64(userinfo), true) | |
93 | local userinfo = http_util.decodeURIComponent(uri_t.userinfo) -- XXX: this doesn't seem right, but it's the same behaviour as curl | |
94 | headers:upsert(field, "basic " .. basexx.to_base64(userinfo), true) | |
81 | 95 | end |
82 | 96 | if not headers:has("user-agent") then |
83 | 97 | headers:append("user-agent", default_user_agent) |
85 | 99 | return setmetatable({ |
86 | 100 | host = host; |
87 | 101 | port = port; |
88 | tls = (scheme == "https" or scheme == "wss"); | |
102 | tls = (scheme == "https"); | |
89 | 103 | headers = headers; |
90 | 104 | body = nil; |
91 | 105 | }, request_mt) |
102 | 116 | return setmetatable({ |
103 | 117 | host = self.host; |
104 | 118 | port = self.port; |
119 | bind = self.bind; | |
105 | 120 | tls = self.tls; |
106 | 121 | ctx = self.ctx; |
107 | 122 | sendname = self.sendname; |
113 | 128 | |
114 | 129 | hsts = rawget(self, "hsts"); |
115 | 130 | proxies = rawget(self, "proxies"); |
131 | cookie_store = rawget(self, "cookie_store"); | |
132 | is_top_level = rawget(self, "is_top_level"); | |
133 | site_for_cookies = rawget(self, "site_for_cookies"); | |
116 | 134 | expect_100_timeout = rawget(self, "expect_100_timeout"); |
117 | 135 | follow_redirects = rawget(self, "follow_redirects"); |
118 | 136 | max_redirects = rawget(self, "max_redirects"); |
195 | 213 | if not is_connect then |
196 | 214 | new_req.headers:upsert(":scheme", new_scheme) |
197 | 215 | end |
198 | if new_scheme == "https" or new_scheme == "wss" then | |
216 | if new_scheme == "https" then | |
199 | 217 | new_req.tls = true |
200 | elseif new_scheme == "http" or new_scheme == "ws" then | |
218 | elseif new_scheme == "http" then | |
201 | 219 | new_req.tls = false |
202 | 220 | else |
203 | 221 | return nil, "unknown scheme", ce.EINVAL |
338 | 356 | local host = self.host |
339 | 357 | local port = self.port |
340 | 358 | local tls = self.tls |
359 | local version = self.version | |
341 | 360 | |
342 | 361 | -- RFC 6797 Section 8.3 |
343 | 362 | if not tls and self.hsts and self.hsts:check(host) then |
361 | 380 | end |
362 | 381 | end |
363 | 382 | |
383 | if self.cookie_store then | |
384 | local cookie_header = self.cookie_store:lookup_for_request(request_headers, host, self.site_for_cookies, self.is_top_level) | |
385 | if cookie_header ~= "" then | |
386 | if not cloned_headers then | |
387 | request_headers = request_headers:clone() | |
388 | cloned_headers = true | |
389 | end | |
390 | -- Append rather than upsert: user may have added their own cookies | |
391 | request_headers:append("cookie", cookie_header) | |
392 | end | |
393 | end | |
394 | ||
364 | 395 | local connection |
365 | 396 | |
366 | 397 | local proxy = self.proxy |
372 | 403 | if proxy then |
373 | 404 | if type(proxy) == "string" then |
374 | 405 | proxy = assert(uri_patt:match(proxy), "invalid proxy URI") |
406 | proxy.path = nil -- ignore proxy.path component | |
375 | 407 | else |
376 | 408 | assert(type(proxy) == "table" and getmetatable(proxy) == nil and proxy.scheme, "invalid proxy URI") |
409 | proxy = { | |
410 | scheme = proxy.scheme; | |
411 | userinfo = proxy.userinfo; | |
412 | host = proxy.host; | |
413 | port = proxy.port; | |
414 | -- ignore proxy.path component | |
415 | } | |
377 | 416 | end |
378 | 417 | if proxy.scheme == "http" or proxy.scheme == "https" then |
379 | 418 | if tls then |
382 | 421 | local connect_request = new_connect(proxy, authority) |
383 | 422 | connect_request.proxy = false |
384 | 423 | connect_request.version = 1.1 -- TODO: CONNECT over HTTP/2 |
385 | if tls then | |
386 | if connect_request.tls then | |
387 | error("NYI: TLS over TLS") | |
388 | end | |
424 | if connect_request.tls then | |
425 | error("NYI: TLS over TLS") | |
389 | 426 | end |
390 | 427 | -- Perform CONNECT request |
391 | 428 | local headers, stream, errno = connect_request:go(deadline and deadline-monotime()) |
403 | 440 | local sock = stream.connection:take_socket() |
404 | 441 | local err, errno2 |
405 | 442 | connection, err, errno2 = client.negotiate(sock, { |
443 | host = host; | |
406 | 444 | tls = tls; |
407 | 445 | ctx = self.ctx; |
408 | sendname = self.sendname ~= nil and self.sendname or host; | |
409 | version = self.version; | |
446 | sendname = self.sendname; | |
447 | version = version; | |
448 | h2_settings = default_h2_settings; | |
410 | 449 | }, deadline and deadline-monotime()) |
411 | 450 | if connection == nil then |
412 | 451 | sock:close() |
415 | 454 | else |
416 | 455 | if request_headers:get(":method") == "CONNECT" then |
417 | 456 | error("cannot use HTTP Proxy with CONNECT method") |
418 | end | |
419 | if proxy.path ~= nil and proxy.path ~= "" then | |
420 | error("an HTTP proxy cannot have a path component") | |
421 | 457 | end |
422 | 458 | -- TODO: Check if :path already has authority? |
423 | 459 | local old_url = self:to_uri(false) |
444 | 480 | tls = tls; |
445 | 481 | ctx = self.ctx; |
446 | 482 | sendname = self.sendname ~= nil and self.sendname or host; |
447 | version = self.version; | |
483 | version = version; | |
484 | h2_settings = default_h2_settings; | |
448 | 485 | }, deadline and deadline-monotime()) |
449 | 486 | if connection == nil then |
450 | 487 | sock:close() |
460 | 497 | connection, err, errno = client.connect({ |
461 | 498 | host = host; |
462 | 499 | port = port; |
500 | bind = self.bind; | |
463 | 501 | tls = tls; |
464 | 502 | ctx = self.ctx; |
465 | 503 | sendname = self.sendname; |
466 | version = self.version; | |
504 | version = version; | |
505 | h2_settings = default_h2_settings; | |
467 | 506 | }, deadline and deadline-monotime()) |
468 | 507 | if connection == nil then |
469 | 508 | return nil, err, errno |
579 | 618 | end |
580 | 619 | end |
581 | 620 | |
621 | if self.cookie_store then | |
622 | self.cookie_store:store_from_request(request_headers, headers, self.host, self.site_for_cookies) | |
623 | end | |
624 | ||
582 | 625 | if self.follow_redirects and headers:get(":status"):sub(1,1) == "3" then |
583 | 626 | stream:shutdown() |
584 | 627 | local new_req, err2, errno2 = self:handle_redirect(headers) |
0 | require "http.cookie" | |
1 | require "http.hsts" | |
2 | require "http.proxies" | |
3 | require "http.stream_common" | |
4 | ||
5 | interface request | |
6 | hsts: hsts_store|false | |
7 | proxies: proxies|false | |
8 | cookie_store: cookie_store|false | |
9 | is_top_level: boolean | |
10 | site_for_cookies: string? | |
11 | expect_100_timeout: integer | |
12 | follow_redirects: boolean | |
13 | max_redirects: integer | |
14 | post301: boolean | |
15 | post302: boolean | |
16 | headers: headers | |
17 | const clone: (self) -> (request) | |
18 | const to_uri: (self, boolean?) -> (string) | |
19 | const handle_redirect: (self, headers) -> (request)|(nil, string, integer) | |
20 | const set_body: (self, string|file|()->(string?)) -> () | |
21 | const go: (self, number) -> (headers, stream)|(nil, string, integer) | |
22 | end | |
23 | ||
24 | new_from_uri: (string, headers?) -> (request) | |
25 | new_connect: (string, string) -> (request) |
44 | 44 | -- Wrap a bare cqueues socket in an HTTP connection of a suitable version |
45 | 45 | -- Starts TLS if necessary |
46 | 46 | -- this function *should never throw* |
47 | local function wrap_socket(self, socket, deadline) | |
47 | local function wrap_socket(self, socket, timeout) | |
48 | local deadline = timeout and monotime()+timeout | |
48 | 49 | socket:setmode("b", "b") |
49 | 50 | socket:onerror(onerror) |
50 | 51 | local version = self.version |
61 | 62 | if not ok then |
62 | 63 | return nil, err, errno |
63 | 64 | end |
64 | local ssl = socket:checktls() | |
65 | if ssl and http_tls.has_alpn then | |
65 | local ssl = assert(socket:checktls()) | |
66 | if http_tls.has_alpn then | |
66 | 67 | local proto = ssl:getAlpnSelected() |
67 | if proto == "h2" and (version == nil or version == 2) then | |
68 | version = 2 | |
69 | elseif (proto == "http/1.1") and (version == nil or version < 2) then | |
70 | version = 1.1 | |
71 | elseif proto ~= nil then | |
72 | return nil, "unexpected ALPN protocol: " .. proto, ce.EILSEQNOSUPPORT | |
68 | if proto then | |
69 | if proto == "h2" and (version == nil or version == 2) then | |
70 | version = 2 | |
71 | elseif proto == "http/1.1" and (version == nil or version < 2) then | |
72 | version = 1.1 | |
73 | elseif proto == "http/1.0" and (version == nil or version == 1.0) then | |
74 | version = 1.0 | |
75 | else | |
76 | return nil, "unexpected ALPN protocol: " .. proto, ce.EILSEQNOSUPPORT | |
77 | end | |
73 | 78 | end |
74 | 79 | end |
75 | 80 | end |
78 | 83 | if version == nil then |
79 | 84 | local is_h2, err, errno = h2_connection.socket_has_preface(socket, true, deadline and (deadline-monotime())) |
80 | 85 | if is_h2 == nil then |
81 | return nil, err, errno | |
86 | return nil, err or ce.EPIPE, errno | |
82 | 87 | end |
83 | 88 | version = is_h2 and 2 or 1.1 |
84 | 89 | end |
125 | 130 | |
126 | 131 | local function handle_socket(self, socket) |
127 | 132 | local error_operation, error_context |
128 | local conn, err, errno = wrap_socket(self, socket) | |
133 | local conn, err, errno = wrap_socket(self, socket, self.connection_setup_timeout) | |
129 | 134 | if not conn then |
130 | 135 | socket:close() |
131 | 136 | if err ~= ce.EPIPE -- client closed connection |
137 | 142 | else |
138 | 143 | local cond = cc.new() |
139 | 144 | local idle = true |
145 | local deadline | |
140 | 146 | conn:onidle(function() |
141 | 147 | idle = true |
148 | deadline = self.intra_stream_timeout + monotime() | |
142 | 149 | cond:signal(1) |
143 | 150 | end) |
144 | 151 | while true do |
152 | local timeout = deadline and deadline-monotime() or self.intra_stream_timeout | |
145 | 153 | local stream |
146 | stream, err, errno = conn:get_next_incoming_stream() | |
154 | stream, err, errno = conn:get_next_incoming_stream(timeout) | |
147 | 155 | if stream == nil then |
148 | 156 | if (err ~= nil -- client closed connection |
149 | 157 | and errno ~= ce.ECONNRESET |
150 | and errno ~= ce.ENOTCONN) then | |
158 | and errno ~= ce.ENOTCONN | |
159 | and errno ~= ce.ETIMEDOUT) then | |
151 | 160 | error_operation = "get_next_incoming_stream" |
152 | 161 | error_context = conn |
162 | break | |
163 | elseif errno ~= ce.ETIMEDOUT or not idle or (deadline and deadline <= monotime()) then -- want to go around loop again if deadline not hit | |
164 | break | |
153 | 165 | end |
154 | break | |
166 | else | |
167 | idle = false | |
168 | deadline = nil | |
169 | self:add_stream(stream) | |
155 | 170 | end |
156 | idle = false | |
157 | self.cq:wrap(function() | |
158 | local ok, err2 = http_util.yieldable_pcall(self.onstream, self, stream) | |
159 | stream:shutdown() | |
160 | if not ok then | |
161 | self:onerror()(self, stream, "onstream", err2) | |
162 | end | |
163 | end) | |
164 | 171 | end |
165 | 172 | -- wait for streams to complete |
166 | 173 | if not idle then |
175 | 182 | end |
176 | 183 | end |
177 | 184 | |
185 | local function handle_stream(self, stream) | |
186 | local ok, err = http_util.yieldable_pcall(self.onstream, self, stream) | |
187 | stream:shutdown() | |
188 | if not ok then | |
189 | self:onerror()(self, stream, "onstream", err) | |
190 | end | |
191 | end | |
192 | ||
178 | 193 | -- Prefer whichever comes first |
179 | local function alpn_select_either(ssl, protos) -- luacheck: ignore 212 | |
194 | local function alpn_select(ssl, protos, version) | |
180 | 195 | for _, proto in ipairs(protos) do |
181 | if proto == "h2" then | |
182 | -- HTTP2 only allows >=TLSv1.2 | |
183 | if ssl:getVersion() >= openssl_ssl.TLS1_2_VERSION then | |
196 | if proto == "h2" and (version == nil or version == 2) then | |
197 | -- HTTP2 only allows >= TLSv1.2 | |
198 | -- allow override via version | |
199 | if ssl:getVersion() >= openssl_ssl.TLS1_2_VERSION or version == 2 then | |
184 | 200 | return proto |
185 | 201 | end |
186 | elseif proto == "http/1.1" then | |
187 | return proto | |
188 | end | |
189 | end | |
190 | return nil | |
191 | end | |
192 | ||
193 | local function alpn_select_h2(ssl, protos) -- luacheck: ignore 212 | |
194 | for _, proto in ipairs(protos) do | |
195 | if proto == "h2" then | |
196 | return proto | |
197 | end | |
198 | end | |
199 | return nil | |
200 | end | |
201 | ||
202 | local function alpn_select_h1(ssl, protos) -- luacheck: ignore 212 | |
203 | for _, proto in ipairs(protos) do | |
204 | if proto == "http/1.1" then | |
202 | elseif (proto == "http/1.1" and (version == nil or version == 1.1)) | |
203 | or (proto == "http/1.0" and (version == nil or version == 1.0)) then | |
205 | 204 | return proto |
206 | 205 | end |
207 | 206 | end |
212 | 211 | local function new_ctx(host, version) |
213 | 212 | local ctx = http_tls.new_server_context() |
214 | 213 | if http_tls.has_alpn then |
215 | if version == nil then | |
216 | ctx:setAlpnSelect(alpn_select_either) | |
217 | elseif version == 2 then | |
218 | ctx:setAlpnSelect(alpn_select_h2) | |
219 | elseif version == 1.1 then | |
220 | ctx:setAlpnSelect(alpn_select_h1) | |
221 | end | |
214 | ctx:setAlpnSelect(alpn_select, version) | |
222 | 215 | end |
223 | 216 | if version == 2 then |
224 | 217 | ctx:setOptions(openssl_ctx.OP_NO_TLSv1 + openssl_ctx.OP_NO_TLSv1_1) |
252 | 245 | local server_methods = { |
253 | 246 | version = nil; |
254 | 247 | max_concurrent = math.huge; |
255 | client_timeout = 10; | |
248 | connection_setup_timeout = 10; | |
249 | intra_stream_timeout = 10; | |
256 | 250 | } |
257 | 251 | local server_mt = { |
258 | 252 | __name = "http.server"; |
268 | 262 | |
269 | 263 | Takes a table of options: |
270 | 264 | - `.cq` (optional): A cqueues controller to use |
271 | - `.socket`: A cqueues socket object | |
265 | - `.socket` (optional): A cqueues socket object to accept() from | |
272 | 266 | - `.onstream`: function to call back for each stream read |
273 | 267 | - `.onerror`: function that will be called when an error occurs (default: throw an error) |
274 | 268 | - `.tls`: `nil`: allow both tls and non-tls connections |
278 | 272 | - ` `nil`: a self-signed context will be generated |
279 | 273 | - `.version`: the http version to allow to connect (default: any) |
280 | 274 | - `.max_concurrent`: Maximum number of connections to allow live at a time (default: infinity) |
281 | - `.client_timeout`: Timeout (in seconds) to wait for client to send first bytes and/or complete TLS handshake (default: 10) | |
275 | - `.connection_setup_timeout`: Timeout (in seconds) to wait for client to send first bytes and/or complete TLS handshake (default: 10) | |
276 | - `.intra_stream_timeout`: Timeout (in seconds) to wait between start of client streams (default: 10) | 
282 | 277 | ]] |
283 | 278 | local function new_server(tbl) |
284 | 279 | local cq = tbl.cq |
287 | 282 | else |
288 | 283 | assert(cqueues.type(cq) == "controller", "optional cq field should be a cqueue controller") |
289 | 284 | end |
290 | local socket = assert(tbl.socket, "missing 'socket'") | |
285 | local socket = tbl.socket | |
286 | if socket ~= nil then | |
287 | assert(cs.type(socket), "optional socket field should be a cqueues socket") | |
288 | end | |
291 | 289 | local onstream = assert(tbl.onstream, "missing 'onstream'") |
292 | ||
293 | 290 | if tbl.ctx == nil and tbl.tls ~= false then |
294 | 291 | error("OpenSSL context required if .tls isn't false") |
295 | 292 | end |
296 | ||
297 | -- Return errors rather than throwing | |
298 | socket:onerror(function(s, op, why, lvl) -- luacheck: ignore 431 212 | |
299 | return why | |
300 | end) | |
301 | 293 | |
302 | 294 | local self = setmetatable({ |
303 | 295 | cq = cq; |
312 | 304 | pause_cond = cc.new(); |
313 | 305 | paused = false; |
314 | 306 | connection_done = cc.new(); -- signalled when connection has been closed |
315 | client_timeout = tbl.client_timeout; | |
307 | connection_setup_timeout = tbl.connection_setup_timeout; | |
308 | intra_stream_timeout = tbl.intra_stream_timeout; | |
316 | 309 | }, server_mt) |
317 | 310 | |
318 | cq:wrap(server_loop, self) | |
311 | if socket then | |
312 | -- Return errors rather than throwing | |
313 | socket:onerror(function(socket, op, why, lvl) -- luacheck: ignore 431 212 | |
314 | return why | |
315 | end) | |
316 | cq:wrap(server_loop, self) | |
317 | end | |
319 | 318 | |
320 | 319 | return self |
321 | 320 | end |
380 | 379 | ctx = ctx; |
381 | 380 | version = tbl.version; |
382 | 381 | max_concurrent = tbl.max_concurrent; |
383 | client_timeout = tbl.client_timeout; | |
382 | connection_setup_timeout = tbl.connection_setup_timeout; | |
383 | intra_stream_timeout = tbl.intra_stream_timeout; | |
384 | 384 | } |
385 | 385 | end |
386 | 386 | |
403 | 403 | -- Actually wait for and *do* the binding |
404 | 404 | -- Don't *need* to call this, as if not it will be done lazily |
405 | 405 | function server_methods:listen(timeout) |
406 | return ca.fileresult(self.socket:listen(timeout)) | |
406 | if self.socket then | |
407 | local ok, err, errno = ca.fileresult(self.socket:listen(timeout)) | |
408 | if not ok then | |
409 | return nil, err, errno | |
410 | end | |
411 | end | |
412 | return true | |
407 | 413 | end |
408 | 414 | |
409 | 415 | function server_methods:localname() |
410 | return self.socket:localname() | |
416 | if self.socket == nil then | |
417 | return | |
418 | end | |
419 | return ca.fileresult(self.socket:localname()) | |
411 | 420 | end |
412 | 421 | |
413 | 422 | function server_methods:pause() |
468 | 477 | return true |
469 | 478 | end |
470 | 479 | |
480 | function server_methods:add_stream(stream) | |
481 | self.cq:wrap(handle_stream, self, stream) | |
482 | return true | |
483 | end | |
484 | ||
471 | 485 | return { |
472 | 486 | new = new_server; |
473 | 487 | listen = listen; |
13 | 13 | local ca = require "cqueues.auxlib" |
14 | 14 | local ce = require "cqueues.errno" |
15 | 15 | local cs = require "cqueues.socket" |
16 | local spack = string.pack or require "compat53.string".pack | |
17 | local sunpack = string.unpack or require "compat53.string".unpack | |
16 | local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143 | |
17 | local sunpack = string.unpack or require "compat53.string".unpack -- luacheck: ignore 143 | |
18 | 18 | local IPv4 = require "lpeg_patterns.IPv4" |
19 | 19 | local IPv6 = require "lpeg_patterns.IPv6" |
20 | 20 | local uri_patts = require "lpeg_patterns.uri" |
151 | 151 | return self:write_chunk(str, true, timeout) |
152 | 152 | end |
153 | 153 | |
154 | function stream_methods:write_body_from_file(file, timeout) | |
154 | function stream_methods:write_body_from_file(options, timeout) | |
155 | 155 | local deadline = timeout and (monotime()+timeout) |
156 | -- Can't use :lines here as in Lua 5.1 it doesn't take a parameter | |
157 | while true do | |
158 | local chunk, err = file:read(CHUNK_SIZE) | |
156 | local file, count | |
157 | if io.type(options) then -- lua-http <= 0.2 took a file handle | |
158 | file = options | |
159 | else | |
160 | file = options.file | |
161 | count = options.count | |
162 | end | |
163 | if count == nil then | |
164 | count = math.huge | |
165 | elseif type(count) ~= "number" or count < 0 or count % 1 ~= 0 then | |
166 | error("invalid .count parameter (expected positive integer)") | |
167 | end | |
168 | while count > 0 do | |
169 | local chunk, err = file:read(math.min(CHUNK_SIZE, count)) | |
159 | 170 | if chunk == nil then |
160 | 171 | if err then |
161 | 172 | error(err) |
173 | elseif count ~= math.huge and count > 0 then | |
174 | error("unexpected EOF") | |
162 | 175 | end |
163 | 176 | break |
164 | 177 | end |
166 | 179 | if not ok then |
167 | 180 | return nil, err2, errno2 |
168 | 181 | end |
182 | count = count - #chunk | |
169 | 183 | end |
170 | 184 | return self:write_chunk("", true, deadline and (deadline-monotime())) |
171 | 185 | end |
0 | require "http.connection_common" | |
1 | ||
2 | interface stream | |
3 | const checktls: (self) -> (nil)|(any) | |
4 | const localname: (self) -> (integer, string, integer?)|(nil)|(nil, string, number) | |
5 | const peername: (self) -> (integer, string, integer?)|(nil)|(nil, string, number) | |
6 | const write_continue: (self, number?) -> (true)|(nil, string, number) | |
7 | const each_chunk: (self) -> ((stream)->(string)|(nil)|(nil, string, number), self) | |
8 | const get_body_as_string: (self, number?) -> (string)|(nil, string, number) | |
9 | const get_body_chars: (self, integer, number?) -> (string)|(nil, string, number) | |
10 | const get_body_until: (self, string, boolean, boolean, number?) -> (string)|(nil, string, number) | |
11 | const save_body_to_file: (self, file, number?) -> (true)|(nil, string, number) | |
12 | const get_body_as_file: (self, number?) -> (file)|(nil, string, number) | |
13 | const write_body_from_string: (self, string, number?) -> (true)|(nil, string, number) | |
14 | const write_body_from_file: (self, {"file":file, "count": integer?}|file, number?) -> (true)|(nil, string, number) | |
15 | ||
16 | -- Not in stream_common.lua | |
17 | const connection: connection | |
18 | const get_headers: (self, number?) -> (headers)|(nil)|(nil, string, number) | |
19 | const get_next_chunk: (self, number?) -> (string)|(nil)|(nil, string, number) | |
20 | const write_headers: (self, headers, boolean, number?) -> (true)|(nil, string, number) | |
21 | const write_chunk: (self, string, boolean, number?) -> (true)|(nil, string, number) | |
22 | const unget: (self, string) -> (true) | |
23 | const shutdown: (self) -> (true) | |
24 | end |
63 | 63 | "AES256-SHA"; |
64 | 64 | "DES-CBC3-SHA"; |
65 | 65 | "!DSS"; |
66 | } | |
67 | ||
68 | -- "Old" cipher list | |
69 | local old_cipher_list = cipher_list { | |
70 | "ECDHE-ECDSA-CHACHA20-POLY1305"; | |
71 | "ECDHE-RSA-CHACHA20-POLY1305"; | |
72 | "ECDHE-RSA-AES128-GCM-SHA256"; | |
73 | "ECDHE-ECDSA-AES128-GCM-SHA256"; | |
74 | "ECDHE-RSA-AES256-GCM-SHA384"; | |
75 | "ECDHE-ECDSA-AES256-GCM-SHA384"; | |
76 | "DHE-RSA-AES128-GCM-SHA256"; | |
77 | "DHE-DSS-AES128-GCM-SHA256"; | |
78 | "kEDH+AESGCM"; | |
79 | "ECDHE-RSA-AES128-SHA256"; | |
80 | "ECDHE-ECDSA-AES128-SHA256"; | |
81 | "ECDHE-RSA-AES128-SHA"; | |
82 | "ECDHE-ECDSA-AES128-SHA"; | |
83 | "ECDHE-RSA-AES256-SHA384"; | |
84 | "ECDHE-ECDSA-AES256-SHA384"; | |
85 | "ECDHE-RSA-AES256-SHA"; | |
86 | "ECDHE-ECDSA-AES256-SHA"; | |
87 | "DHE-RSA-AES128-SHA256"; | |
88 | "DHE-RSA-AES128-SHA"; | |
89 | "DHE-DSS-AES128-SHA256"; | |
90 | "DHE-RSA-AES256-SHA256"; | |
91 | "DHE-DSS-AES256-SHA"; | |
92 | "DHE-RSA-AES256-SHA"; | |
93 | "ECDHE-RSA-DES-CBC3-SHA"; | |
94 | "ECDHE-ECDSA-DES-CBC3-SHA"; | |
95 | "EDH-RSA-DES-CBC3-SHA"; | |
96 | "AES128-GCM-SHA256"; | |
97 | "AES256-GCM-SHA384"; | |
98 | "AES128-SHA256"; | |
99 | "AES256-SHA256"; | |
100 | "AES128-SHA"; | |
101 | "AES256-SHA"; | |
102 | "AES"; | |
103 | "DES-CBC3-SHA"; | |
104 | "HIGH"; | |
105 | "SEED"; | |
106 | "!aNULL"; | |
107 | "!eNULL"; | |
108 | "!EXPORT"; | |
109 | "!DES"; | |
110 | "!RC4"; | |
111 | "!MD5"; | |
112 | "!PSK"; | |
113 | "!RSAPSK"; | |
114 | "!aDH"; | |
115 | "!aECDH"; | |
116 | "!EDH-DSS-DES-CBC3-SHA"; | |
117 | "!KRB5-DES-CBC3-SHA"; | |
118 | "!SRP"; | |
66 | 119 | } |
67 | 120 | |
68 | 121 | -- A map from the cipher identifiers used in specifications to |
697 | 750 | + openssl_ctx.OP_NO_SSLv2 |
698 | 751 | + openssl_ctx.OP_NO_SSLv3 |
699 | 752 | |
700 | local client_params = openssl_verify_param.new() | |
701 | client_params:setPurpose("sslserver") -- the purpose the peer has to present | |
702 | ||
703 | 753 | local function new_client_context() |
704 | 754 | local ctx = openssl_ctx.new("TLS", false) |
705 | 755 | ctx:setCipherList(intermediate_cipher_list) |
706 | 756 | ctx:setOptions(default_tls_options) |
707 | ctx:setParam(client_params) | |
708 | 757 | ctx:setEphemeralKey(openssl_pkey.new{ type = "EC", curve = "prime256v1" }) |
709 | 758 | local store = ctx:getStore() |
710 | 759 | store:addDefaults() |
712 | 761 | return ctx |
713 | 762 | end |
714 | 763 | |
715 | local server_params = openssl_verify_param.new() | |
716 | server_params:setPurpose("sslclient") -- the purpose the peer has to present | |
717 | ||
718 | 764 | local function new_server_context() |
719 | 765 | local ctx = openssl_ctx.new("TLS", true) |
720 | 766 | ctx:setCipherList(intermediate_cipher_list) |
721 | 767 | ctx:setOptions(default_tls_options) |
722 | ctx:setParam(server_params) | |
723 | 768 | ctx:setEphemeralKey(openssl_pkey.new{ type = "EC", curve = "prime256v1" }) |
724 | 769 | return ctx |
725 | 770 | end |
729 | 774 | has_hostname_validation = has_hostname_validation; |
730 | 775 | modern_cipher_list = modern_cipher_list; |
731 | 776 | intermediate_cipher_list = intermediate_cipher_list; |
777 | old_cipher_list = old_cipher_list; | |
732 | 778 | banned_ciphers = banned_ciphers; |
733 | 779 | new_client_context = new_client_context; |
734 | 780 | new_server_context = new_server_context; |
0 | has_alpn: boolean | |
1 | has_hostname_validation: boolean | |
2 | modern_cipher_list: string | |
3 | intermediate_cipher_list: string | |
4 | old_cipher_list: string | |
5 | banned_ciphers: {string: true} | |
6 | -- TODO: luaossl SSL context type | |
7 | new_client_context: any | |
8 | new_server_context: any |
0 | 0 | local lpeg = require "lpeg" |
1 | 1 | local http_patts = require "lpeg_patterns.http" |
2 | local IPv4_patts = require "lpeg_patterns.IPv4" | |
3 | local IPv6_patts = require "lpeg_patterns.IPv6" | |
4 | ||
5 | local EOF = lpeg.P(-1) | |
2 | 6 | |
3 | 7 | -- Encodes a character as a percent encoded string |
4 | 8 | local function char_to_pchar(c) |
19 | 23 | end |
20 | 24 | |
21 | 25 | -- decodeURI unescapes url encoded characters |
22 | -- excluding for characters that are special in urls | |
26 | -- excluding characters that are special in urls | |
23 | 27 | local decodeURI do |
24 | -- Keep the blacklist in numeric form. | |
25 | -- This means we can skip case normalisation of the hex characters | |
26 | 28 | local decodeURI_blacklist = {} |
27 | 29 | for char in ("#$&+,/:;=?@"):gmatch(".") do |
28 | 30 | decodeURI_blacklist[string.byte(char)] = true |
124 | 126 | return table.concat(t, "/", s, i) |
125 | 127 | end |
126 | 128 | |
129 | local safe_methods = { | |
130 | -- RFC 7231 Section 4.2.1: | |
131 | -- Of the request methods defined by this specification, the GET, HEAD, | |
132 | -- OPTIONS, and TRACE methods are defined to be safe. | |
133 | GET = true; | |
134 | HEAD = true; | |
135 | OPTIONS = true; | |
136 | TRACE = true; | |
137 | } | |
138 | local function is_safe_method(method) | |
139 | return safe_methods[method] or false | |
140 | end | |
141 | ||
142 | local IPaddress = (IPv4_patts.IPv4address + IPv6_patts.IPv6addrz) * EOF | |
143 | local function is_ip(str) | |
144 | return IPaddress:match(str) ~= nil | |
145 | end | |
146 | ||
127 | 147 | local scheme_to_port = { |
128 | 148 | http = 80; |
129 | 149 | ws = 80; |
134 | 154 | -- Splits a :authority header (same as Host) into host and port |
135 | 155 | local function split_authority(authority, scheme) |
136 | 156 | local host, port |
137 | local h, p = authority:match("^ *(.-):(%d+) *$") | |
157 | local h, p = authority:match("^[ \t]*(.-):(%d+)[ \t]*$") | |
138 | 158 | if p then |
139 | 159 | authority = h |
140 | port = tonumber(p) | |
160 | port = tonumber(p, 10) | |
141 | 161 | else -- when port missing from host header, it defaults to the default for that scheme |
142 | 162 | port = scheme_to_port[scheme] |
143 | 163 | if port == nil then |
144 | 164 | return nil, "unknown scheme" |
145 | 165 | end |
146 | 166 | end |
147 | local ipv6 = authority:match("%[([:%x]+)%]") | |
167 | local ipv6 = authority:match("^%[([:%x]+)%]$") | |
148 | 168 | if ipv6 then |
149 | 169 | host = ipv6 |
150 | 170 | else |
176 | 196 | return os.date("!%a, %d %b %Y %H:%M:%S GMT", time) |
177 | 197 | end |
178 | 198 | |
179 | -- This pattern checks if it's argument is a valid token, if so, it returns it as is. | |
199 | -- This pattern checks if its argument is a valid token, if so, it returns it as is. | |
180 | 200 | -- Otherwise, it returns it as a quoted string (with any special characters escaped) |
181 | 201 | local maybe_quote do |
182 | local EOF = lpeg.P(-1) | |
183 | 202 | local patt = http_patts.token * EOF |
184 | 203 | + lpeg.Cs(lpeg.Cc'"' * ((lpeg.S"\\\"") / "\\%0" + http_patts.qdtext)^0 * lpeg.Cc'"') * EOF |
185 | 204 | maybe_quote = function (s) |
187 | 206 | end |
188 | 207 | end |
189 | 208 | |
190 | -- A pcall relative that can be yielded over in PUC 5.1 | |
209 | -- A pcall-alike function that can be yielded over even in PUC 5.1 | |
191 | 210 | local yieldable_pcall |
192 | -- See if pcall can be yielded over | |
193 | if coroutine.wrap(function() return pcall(coroutine.yield, true) end)() then | |
211 | --[[ If pcall can already yield, then we want to use that. | |
212 | ||
213 | However, we can't do the feature check straight away, Openresty breaks | |
214 | coroutine.wrap in some contexts. See #98 | |
215 | Openresty nominally only supports LuaJIT, which always supports a yieldable | |
216 | pcall, so we short-circuit the feature check by checking if the 'ngx' library | |
217 | is loaded, plus that jit.version_num indicates LuaJIT 2.0. | |
218 | This combination ensures that we don't take the wrong branch if: | |
219 | - lua-http is being used to mock the openresty environment | |
220 | - openresty is compiled with something other than LuaJIT | |
221 | ]] | |
222 | if ( | |
223 | package.loaded.ngx | |
224 | and type(package.loaded.jit) == "table" | |
225 | and type(package.loaded.jit.version_num) == "number" | |
226 | and package.loaded.jit.version_num >= 20000 | |
227 | ) | |
228 | -- See if pcall can be yielded over | |
229 | or coroutine.wrap(function() | |
230 | return pcall(coroutine.yield, true) end | |
231 | )() then | |
194 | 232 | yieldable_pcall = pcall |
195 | 233 | else |
196 | 234 | local function handle_resume(co, ok, ...) |
220 | 258 | query_args = query_args; |
221 | 259 | dict_to_query = dict_to_query; |
222 | 260 | resolve_relative_path = resolve_relative_path; |
261 | is_safe_method = is_safe_method; | |
262 | is_ip = is_ip; | |
223 | 263 | scheme_to_port = scheme_to_port; |
224 | 264 | split_authority = split_authority; |
225 | 265 | to_authority = to_authority; |
0 | encodeURI: (string) -> (string) | |
1 | encodeURIComponent: (string) -> (string) | |
2 | decodeURI: (string) -> (string) | |
3 | decodeURIComponent: (string) -> (string) | |
4 | query_args: (string) -> ((any) -> (string, string), any, any) | |
5 | dict_to_query: ({string:string}) -> (string) | |
6 | resolve_relative_path: (orig_path, relative_path) -> (string) | |
7 | is_safe_method: (method) -> (boolean) | |
8 | is_ip: (string) -> (boolean) | |
9 | scheme_to_port: {string:integer} | |
10 | split_authority: (string, string) -> (string, integer)|(nil, string) | |
11 | to_authority: (string, integer, string|nil) -> (string) | |
12 | imf_date: (time) -> (string) | |
13 | maybe_quote: (string) -> (string) | |
14 | yieldable_pcall: ((any*) -> (any*), any*) -> (boolean, any*) |
22 | 22 | ]] |
23 | 23 | |
24 | 24 | local basexx = require "basexx" |
25 | local spack = string.pack or require "compat53.string".pack | |
26 | local sunpack = string.unpack or require "compat53.string".unpack | |
27 | local unpack = table.unpack or unpack -- luacheck: ignore 113 | |
25 | local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143 | |
26 | local sunpack = string.unpack or require "compat53.string".unpack -- luacheck: ignore 143 | |
27 | local unpack = table.unpack or unpack -- luacheck: ignore 113 143 | |
28 | 28 | local utf8 = utf8 or require "compat53.utf8" -- luacheck: ignore 113 |
29 | 29 | local cqueues = require "cqueues" |
30 | 30 | local monotime = cqueues.monotime |
34 | 34 | local rand = require "openssl.rand" |
35 | 35 | local digest = require "openssl.digest" |
36 | 36 | local bit = require "http.bit" |
37 | local onerror = require "http.connection_common".onerror | |
37 | 38 | local new_headers = require "http.headers".new |
38 | 39 | local http_request = require "http.request" |
39 | 40 | |
176 | 177 | end |
177 | 178 | |
178 | 179 | local function read_frame(sock, deadline) |
179 | local frame do | |
180 | local first_2, err, errno = sock:xread(2, "b", deadline and (deadline-monotime())) | |
180 | local frame, first_2 do | |
181 | local err, errno | |
182 | first_2, err, errno = sock:xread(2, "b", deadline and (deadline-monotime())) | |
181 | 183 | if not first_2 then |
182 | 184 | return nil, err, errno |
183 | 185 | elseif #first_2 ~= 2 then |
184 | 186 | sock:seterror("r", ce.EILSEQ) |
185 | return nil, ce.strerror(ce.EILSEQ), ce.EILSEQ | |
187 | local ok, errno2 = sock:unget(first_2) | |
188 | if not ok then | |
189 | return nil, onerror(sock, "unget", errno2) | |
190 | end | |
191 | return nil, onerror(sock, "read_frame", ce.EILSEQ) | |
186 | 192 | end |
187 | 193 | local byte1, byte2 = first_2:byte(1, 2) |
188 | 194 | frame = { |
199 | 205 | } |
200 | 206 | end |
201 | 207 | |
208 | local fill_length = frame.length | |
209 | if fill_length == 126 then | |
210 | fill_length = 2 | |
211 | elseif fill_length == 127 then | |
212 | fill_length = 8 | |
213 | end | |
214 | if frame.MASK then | |
215 | fill_length = fill_length + 4 | |
216 | end | |
217 | do | |
218 | local ok, err, errno = sock:fill(fill_length, 0) | |
219 | if not ok then | |
220 | local unget_ok1, unget_errno1 = sock:unget(first_2) | |
221 | if not unget_ok1 then | |
222 | return nil, onerror(sock, "unget", unget_errno1) | |
223 | end | |
224 | if errno == ce.ETIMEDOUT then | |
225 | local timeout = deadline and deadline-monotime() | |
226 | if cqueues.poll(sock, timeout) ~= timeout then | |
227 | -- retry | |
228 | return read_frame(sock, deadline) | |
229 | end | |
230 | elseif err == nil then | |
231 | sock:seterror("r", ce.EILSEQ) | |
232 | return nil, onerror(sock, "read_frame", ce.EILSEQ) | |
233 | end | |
234 | return nil, err, errno | |
235 | end | |
236 | end | |
237 | ||
238 | -- if `fill` succeeded these shouldn't be able to fail | |
239 | local extra_fill_unget | |
202 | 240 | if frame.length == 126 then |
203 | local length, err, errno = sock:xread(2, "b", deadline and (deadline-monotime())) | |
204 | if not length or #length ~= 2 then | |
205 | if err == nil then | |
241 | extra_fill_unget = assert(sock:xread(2, "b", 0)) | |
242 | frame.length = sunpack(">I2", extra_fill_unget) | |
243 | fill_length = fill_length - 2 | |
244 | elseif frame.length == 127 then | |
245 | extra_fill_unget = assert(sock:xread(8, "b", 0)) | |
246 | frame.length = sunpack(">I8", extra_fill_unget) | |
247 | fill_length = fill_length - 8 + frame.length | |
248 | end | |
249 | ||
250 | if extra_fill_unget then | |
251 | local ok, err, errno = sock:fill(fill_length, 0) | |
252 | if not ok then | |
253 | local unget_ok1, unget_errno1 = sock:unget(extra_fill_unget) | |
254 | if not unget_ok1 then | |
255 | return nil, onerror(sock, "unget", unget_errno1) | |
256 | end | |
257 | local unget_ok2, unget_errno2 = sock:unget(first_2) | |
258 | if not unget_ok2 then | |
259 | return nil, onerror(sock, "unget", unget_errno2) | |
260 | end | |
261 | if errno == ce.ETIMEDOUT then | |
262 | local timeout = deadline and deadline-monotime() | |
263 | if cqueues.poll(sock, timeout) ~= timeout then | |
264 | -- retry | |
265 | return read_frame(sock, deadline) | |
266 | end | |
267 | elseif err == nil then | |
206 | 268 | sock:seterror("r", ce.EILSEQ) |
207 | return nil, ce.strerror(ce.EILSEQ), ce.EILSEQ | |
269 | return nil, onerror(sock, "read_frame", ce.EILSEQ) | |
208 | 270 | end |
209 | 271 | return nil, err, errno |
210 | 272 | end |
211 | frame.length = sunpack(">I2", length) | |
212 | elseif frame.length == 127 then | |
213 | local length, err, errno = sock:xread(8, "b", deadline and (deadline-monotime())) | |
214 | if not length or #length ~= 8 then | |
215 | if err == nil then | |
216 | sock:seterror("r", ce.EILSEQ) | |
217 | return nil, ce.strerror(ce.EILSEQ), ce.EILSEQ | |
218 | end | |
219 | return nil, err, errno | |
220 | end | |
221 | frame.length = sunpack(">I8", length) | |
222 | 273 | end |
223 | 274 | |
224 | 275 | if frame.MASK then |
225 | local key, err, errno = sock:xread(4, "b", deadline and (deadline-monotime())) | |
226 | if not key or #key ~= 4 then | |
227 | if err == nil then | |
228 | sock:seterror("r", ce.EILSEQ) | |
229 | return nil, ce.strerror(ce.EILSEQ), ce.EILSEQ | |
230 | end | |
231 | return nil, err, errno | |
232 | end | |
276 | local key = assert(sock:xread(4, "b", 0)) | |
233 | 277 | frame.key = { key:byte(1, 4) } |
234 | 278 | end |
235 | 279 | |
236 | 280 | do |
237 | local data, err, errno = sock:xread(frame.length, "b", deadline and (deadline-monotime())) | |
238 | if data == nil or #data ~= frame.length then | |
239 | if err == nil then | |
240 | sock:seterror("r", ce.EILSEQ) | |
241 | return nil, ce.strerror(ce.EILSEQ), ce.EILSEQ | |
242 | end | |
243 | return nil, err, errno | |
244 | end | |
245 | ||
281 | local data = assert(sock:xread(frame.length, "b", 0)) | |
246 | 282 | if frame.MASK then |
247 | 283 | frame.data = apply_mask(data, frame.key) |
248 | 284 | else |
266 | 302 | |
267 | 303 | function websocket_methods:send_frame(frame, timeout) |
268 | 304 | if self.readyState < 1 then |
269 | return nil, ce.strerror(ce.ENOTCONN), ce.ENOTCONN | |
305 | return nil, onerror(self.socket, "send_frame", ce.ENOTCONN) | |
270 | 306 | elseif self.readyState > 2 then |
271 | return nil, ce.strerror(ce.EPIPE), ce.EPIPE | |
307 | return nil, onerror(self.socket, "send_frame", ce.EPIPE) | |
272 | 308 | end |
273 | 309 | local ok, err, errno = self.socket:xwrite(build_frame(frame), "bn", timeout) |
274 | 310 | if not ok then |
348 | 384 | |
349 | 385 | function websocket_methods:receive(timeout) |
350 | 386 | if self.readyState < 1 then |
351 | return nil, ce.strerror(ce.ENOTCONN), ce.ENOTCONN | |
387 | return nil, onerror(self.socket, "receive", ce.ENOTCONN) | |
352 | 388 | elseif self.readyState > 2 then |
353 | return nil, ce.strerror(ce.EPIPE), ce.EPIPE | |
389 | return nil, onerror(self.socket, "receive", ce.EPIPE) | |
354 | 390 | end |
355 | 391 | local deadline = timeout and (monotime()+timeout) |
356 | 392 | while true do |
357 | local frame, err, errno = read_frame(self.socket, deadline and (deadline-monotime())) | |
393 | local frame, err, errno = read_frame(self.socket, deadline) | |
358 | 394 | if frame == nil then |
359 | 395 | return nil, err, errno |
360 | 396 | end |
508 | 544 | |
509 | 545 | local function new_from_uri(uri, protocols) |
510 | 546 | local request = http_request.new_from_uri(uri) |
511 | local scheme = request.headers:get(":scheme") | |
512 | assert(scheme == "ws" or scheme == "wss", "scheme not websocket") | |
513 | 547 | local self = new("client") |
514 | 548 | self.request = request |
515 | 549 | self.request.version = 1.1 |
637 | 671 | -- Success! |
638 | 672 | assert(self.socket == nil, "websocket:connect called twice") |
639 | 673 | self.socket = assert(stream.connection:take_socket()) |
674 | self.socket:onerror(onerror) | |
640 | 675 | self.request = nil |
641 | 676 | self.headers = headers |
642 | 677 | self.readyState = 1 |
751 | 786 | response_headers:upsert("sec-websocket-accept", base64_sha1(self.key .. magic)) |
752 | 787 | |
753 | 788 | local chosen_protocol |
754 | if self.protocols then | |
755 | if options.protocols then | |
756 | for _, protocol in ipairs(options.protocols) do | |
757 | if self.protocols[protocol] then | |
758 | chosen_protocol = protocol | |
759 | break | |
760 | end | |
761 | end | |
762 | end | |
763 | if not chosen_protocol then | |
764 | return nil, "no matching protocol", ce.EILSEQNOSUPPORT | |
765 | end | |
766 | response_headers:upsert("sec-websocket-protocol", chosen_protocol) | |
789 | if self.protocols and options.protocols then | |
790 | --[[ The |Sec-WebSocket-Protocol| request-header field can be | |
791 | used to indicate what subprotocols (application-level protocols | |
792 | layered over the WebSocket Protocol) are acceptable to the client. | |
793 | The server selects one or none of the acceptable protocols and echoes | |
794 | that value in its handshake to indicate that it has selected that | |
795 | protocol.]] | |
796 | for _, protocol in ipairs(options.protocols) do | |
797 | if self.protocols[protocol] then | |
798 | response_headers:upsert("sec-websocket-protocol", protocol) | |
799 | chosen_protocol = protocol | |
800 | break | |
801 | end | |
802 | end | |
767 | 803 | end |
768 | 804 | |
769 | 805 | do |
774 | 810 | end |
775 | 811 | |
776 | 812 | self.socket = assert(self.stream.connection:take_socket()) |
813 | self.socket:onerror(onerror) | |
777 | 814 | self.stream = nil |
778 | 815 | self.readyState = 1 |
779 | 816 | self.protocol = chosen_protocol |
0 | package = "http" | |
1 | version = "0.1-0" | |
2 | ||
3 | description = { | |
4 | summary = "HTTP library for Lua"; | |
5 | homepage = "https://github.com/daurnimator/lua-http"; | |
6 | license = "MIT"; | |
7 | } | |
8 | ||
9 | source = { | |
10 | url = "https://github.com/daurnimator/lua-http/archive/v0.1.zip"; | |
11 | dir = "lua-http-0.1"; | |
12 | } | |
13 | ||
14 | dependencies = { | |
15 | "lua >= 5.1"; | |
16 | "compat53 >= 0.3"; -- Only if lua < 5.3 | |
17 | "bit32"; -- Only if lua == 5.1 | |
18 | "cqueues >= 20161214"; | |
19 | "luaossl >= 20161208"; | |
20 | "basexx >= 0.2.0"; | |
21 | "lpeg"; | |
22 | "lpeg_patterns >= 0.3"; | |
23 | "fifo"; | |
24 | } | |
25 | ||
26 | build = { | |
27 | type = "builtin"; | |
28 | modules = { | |
29 | ["http.bit"] = "http/bit.lua"; | |
30 | ["http.client"] = "http/client.lua"; | |
31 | ["http.connection_common"] = "http/connection_common.lua"; | |
32 | ["http.h1_connection"] = "http/h1_connection.lua"; | |
33 | ["http.h1_reason_phrases"] = "http/h1_reason_phrases.lua"; | |
34 | ["http.h1_stream"] = "http/h1_stream.lua"; | |
35 | ["http.h2_connection"] = "http/h2_connection.lua"; | |
36 | ["http.h2_error"] = "http/h2_error.lua"; | |
37 | ["http.h2_stream"] = "http/h2_stream.lua"; | |
38 | ["http.headers"] = "http/headers.lua"; | |
39 | ["http.hpack"] = "http/hpack.lua"; | |
40 | ["http.hsts"] = "http/hsts.lua"; | |
41 | ["http.proxies"] = "http/proxies.lua"; | |
42 | ["http.request"] = "http/request.lua"; | |
43 | ["http.server"] = "http/server.lua"; | |
44 | ["http.socks"] = "http/socks.lua"; | |
45 | ["http.stream_common"] = "http/stream_common.lua"; | |
46 | ["http.tls"] = "http/tls.lua"; | |
47 | ["http.util"] = "http/util.lua"; | |
48 | ["http.version"] = "http/version.lua"; | |
49 | ["http.websocket"] = "http/websocket.lua"; | |
50 | ["http.zlib"] = "http/zlib.lua"; | |
51 | ["http.compat.prosody"] = "http/compat/prosody.lua"; | |
52 | ["http.compat.socket"] = "http/compat/socket.lua"; | |
53 | }; | |
54 | } |
0 | package = "http" | |
1 | version = "scm-0" | |
2 | ||
3 | description = { | |
4 | summary = "HTTP library for Lua"; | |
5 | homepage = "https://github.com/daurnimator/lua-http"; | |
6 | license = "MIT"; | |
7 | } | |
8 | ||
9 | source = { | |
10 | url = "git+https://github.com/daurnimator/lua-http.git"; | |
11 | } | |
12 | ||
13 | dependencies = { | |
14 | "lua >= 5.1"; | |
15 | "compat53 >= 0.3"; -- Only if lua < 5.3 | |
16 | "bit32"; -- Only if lua == 5.1 | |
17 | "cqueues >= 20161214"; | |
18 | "luaossl >= 20161208"; | |
19 | "basexx >= 0.2.0"; | |
20 | "lpeg"; | |
21 | "lpeg_patterns >= 0.5"; | |
22 | "binaryheap >= 0.3"; | |
23 | "fifo"; | |
24 | -- "psl"; -- Optional | |
25 | } | |
26 | ||
27 | build = { | |
28 | type = "builtin"; | |
29 | modules = { | |
30 | ["http.bit"] = "http/bit.lua"; | |
31 | ["http.client"] = "http/client.lua"; | |
32 | ["http.connection_common"] = "http/connection_common.lua"; | |
33 | ["http.cookie"] = "http/cookie.lua"; | |
34 | ["http.h1_connection"] = "http/h1_connection.lua"; | |
35 | ["http.h1_reason_phrases"] = "http/h1_reason_phrases.lua"; | |
36 | ["http.h1_stream"] = "http/h1_stream.lua"; | |
37 | ["http.h2_connection"] = "http/h2_connection.lua"; | |
38 | ["http.h2_error"] = "http/h2_error.lua"; | |
39 | ["http.h2_stream"] = "http/h2_stream.lua"; | |
40 | ["http.headers"] = "http/headers.lua"; | |
41 | ["http.hpack"] = "http/hpack.lua"; | |
42 | ["http.hsts"] = "http/hsts.lua"; | |
43 | ["http.proxies"] = "http/proxies.lua"; | |
44 | ["http.request"] = "http/request.lua"; | |
45 | ["http.server"] = "http/server.lua"; | |
46 | ["http.socks"] = "http/socks.lua"; | |
47 | ["http.stream_common"] = "http/stream_common.lua"; | |
48 | ["http.tls"] = "http/tls.lua"; | |
49 | ["http.util"] = "http/util.lua"; | |
50 | ["http.version"] = "http/version.lua"; | |
51 | ["http.websocket"] = "http/websocket.lua"; | |
52 | ["http.zlib"] = "http/zlib.lua"; | |
53 | ["http.compat.prosody"] = "http/compat/prosody.lua"; | |
54 | ["http.compat.socket"] = "http/compat/socket.lua"; | |
55 | }; | |
56 | } |
0 | 0 | describe("http.client module", function() |
1 | 1 | local client = require "http.client" |
2 | local http_connection_common = require "http.connection_common" | |
2 | 3 | local http_h1_connection = require "http.h1_connection" |
3 | 4 | local http_h2_connection = require "http.h2_connection" |
4 | 5 | local http_headers = require "http.headers" |
104 | 105 | return http_h2_connection.new(s, "server", {}) |
105 | 106 | end) |
106 | 107 | end) |
108 | it("reports errors from :starttls", function() | |
109 | -- default settings should fail as it should't allow self-signed | |
110 | local s, c = ca.assert(cs.pair()) | |
111 | local cq = cqueues.new(); | |
112 | cq:wrap(function() | |
113 | local ok, err = client.negotiate(c, { | |
114 | tls = true; | |
115 | }) | |
116 | assert.falsy(ok) | |
117 | assert.truthy(err:match("starttls: ")) | |
118 | end) | |
119 | cq:wrap(function() | |
120 | s:onerror(http_connection_common.onerror) | |
121 | local ok, err = s:starttls() | |
122 | assert.falsy(ok) | |
123 | assert.truthy(err:match("starttls: ")) | |
124 | end) | |
125 | assert_loop(cq, TEST_TIMEOUT) | |
126 | assert.truthy(cq:empty()) | |
127 | c:close() | |
128 | s:close() | |
129 | end) | |
107 | 130 | end) |
7 | 7 | -- in the luasocket example they use 'wrong.host', but 'host' is now a valid TLD. |
8 | 8 | -- use 'wrong.invalid' instead for this test. |
9 | 9 | local r, e = http.request("http://wrong.invalid/") |
10 | assert.same(r, nil) | |
10 | assert.same(nil, r) | |
11 | 11 | -- in luasocket, the error is documented as "host not found", but we allow something else |
12 | 12 | assert.same("string", type(e)) |
13 | 13 | end) |
0 | describe("cookie module", function() | |
1 | local http_cookie = require "http.cookie" | |
2 | local http_headers = require "http.headers" | |
3 | describe(".parse_cookies", function() | |
4 | it("can parse a request with a single cookie headers", function() | |
5 | local h = http_headers.new() | |
6 | h:append("cookie", "foo=FOO; bar=BAR") | |
7 | assert.same({ | |
8 | foo = "FOO"; | |
9 | bar = "BAR"; | |
10 | }, http_cookie.parse_cookies(h)) | |
11 | end) | |
12 | it("can parse a request with a multiple cookie headers", function() | |
13 | local h = http_headers.new() | |
14 | h:append("cookie", "foo=FOO; bar=BAR") | |
15 | h:append("cookie", "baz=BAZ; bar=BAR2") | |
16 | h:append("cookie", "qux=QUX") | |
17 | assert.same({ | |
18 | foo = "FOO"; | |
19 | bar = "BAR2"; -- last occurence should win | |
20 | baz = "BAZ"; | |
21 | qux = "QUX"; | |
22 | }, http_cookie.parse_cookies(h)) | |
23 | end) | |
24 | end) | |
25 | it(":get works", function() | |
26 | local s = http_cookie.new_store() | |
27 | assert.same(nil, s:get("mysite.com", "/", "lang")) | |
28 | local key, value, params = http_cookie.parse_setcookie("lang=en-US; Expires=Wed, 09 Jun 2021 10:18:14 GMT") | |
29 | assert(s:store("mysite.com", "/", true, true, nil, key, value, params)) | |
30 | assert.same("en-US", s:get("mysite.com", "/", "lang")) | |
31 | assert.same(nil, s:get("other.com", "/", "lang")) | |
32 | assert.same(nil, s:get("mysite.com", "/other", "lang")) | |
33 | assert.same(nil, s:get("mysite.com", "/", "other")) | |
34 | end) | |
35 | describe("examples from spec", function() | |
36 | it("can handle basic cookie without parameters", function() | |
37 | local s = http_cookie.new_store() | |
38 | assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("SID=31d4d96e407aad42"))) | |
39 | assert.same("SID=31d4d96e407aad42", s:lookup("example.com", "/", true, true)) | |
40 | assert.same("SID=31d4d96e407aad42", s:lookup("example.com", "/other", true, true)) | |
41 | assert.same("", s:lookup("subdomain.example.com", "/", true, true)) | |
42 | assert.same("", s:lookup("other.com", "/", true, true)) | |
43 | end) | |
44 | ||
45 | it("can handle cookie with Path and Domain parameters", function() | |
46 | local s = http_cookie.new_store() | |
47 | assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("SID=31d4d96e407aad42; Path=/; Domain=example.com"))) | |
48 | assert.same("SID=31d4d96e407aad42", s:lookup("example.com", "/", true, true)) | |
49 | assert.same("SID=31d4d96e407aad42", s:lookup("example.com", "/other", true, true)) | |
50 | assert.same("SID=31d4d96e407aad42", s:lookup("subdomain.example.com", "/", true, true)) | |
51 | assert.same("", s:lookup("other.com", "/", true, true)) | |
52 | end) | |
53 | ||
54 | it("can handle two cookies with different names and parameters", function() | |
55 | local s = http_cookie.new_store() | |
56 | assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("SID=31d4d96e407aad42; Path=/; Secure; HttpOnly"))) | |
57 | assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("lang=en-US; Path=/; Domain=example.com"))) | |
58 | assert.same("SID=31d4d96e407aad42; lang=en-US", s:lookup("example.com", "/other", true, true)) | |
59 | assert.same("lang=en-US", s:lookup("subdomain.example.com", "/", true, true)) | |
60 | assert.same("lang=en-US", s:lookup("example.com", "/", true, false)) | |
61 | assert.same("lang=en-US", s:lookup("example.com", "/", false, true)) | |
62 | assert.same("", s:lookup("other.com", "/", true, true)) | |
63 | end) | |
64 | ||
65 | it("can expire a cookie", function() | |
66 | local s = http_cookie.new_store() | |
67 | s.time = function() return 1234567890 end -- set time to something before the expiry | |
68 | -- in spec this is kept from previous example. | |
69 | assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("SID=31d4d96e407aad42; Path=/; Secure; HttpOnly"))) | |
70 | assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("lang=en-US; Expires=Wed, 09 Jun 2021 10:18:14 GMT"))) | |
71 | assert.same("SID=31d4d96e407aad42; lang=en-US", s:lookup("example.com", "/", true, true)) | |
72 | s.time = function() return 9234567890 end -- set time to something after the expiry | |
73 | assert.same("SID=31d4d96e407aad42", s:lookup("example.com", "/", true, true)) | |
74 | end) | |
75 | end) | |
76 | describe(":store uses correct domain", function() | |
77 | it("ignores leading '.' in domain", function() | |
78 | local s = http_cookie.new_store() | |
79 | assert.truthy(s:store("subdomain.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=.example.com"))) | |
80 | assert.same("bar", s:get("example.com", "/", "foo")) | |
81 | end) | |
82 | ;(http_cookie.store_methods.psl and it or pending)("checks against public suffix list", function() | |
83 | assert(not http_cookie.store_methods.psl:is_cookie_domain_acceptable("foo.com", "com")) | |
84 | local s = http_cookie.new_store() | |
85 | assert.falsy(s:store("foo.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=com"))) | |
86 | end) | |
87 | ;(http_cookie.store_methods.psl and it or pending)("allows explicit domains even when on the public suffix list", function() | |
88 | assert(http_cookie.store_methods.psl:is_public_suffix("hashbang.sh")) | |
89 | local s = http_cookie.new_store() | |
90 | assert.truthy(s:store("hashbang.sh", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=hashbang.sh"))) | |
91 | -- And check that host_only flag has been set to true | |
92 | assert.same("foo=bar", s:lookup("hashbang.sh", "/", true, true)) | |
93 | assert.same("", s:lookup("sub.hashbang.sh", "/", true, true)) | |
94 | end) | |
95 | it("doesn't domain-match a completely different domain", function() | |
96 | local s = http_cookie.new_store() | |
97 | assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=other.example.com"))) | |
98 | end) | |
99 | it("doesn't domain-match a subdomain when request is at super-domain", function() | |
100 | local s = http_cookie.new_store() | |
101 | assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=subdomain.example.com"))) | |
102 | end) | |
103 | it("doesn't domain-match a partial ip", function() | |
104 | local s = http_cookie.new_store() | |
105 | assert.falsy(s:store("127.0.0.1", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=0.0.1"))) | |
106 | end) | |
107 | end) | |
108 | describe("domain-match on lookup", function() | |
109 | it("matches domains correctly when host_only flag is true", function() | |
110 | local s = http_cookie.new_store() | |
111 | assert.truthy(s:store("s.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar"))) | |
112 | assert.same("bar", s:get("s.example.com", "/", "foo")) | |
113 | ||
114 | assert.same("foo=bar", s:lookup("s.example.com", "/", true, true)) | |
115 | assert.same("", s:lookup("s.s.example.com", "/", true, true)) | |
116 | assert.same("", s:lookup("s.s.s.example.com", "/", true, true)) | |
117 | assert.same("", s:lookup("com", "/", true, true)) | |
118 | assert.same("", s:lookup("example.com", "/", true, true)) | |
119 | assert.same("", s:lookup("other.com", "/", true, true)) | |
120 | assert.same("", s:lookup("s.other.com", "/", true, true)) | |
121 | end) | |
122 | it("matches domains correctly when host_only flag is false", function() | |
123 | local s = http_cookie.new_store() | |
124 | assert.truthy(s:store("s.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=s.example.com"))) | |
125 | assert.same("bar", s:get("s.example.com", "/", "foo")) | |
126 | ||
127 | assert.same("foo=bar", s:lookup("s.example.com", "/", true, true)) | |
128 | assert.same("foo=bar", s:lookup("s.s.example.com", "/", true, true)) | |
129 | assert.same("foo=bar", s:lookup("s.s.s.example.com", "/", true, true)) | |
130 | assert.same("", s:lookup("com", "/", true, true)) | |
131 | assert.same("", s:lookup("example.com", "/", true, true)) | |
132 | assert.same("", s:lookup("other.com", "/", true, true)) | |
133 | assert.same("", s:lookup("s.other.com", "/", true, true)) | |
134 | end) | |
135 | end) | |
136 | describe(":store uses correct path", function() | |
137 | it("handles absolute set-cookie header", function() | |
138 | local s = http_cookie.new_store() | |
139 | assert.truthy(s:store("example.com", "/absolute/path", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=/different/absolute/path"))) | |
140 | assert.same("bar", s:get("example.com", "/different/absolute/path", "foo")) | |
141 | end) | |
142 | it("handles relative set-cookie path", function() | |
143 | local s = http_cookie.new_store() | |
144 | assert.truthy(s:store("example.com", "/absolute/path", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=relative/path"))) | |
145 | -- should trim off last component | |
146 | assert.same("bar", s:get("example.com", "/absolute", "foo")) | |
147 | end) | |
148 | it("handles relative set-cookie path with no request path", function() | |
149 | local s = http_cookie.new_store() | |
150 | assert.truthy(s:store("example.com", "?", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=relative/path"))) | |
151 | -- should default to / | |
152 | assert.same("bar", s:get("example.com", "/", "foo")) | |
153 | end) | |
154 | it("handles absolute set-cookie path with relative request path", function() | |
155 | local s = http_cookie.new_store() | |
156 | assert.truthy(s:store("example.com", "relative/path", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=/absolute/path"))) | |
157 | assert.same("bar", s:get("example.com", "/absolute/path", "foo")) | |
158 | end) | |
159 | it("handles relative request path and relative set-cookie header", function() | |
160 | local s = http_cookie.new_store() | |
161 | assert.truthy(s:store("example.com", "relative/path", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=different/relative/path"))) | |
162 | assert.same("bar", s:get("example.com", "/", "foo")) | |
163 | end) | |
164 | end) | |
165 | it("matches paths correctly", function() | |
166 | local s = http_cookie.new_store() | |
167 | assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=/path/subpath"))) | |
168 | assert.same("foo=bar", s:lookup("example.com", "/path/subpath/foo", true, true)) | |
169 | assert.same("foo=bar", s:lookup("example.com", "/path/subpath/bar", true, true)) | |
170 | assert.same("foo=bar", s:lookup("example.com", "/path/subpath", true, true)) | |
171 | assert.same("", s:lookup("example.com", "/", true, true)) | |
172 | assert.same("", s:lookup("example.com", "/path", true, true)) | |
173 | assert.same("", s:lookup("example.com", "/path/otherpath/", true, true)) | |
174 | assert.same("", s:lookup("example.com", "/path/otherpath/things", true, true)) | |
175 | end) | |
176 | it("prefers max-age over expires", function() | |
177 | local s = http_cookie.new_store() | |
178 | assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; max-age=50; Expires=Thu, 01 Jan 1970 00:00:00 GMT"))) | |
179 | assert.truthy(s:get("example.com", "/", "foo")) | |
180 | assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; max-age=0; Expires=Tue, 19 Jan 2038 03:14:07 GMT"))) | |
181 | assert.falsy(s:get("example.com", "/", "foo")) | |
182 | end) | |
183 | it("supports HttpOnly attribute", function() | |
184 | local s = http_cookie.new_store() | |
185 | assert.falsy(s:store("example.com", "/", false, true, nil, http_cookie.parse_setcookie("foo=bar; HttpOnly"))) | |
186 | assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; HttpOnly"))) | |
187 | assert.same("", s:lookup("example.com", "/", false, true)) | |
188 | assert.same("foo=bar", s:lookup("example.com", "/", true, true)) | |
189 | -- Now try and overwrite it with non-http :store | |
190 | assert.falsy(s:store("example.com", "/", false, true, nil, http_cookie.parse_setcookie("foo=bar"))) | |
191 | end) | |
192 | it("supports Secure attribute", function() | |
193 | local s = http_cookie.new_store() | |
194 | assert.falsy(s:store("example.com", "/", true, false, nil, http_cookie.parse_setcookie("foo=bar; Secure"))) | |
195 | assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Secure"))) | |
196 | assert.same("", s:lookup("example.com", "/", true, false)) | |
197 | assert.same("foo=bar", s:lookup("example.com", "/", true, true)) | |
198 | end) | |
199 | describe("tough cookies", function() | |
200 | it("enforces __Secure- prefix", function() | |
201 | local s = http_cookie.new_store() | |
202 | assert.falsy(s:store("example.com", "/", true, false, nil, http_cookie.parse_setcookie("__Secure-foo=bar; Secure"))) | |
203 | assert.falsy(s:store("example.com", "/", true, false, nil, http_cookie.parse_setcookie("__Secure-foo=bar"))) | |
204 | assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("__Secure-foo=bar;"))) | |
205 | assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("__Secure-foo=bar; Secure"))) | |
206 | end) | |
207 | it("enforces __Host- prefix", function() | |
208 | local s = http_cookie.new_store() | |
209 | -- Checks secure flag | |
210 | assert.falsy(s:store("example.com", "/", true, false, nil, http_cookie.parse_setcookie("__Host-foo=bar; Secure"))) | |
211 | assert.falsy(s:store("example.com", "/", true, false, nil, http_cookie.parse_setcookie("__Host-foo=bar"))) | |
212 | assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("__Host-foo=bar;"))) | |
213 | -- Checks for host only flag | |
214 | assert.falsy(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("__Host-foo=bar; Secure; Domain=example.com"))) | |
215 | -- Checks that path is / | |
216 | assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("__Host-foo=bar; Secure; Path=/path"))) | |
217 | -- Success case | |
218 | assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("__Host-foo=bar; Secure"))) | |
219 | end) | |
220 | end) | |
221 | describe("cookie fixing mitigation", function() | |
222 | it("ignores already existing path", function() | |
223 | local s = http_cookie.new_store() | |
224 | assert.truthy(s:store("example.com", "/path/subpath/foo", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=/path; Secure"))) | |
225 | assert.falsy(s:store("example.com", "/path/subpath/foo", true, false, nil, http_cookie.parse_setcookie("foo=bar; Path=/path"))) | |
226 | end) | |
227 | end) | |
228 | describe("SameSite attribute", function() | |
229 | it("fails to store if domain and site_for_cookies don't match", function() | |
230 | local s = http_cookie.new_store() | |
231 | assert.falsy(s:store("example.com", "/", true, true, "other.com", http_cookie.parse_setcookie("foo=foo; SameSite=Strict"))) | |
232 | end) | |
233 | ||
234 | it("implements SameSite=Strict", function() | |
235 | local s = http_cookie.new_store() | |
236 | assert(s:store("example.com", "/", true, true, "example.com", http_cookie.parse_setcookie("foo=foo; SameSite=Strict"))) | |
237 | assert.same("foo=foo", s:lookup("example.com", "/", true, true, true, "example.com")) | |
238 | assert.same("", s:lookup("example.com", "/", true, true, true, "other.com")) | |
239 | end) | |
240 | ||
241 | it("implements SameSite=Lax", function() | |
242 | local s = http_cookie.new_store() | |
243 | assert(s:store("example.com", "/", true, true, "example.com", http_cookie.parse_setcookie("foo=foo; SameSite=Lax"))) | |
244 | assert.same("foo=foo", s:lookup("example.com", "/", true, true, true, "example.com", true)) | |
245 | assert.same("foo=foo", s:lookup("example.com", "/", true, true, true, "other.com", true)) | |
246 | assert.same("", s:lookup("example.com", "/", true, true, false, "other.com", true)) | |
247 | assert.same("", s:lookup("example.com", "/", true, true, true, "other.com", false)) | |
248 | assert.same("", s:lookup("example.com", "/", true, true, false, "other.com", false)) | |
249 | end) | |
250 | end) | |
251 | it("cleans up", function() | |
252 | local s = http_cookie.new_store() | |
253 | assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo; Expires=Wed, 09 Jun 2021 10:18:14 GMT"))) | |
254 | assert.same("foo", s:get("example.com", "/", "foo")) | |
255 | s.time = function() return 9876543210 end -- set time to something after the expiry | |
256 | s:clean() | |
257 | assert.same(nil, s:get("example.com", "/", "foo")) | |
258 | end) | |
259 | describe(":remove()", function() | |
260 | it("can remove cookies by domain", function() | |
261 | local s = http_cookie.new_store() | |
262 | -- Try remove on empty store | |
263 | s:remove("example.com") | |
264 | ||
265 | assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo"))) | |
266 | assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=other; Path=/subpath"))) | |
267 | assert.truthy(s:store("other.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar"))) | |
268 | assert.same("foo", s:get("example.com", "/", "foo")) | |
269 | assert.same("other", s:get("example.com", "/subpath", "foo")) | |
270 | assert.same("bar", s:get("other.com", "/", "bar")) | |
271 | ||
272 | s:remove("example.com") | |
273 | assert.same(nil, s:get("example.com", "/", "foo")) | |
274 | assert.same(nil, s:get("example.com", "/subpath", "foo")) | |
275 | assert.same("bar", s:get("other.com", "/", "bar")) | |
276 | end) | |
277 | it("can remove cookies by path", function() | |
278 | local s = http_cookie.new_store() | |
279 | -- Try remove on empty store | |
280 | s:remove("example.com", "/") | |
281 | ||
282 | assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo"))) | |
283 | assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=other; Path=/subpath"))) | |
284 | assert.truthy(s:store("other.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar"))) | |
285 | assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("qux=qux"))) | |
286 | assert.same("foo", s:get("example.com", "/", "foo")) | |
287 | assert.same("other", s:get("example.com", "/subpath", "foo")) | |
288 | assert.same("bar", s:get("other.com", "/", "bar")) | |
289 | assert.same("qux", s:get("example.com", "/", "qux")) | |
290 | ||
291 | -- Remove all names under "/" path | |
292 | s:remove("example.com", "/") | |
293 | assert.same(nil, s:get("example.com", "/", "foo")) | |
294 | assert.same("other", s:get("example.com", "/subpath", "foo")) | |
295 | assert.same("bar", s:get("other.com", "/", "bar")) | |
296 | assert.same(nil, s:get("example.com", "/", "qux")) | |
297 | ||
298 | -- Remove last path in domain (making domain empty) | |
299 | s:remove("example.com", "/subpath") | |
300 | assert.same(nil, s:get("example.com", "/", "foo")) | |
301 | assert.same(nil, s:get("example.com", "/subpath", "foo")) | |
302 | assert.same("bar", s:get("other.com", "/", "bar")) | |
303 | assert.same(nil, s:get("example.com", "/", "qux")) | |
304 | end) | |
305 | it("can remove cookies by name", function() | |
306 | local s = http_cookie.new_store() | |
307 | -- Try remove on empty store | |
308 | s:remove("example.com", "/", "foo") | |
309 | ||
310 | assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo"))) | |
311 | assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=other; Path=/subpath"))) | |
312 | assert.truthy(s:store("other.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar"))) | |
313 | assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("qux=qux"))) | |
314 | assert.same("foo", s:get("example.com", "/", "foo")) | |
315 | assert.same("other", s:get("example.com", "/subpath", "foo")) | |
316 | assert.same("bar", s:get("other.com", "/", "bar")) | |
317 | assert.same("qux", s:get("example.com", "/", "qux")) | |
318 | ||
319 | -- Remove just one name | |
320 | s:remove("example.com", "/", "foo") | |
321 | assert.same(nil, s:get("example.com", "/", "foo")) | |
322 | assert.same("other", s:get("example.com", "/subpath", "foo")) | |
323 | assert.same("bar", s:get("other.com", "/", "bar")) | |
324 | assert.same("qux", s:get("example.com", "/", "qux")) | |
325 | ||
326 | -- Remove last name in path (making path empty) | |
327 | s:remove("example.com", "/", "qux") | |
328 | assert.same(nil, s:get("example.com", "/", "foo")) | |
329 | assert.same("other", s:get("example.com", "/subpath", "foo")) | |
330 | assert.same("bar", s:get("other.com", "/", "bar")) | |
331 | assert.same(nil, s:get("example.com", "/", "qux")) | |
332 | ||
333 | -- Remove last name in domain (making domain empty) | |
334 | s:remove("example.com", "/subpath", "foo") | |
335 | assert.same(nil, s:get("example.com", "/", "foo")) | |
336 | assert.same(nil, s:get("example.com", "/subpath", "foo")) | |
337 | assert.same("bar", s:get("other.com", "/", "bar")) | |
338 | assert.same(nil, s:get("example.com", "/", "qux")) | |
339 | end) | |
340 | end) | |
341 | describe("cookie order", function() | |
342 | it("returns in order for simple cookies", function() -- used as assumed base case for future tests in this section | |
343 | local s = http_cookie.new_store() | |
344 | assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=basic"))) | |
345 | assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=basic"))) | |
346 | assert.same("bar=basic; foo=basic", s:lookup("example.com", "/", true, true)) | |
347 | end) | |
348 | it("returns in order for domain differing cookies", function() -- spec doesn't care about this case | |
349 | local s = http_cookie.new_store() | |
350 | assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=domain; Domain=sub.example.com"))) | |
351 | assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=domain; Domain=example.com"))) | |
352 | assert.same("bar=domain; foo=domain", s:lookup("sub.example.com", "/", true, true)) | |
353 | end) | |
354 | it("returns in order for different length paths", function() | |
355 | local s = http_cookie.new_store() | |
356 | assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=path; Path=/path/longerpath"))) | |
357 | assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=path; Path=/path/"))) | |
358 | assert.same("foo=path; bar=path", s:lookup("example.com", "/path/longerpath", true, true)) | |
359 | end) | |
360 | it("returns in order for different creation times", function() | |
361 | local s = http_cookie.new_store() | |
362 | s.time = function() return 0 end | |
363 | assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=time"))) | |
364 | s.time = function() return 50 end | |
365 | assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=time"))) | |
366 | assert.same("foo=time; bar=time", s:lookup("example.com", "/path/longerpath", true, true)) | |
367 | end) | |
368 | it("returns in order when all together!", function() | |
369 | local s = http_cookie.new_store() | |
370 | assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=basic"))) | |
371 | assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=basic"))) | |
372 | assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=path; Path=/path/longerpath"))) | |
373 | assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=path; Path=/path/"))) | |
374 | -- foo=domain case would get overridden below | |
375 | assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=domain; Domain=example.com"))) | |
376 | s.time = function() return 0 end | |
377 | assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=time"))) | |
378 | s.time = function() return 50 end | |
379 | assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=time"))) | |
380 | assert.same("foo=path; bar=path; bar=domain; bar=time; foo=time", s:lookup("sub.example.com", "/path/longerpath", true, true)) | |
381 | end) | |
382 | end) | |
383 | it("can store cookies from a request+response", function() | |
384 | local s = http_cookie.new_store() | |
385 | local req_headers = http_headers.new() | |
386 | req_headers:append(":scheme", "http") | |
387 | req_headers:append(":method", "GET") | |
388 | req_headers:append(":path", "/") | |
389 | local resp_headers = http_headers.new() | |
390 | resp_headers:append(":status", "200") | |
391 | resp_headers:append("set-cookie", http_cookie.bake("foo", "FOO")) | |
392 | resp_headers:append("set-cookie", http_cookie.bake("bar", "BAR", 0)) | |
393 | assert.truthy(s:store_from_request(req_headers, resp_headers, "my.host", "my.host")) | |
394 | assert.same("FOO", s:get("my.host", "/", "foo")) | |
395 | assert.same(nil, s:get("my.host", "/", "bar")) | |
396 | -- Now with an :authority header | |
397 | req_headers:append(":authority", "my.host") | |
398 | resp_headers:append("set-cookie", http_cookie.bake("baz", "BAZ")) | |
399 | assert.truthy(s:store_from_request(req_headers, resp_headers, "my.host", "my.host")) | |
400 | assert.same("FOO", s:get("my.host", "/", "foo")) | |
401 | assert.same(nil, s:get("my.host", "/", "bar")) | |
402 | assert.same("BAZ", s:get("my.host", "/", "baz")) | |
403 | end) | |
404 | it("enforces store.max_cookie_length", function() | |
405 | local s = http_cookie.new_store() | |
406 | s.max_cookie_length = 3 | |
407 | assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo"))) | |
408 | s.max_cookie_length = 8 | |
409 | assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo"))) | |
410 | assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=longervalue"))) | |
411 | end) | |
412 | it("enforces store.max_cookies", function() | |
413 | local s = http_cookie.new_store() | |
414 | s.max_cookies = 0 | |
415 | assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo"))) | |
416 | s.max_cookies = 1 | |
417 | assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo"))) | |
418 | assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar"))) | |
419 | s:remove("example.com", "/", "foo") | |
420 | assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar"))) | |
421 | end) | |
422 | it("enforces store.max_cookies_per_domain", function() | |
423 | local s = http_cookie.new_store() | |
424 | s.max_cookies_per_domain = 0 | |
425 | assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo"))) | |
426 | s.max_cookies_per_domain = 1 | |
427 | assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo"))) | |
428 | assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar"))) | |
429 | assert.truthy(s:store("other.com", "/", true, true, nil, http_cookie.parse_setcookie("baz=baz"))) | |
430 | s:remove("example.com", "/", "foo") | |
431 | assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar"))) | |
432 | end) | |
433 | it("can bake cookies", function() | |
434 | assert.same("foo=bar", http_cookie.bake("foo", "bar")) | |
435 | assert.same("foo=bar; Max-Age=0", http_cookie.bake("foo", "bar", -math.huge)) | |
436 | assert.same("foo=bar; Expires=Thu, 01 Jan 1970 00:00:00 GMT", http_cookie.bake("foo", "bar", 0)) | |
437 | assert.same("foo=bar; Max-Age=0; Domain=example.com; Path=/path; Secure; HttpOnly; SameSite=Strict", | |
438 | http_cookie.bake("foo", "bar", -math.huge, "example.com", "/path", true, true, "strict")) | |
439 | assert.same("foo=bar; Max-Age=0; Domain=example.com; Path=/path; Secure; HttpOnly; SameSite=Lax", | |
440 | http_cookie.bake("foo", "bar", -math.huge, "example.com", "/path", true, true, "lax")) | |
441 | assert.has.errors(function() | |
442 | http_cookie.bake("foo", "bar", -math.huge, "example.com", "/path", true, true, "somethingelse") | |
443 | end, [[invalid value for same_site, expected "strict" or "lax"]]) | |
444 | end) | |
445 | it("can dump a netscape format cookiejar", function() | |
446 | local s = http_cookie.new_store() | |
447 | assert(s:store("example.com", "/", true, true, "example.com", http_cookie.parse_setcookie("foo=FOO;"))) | |
448 | assert(s:store("example.com", "/", true, true, "example.com", http_cookie.parse_setcookie("bar=BAR; HttpOnly"))) | |
449 | assert(s:store("example.com", "/", true, true, "example.com", http_cookie.parse_setcookie("baz=BAZ; Path=/someplace"))) | |
450 | assert(s:store("sub.example.com", "/", true, true, "sub.example.com", http_cookie.parse_setcookie("subdomain=matched; Domain=sub.example.com"))) | |
451 | assert(s:store("example.com", "/", true, true, "example.com", http_cookie.parse_setcookie("qux=QUX; SameSite=Lax"))) | |
452 | assert(s:store("other.com", "/", true, true, "other.com", http_cookie.parse_setcookie("foo=somethingelse; HttpOnly"))) | |
453 | local file = io.tmpfile() | |
454 | assert(s:save_to_file(file)) | |
455 | assert(file:seek("set")) | |
456 | -- preamble | |
457 | assert.truthy(assert(file:read("*l")):match"^#.*HTTP Cookie File") | |
458 | assert.truthy(assert(file:read("*l")):match"^#") | |
459 | assert.same("", assert(file:read("*l"))) | |
460 | local lines = {} | |
461 | for line in file:lines() do | |
462 | table.insert(lines, line) | |
463 | end | |
464 | table.sort(lines) | |
465 | assert.same({ | |
466 | "#HttpOnly_example.com TRUE / FALSE 2147483647 bar BAR"; | |
467 | "#HttpOnly_other.com TRUE / FALSE 2147483647 foo somethingelse"; | |
468 | "example.com TRUE / FALSE 2147483647 foo FOO"; | |
469 | "example.com TRUE / FALSE 2147483647 qux QUX"; | |
470 | "example.com TRUE /someplace FALSE 2147483647 baz BAZ"; | |
471 | "sub.example.com FALSE / FALSE 2147483647 subdomain matched"; | |
472 | }, lines) | |
473 | end) | |
474 | it("can load a netscape format cookiejar", function() | |
475 | local s = http_cookie.new_store() | |
476 | local file = io.tmpfile() | |
477 | assert(file:write([[ | |
478 | # Netscape HTTP Cookie File | |
479 | # https://curl.haxx.se/docs/http-cookies.html | |
480 | # This file was generated by libcurl! Edit at your own risk. | |
481 | ||
482 | #HttpOnly_other.com TRUE / FALSE 2147483647 foo somethingelse | |
483 | sub.example.com FALSE / FALSE 2147483647 subdomain matched | |
484 | example.com TRUE / TRUE 2147483647 qux QUX | |
485 | #HttpOnly_example.com TRUE / FALSE 2147483647 bar BAR | |
486 | example.com TRUE / FALSE 2147483647 foo FOO | |
487 | example.com TRUE /someplace FALSE 2147483647 baz BAZ | |
488 | ]])) | |
489 | assert(file:seek("set")) | |
490 | assert(s:load_from_file(file)) | |
491 | assert.same("bar=BAR; foo=FOO; qux=QUX", s:lookup("example.com", "/", true, true)) | |
492 | end) | |
493 | it("can load a netscape format cookiejar with invalid lines", function() | |
494 | local s = http_cookie.new_store() | |
495 | local file = io.tmpfile() | |
496 | assert(file:write([[ | |
497 | example.com TRUE / TRUE 2147483647 qux QUX | |
498 | not a valid line | |
499 | example.com INVALID_BOOLEAN / FALSE 2147483647 should fail | |
500 | example.com TRUE / INVALID_BOOLEAN 2147483647 should fail | |
501 | example.com TRUE / FALSE not_a_number should fail | |
502 | #HttpOnly_example.com TRUE / FALSE 2147483647 bar BAR | |
503 | example.com TRUE / FALSE 2147483647 foo FOO | |
504 | ]])) | |
505 | assert(file:seek("set")) | |
506 | assert(s:load_from_file(file)) | |
507 | assert.same("bar=BAR; foo=FOO; qux=QUX", s:lookup("example.com", "/", true, true)) | |
508 | end) | |
509 | end) |
112 | 112 | local function test(chunk) |
113 | 113 | local s, c = new_pair(1.1) |
114 | 114 | s = s:take_socket() |
115 | assert(s:write(chunk, "\r\n")) | |
116 | assert(s:flush()) | |
117 | assert.same(ce.EILSEQ, select(3, c:read_request_line())) | |
118 | s:close() | |
119 | c:close() | |
120 | end | |
121 | test("invalid request line") | |
122 | test(" / HTTP/1.1") | |
123 | test("HTTP/1.1") | |
124 | test("GET HTTP/1.0") | |
125 | test("GET HTTP/1.0") | |
126 | test("GET HTTP/1.0") | |
127 | test("GET / HTP/1.1") | |
128 | test("GET / HTTP 1.1") | |
129 | test("GET / HTTP/1") | |
130 | test("GET / HTTP/2.0") | |
131 | test("GET / HTTP/1.1\nHeader: value") -- missing \r | |
115 | assert(s:xwrite(chunk, "n", TEST_TIMEOUT)) | |
116 | s:close() | |
117 | assert.same(ce.EILSEQ, select(3, c:read_request_line(TEST_TIMEOUT))) | |
118 | c:close() | |
119 | end | |
120 | test("GET") -- no \r\n | |
121 | test("\r\nGET") -- no \r\n with preceeding \r\n | |
122 | test("invalid request line\r\n") | |
123 | test(" / HTTP/1.1\r\n") | |
124 | test("\r\n / HTTP/1.1\r\n") | |
125 | test("HTTP/1.1\r\n") | |
126 | test("GET HTTP/1.0\r\n") | |
127 | test("GET HTTP/1.0\r\n") | |
128 | test("GET HTTP/1.0\r\n") | |
129 | test("GET / HTP/1.1\r\n") | |
130 | test("GET / HTTP 1.1\r\n") | |
131 | test("GET / HTTP/1\r\n") | |
132 | test("GET / HTTP/2.0\r\n") | |
133 | test("GET / HTTP/1.1\nHeader: value\r\n") -- missing \r | |
132 | 134 | end) |
133 | 135 | it(":read_request_line should allow a leading CRLF", function() |
134 | 136 | local function test(chunk) |
140 | 142 | c:close() |
141 | 143 | end |
142 | 144 | test("\r\nGET / HTTP/1.1\r\n") |
145 | end) | |
146 | describe("overlong lines", function() | |
147 | it(":read_request_line", function() | |
148 | local s, c = new_pair(1.1) | |
149 | s = s:take_socket() | |
150 | assert(s:xwrite(("a"):rep(10000), "n")) | |
151 | assert.same(ce.EILSEQ, select(3, c:read_request_line(TEST_TIMEOUT))) | |
152 | s:close() | |
153 | c:close() | |
154 | end) | |
155 | it(":read_status_line", function() | |
156 | local s, c = new_pair(1.1) | |
157 | s = s:take_socket() | |
158 | assert(s:xwrite(("a"):rep(10000), "n")) | |
159 | assert.same(ce.EILSEQ, select(3, c:read_status_line(TEST_TIMEOUT))) | |
160 | s:close() | |
161 | c:close() | |
162 | end) | |
163 | it(":read_header", function() | |
164 | local s, c = new_pair(1.1) | |
165 | s = s:take_socket() | |
166 | assert(s:xwrite(("a"):rep(10000), "n")) | |
167 | assert.same(ce.EILSEQ, select(3, c:read_header(TEST_TIMEOUT))) | |
168 | s:close() | |
169 | c:close() | |
170 | end) | |
171 | it(":read_body_chunk", function() | |
172 | local s, c = new_pair(1.1) | |
173 | s = s:take_socket() | |
174 | assert(s:xwrite(("a"):rep(10000), "n")) | |
175 | assert.same(ce.EILSEQ, select(3, c:read_body_chunk(TEST_TIMEOUT))) | |
176 | s:close() | |
177 | c:close() | |
178 | end) | |
143 | 179 | end) |
144 | 180 | it("status line should round trip", function() |
145 | 181 | local function test(req_version, req_status, req_reason) |
491 | 527 | assert(c:xwrite("\n", "n")) |
492 | 528 | assert.same({false}, {s:read_body_chunk(0.001)}) |
493 | 529 | s:close() |
530 | c:close() | |
531 | end) | |
532 | it(":read_body_chunk fails on invalid chunk", function() | |
533 | local function test(chunk, expected_errno) | |
534 | local s, c = new_pair(1.1) | |
535 | s = s:take_socket() | |
536 | assert(s:xwrite(chunk, "n", TEST_TIMEOUT)) | |
537 | s:close() | |
538 | local data, _, errno = c:read_body_chunk(TEST_TIMEOUT) | |
539 | assert.same(nil, data) | |
540 | assert.same(expected_errno, errno) | |
541 | c:close() | |
542 | end | |
543 | test("", nil) | |
544 | test("5", ce.EILSEQ) | |
545 | test("5\r", ce.EILSEQ) | |
546 | test("fffffffffffffff\r\n", ce.E2BIG) | |
547 | test("not a number\r\n", ce.EILSEQ) | |
548 | test("4\r\n1", ce.EILSEQ) | |
549 | test("4\r\nfour\n", ce.EILSEQ) | |
550 | test("4\r\nlonger than four", ce.EILSEQ) | |
551 | test("4\r\nfour\nmissing \r", ce.EILSEQ) | |
552 | end) | |
553 | it(":read_body_chunk is cqueues thread-safe", function() | |
554 | local s, c = new_pair(1.1) | |
555 | s = s:take_socket() | |
556 | local cq = cqueues.new() | |
557 | cq:wrap(function() | |
558 | local chunk = assert(c:read_body_chunk()) | |
559 | assert.same("bytes", chunk) | |
560 | end) | |
561 | cq:wrap(function() | |
562 | assert(s:xwrite("5\r\n", "bn")) | |
563 | cqueues.sleep(0.001) -- let other thread block on reading chunk body | |
564 | assert(s:xwrite("chars\r\n", "bn")) | |
565 | local chunk = assert(c:read_body_chunk()) | |
566 | assert.same("chars", chunk) | |
567 | -- send a 2nd frame | |
568 | assert(s:xwrite("5\r\nbytes\r\n", "bn")) | |
569 | s:close() | |
570 | end) | |
571 | assert_loop(cq, TEST_TIMEOUT) | |
572 | assert.truthy(cq:empty()) | |
494 | 573 | c:close() |
495 | 574 | end) |
496 | 575 | end) |
11 | 11 | c = h1_connection.new(c, "client", version) |
12 | 12 | return s, c |
13 | 13 | end |
14 | it("allows resuming :read_headers", function() | |
15 | local server, client = new_pair(1.1) | |
16 | client = client:take_socket() | |
17 | assert(client:xwrite("GET / HTTP/1.1\r\n", "n")) | |
18 | local stream = server:get_next_incoming_stream() | |
19 | assert.same(ce.ETIMEDOUT, select(3, stream:read_headers(0.001))) | |
20 | assert(client:xwrite("Foo: bar\r\n", "n")) | |
21 | assert.same(ce.ETIMEDOUT, select(3, stream:read_headers(0.001))) | |
22 | assert(client:xwrite("\r\n", "n")) | |
23 | local h = assert(stream:read_headers(0.01)) | |
24 | assert.same("/", h:get(":path")) | |
25 | assert.same("bar", h:get("foo")) | |
26 | end) | |
14 | 27 | it("Writing to a shutdown connection returns EPIPE", function() |
15 | 28 | local server, client = new_pair(1.1) |
16 | 29 | local stream = client:new_stream() |
17 | 30 | client:shutdown() |
18 | 31 | local headers = new_headers() |
32 | headers:append(":method", "GET") | |
33 | headers:append(":scheme", "http") | |
19 | 34 | headers:append(":authority", "myauthority") |
20 | headers:append(":method", "GET") | |
21 | 35 | headers:append(":path", "/a") |
22 | 36 | assert.same(ce.EPIPE, select(3, stream:write_headers(headers, true))) |
23 | 37 | client:close() |
29 | 43 | cq:wrap(function() |
30 | 44 | local stream = client:new_stream() |
31 | 45 | local req_headers = new_headers() |
46 | req_headers:append(":method", "GET") | |
47 | req_headers:append(":scheme", "http") | |
32 | 48 | req_headers:append(":authority", "myauthority") |
33 | req_headers:append(":method", "GET") | |
34 | 49 | req_headers:append(":path", "/a") |
35 | 50 | assert(stream:write_headers(req_headers, true)) |
36 | 51 | local res_headers = assert(stream:get_headers()) |
77 | 92 | local server, client = new_pair(1.1) |
78 | 93 | local stream = client:new_stream() |
79 | 94 | local headers = new_headers() |
95 | headers:append(":method", "GET") | |
96 | headers:append(":scheme", "http") | |
80 | 97 | headers:append(":authority", "myauthority") |
81 | headers:append(":method", "GET") | |
82 | 98 | headers:append(":path", "/a") |
83 | 99 | assert(stream:write_headers(headers, true)) |
84 | 100 | local cq = cqueues.new():wrap(function() |
95 | 111 | cq:wrap(function() |
96 | 112 | local stream = client:new_stream() |
97 | 113 | local req_headers = new_headers() |
114 | req_headers:append(":method", "GET") | |
115 | req_headers:append(":scheme", "http") | |
98 | 116 | req_headers:append(":authority", "myauthority") |
99 | req_headers:append(":method", "GET") | |
100 | 117 | req_headers:append(":path", "/a") |
101 | 118 | assert(stream:write_headers(req_headers, true)) |
102 | 119 | local res_headers = assert(stream:get_headers()) |
121 | 138 | cq:wrap(function() |
122 | 139 | local stream = client:new_stream() |
123 | 140 | local req_headers = new_headers() |
141 | req_headers:append(":method", "GET") | |
142 | req_headers:append(":scheme", "http") | |
124 | 143 | req_headers:append(":authority", "myauthority") |
125 | req_headers:append(":method", "GET") | |
126 | 144 | req_headers:append(":path", "/a") |
127 | 145 | assert(stream:write_headers(req_headers, true)) |
128 | 146 | assert(stream:get_headers()) |
152 | 170 | cq:wrap(function() |
153 | 171 | local stream = client:new_stream() |
154 | 172 | local headers = new_headers() |
173 | headers:append(":method", "GET") | |
174 | headers:append(":scheme", "http") | |
155 | 175 | headers:append(":authority", "myauthority") |
156 | headers:append(":method", "GET") | |
157 | 176 | headers:append(":path", "/a") |
158 | 177 | headers:append("transfer-encoding", "chunked") |
159 | 178 | assert(stream:write_headers(headers, false)) |
175 | 194 | server:close() |
176 | 195 | client:close() |
177 | 196 | end) |
197 | it("doesn't return from last get_next_chunk until trailers are read", function() | |
198 | local server, client = new_pair(1.1) | |
199 | assert(client:write_request_line("GET", "/a", client.version, TEST_TIMEOUT)) | |
200 | assert(client:write_header("transfer-encoding", "chunked", TEST_TIMEOUT)) | |
201 | assert(client:write_headers_done(TEST_TIMEOUT)) | |
202 | assert(client:write_body_chunk("foo", nil, TEST_TIMEOUT)) | |
203 | assert(client:write_body_last_chunk(nil, TEST_TIMEOUT)) | |
204 | assert(client:write_header("sometrailer", "bar", TEST_TIMEOUT)) | |
205 | assert(client:flush(TEST_TIMEOUT)) | |
206 | local server_stream = server:get_next_incoming_stream(0.01) | |
207 | assert(server_stream:get_headers(0.01)) | |
208 | assert.same("foo", server_stream:get_next_chunk(0.01)) | |
209 | -- Shouldn't return `nil` (indicating EOF) until trailers are completely read. | |
210 | assert.same(ce.ETIMEDOUT, select(3, server_stream:get_next_chunk(0.01))) | |
211 | assert.same(ce.ETIMEDOUT, select(3, server_stream:get_headers(0.01))) | |
212 | assert(client:write_headers_done(TEST_TIMEOUT)) | |
213 | assert.same({}, {server_stream:get_next_chunk(0.01)}) | |
214 | local trailers = assert(server_stream:get_headers(0)) | |
215 | assert.same("bar", trailers:get("sometrailer")) | |
216 | server:close() | |
217 | client:close() | |
218 | end) | |
178 | 219 | it("waits for trailers when :get_headers is run in a second thread", function() |
179 | 220 | local server, client = new_pair(1.1) |
180 | 221 | local cq = cqueues.new() |
181 | 222 | cq:wrap(function() |
182 | 223 | local stream = client:new_stream() |
183 | 224 | local headers = new_headers() |
225 | headers:append(":method", "GET") | |
226 | headers:append(":scheme", "http") | |
184 | 227 | headers:append(":authority", "myauthority") |
185 | headers:append(":method", "GET") | |
186 | 228 | headers:append(":path", "/a") |
187 | 229 | headers:append("transfer-encoding", "chunked") |
188 | 230 | assert(stream:write_headers(headers, false)) |
212 | 254 | do |
213 | 255 | local stream = client:new_stream() |
214 | 256 | local headers = new_headers() |
257 | headers:append(":method", "GET") | |
258 | headers:append(":scheme", "http") | |
215 | 259 | headers:append(":authority", "myauthority") |
216 | headers:append(":method", "GET") | |
217 | 260 | headers:append(":path", "/a") |
218 | 261 | headers:append("content-length", "100") |
219 | 262 | assert(stream:write_headers(headers, false)) |
222 | 265 | do |
223 | 266 | local stream = client:new_stream() |
224 | 267 | local headers = new_headers() |
268 | headers:append(":method", "GET") | |
269 | headers:append(":scheme", "http") | |
225 | 270 | headers:append(":authority", "myauthority") |
226 | headers:append(":method", "GET") | |
227 | 271 | headers:append(":path", "/b") |
228 | 272 | headers:append("content-length", "0") |
229 | 273 | assert(stream:write_headers(headers, true)) |
272 | 316 | while z:get_next_chunk() do end |
273 | 317 | streams[zh:get(":path")] = z |
274 | 318 | end) |
275 | cq:wrap(function() | |
319 | local client_sync = cc.new() | |
320 | cq:wrap(function() | |
321 | if client_sync then client_sync:wait() end | |
276 | 322 | local a = client:new_stream() |
277 | 323 | local ah = new_headers() |
324 | ah:append(":method", "GET") | |
325 | ah:append(":scheme", "http") | |
278 | 326 | ah:append(":authority", "myauthority") |
279 | ah:append(":method", "GET") | |
280 | 327 | ah:append(":path", "/a") |
281 | 328 | assert(a:write_headers(ah, true)) |
329 | end) | |
330 | cq:wrap(function() | |
331 | client_sync:signal(); client_sync = nil; | |
282 | 332 | local b = client:new_stream() |
283 | 333 | local bh = new_headers() |
334 | bh:append(":method", "POST") | |
335 | bh:append(":scheme", "http") | |
284 | 336 | bh:append(":authority", "myauthority") |
285 | bh:append(":method", "POST") | |
286 | 337 | bh:append(":path", "/b") |
287 | 338 | assert(b:write_headers(bh, false)) |
339 | cqueues.sleep(0.01) | |
288 | 340 | assert(b:write_chunk("this is some POST data", true)) |
341 | end) | |
342 | cq:wrap(function() | |
289 | 343 | local c = client:new_stream() |
290 | 344 | local ch = new_headers() |
345 | ch:append(":method", "GET") | |
346 | ch:append(":scheme", "http") | |
291 | 347 | ch:append(":authority", "myauthority") |
292 | ch:append(":method", "GET") | |
293 | 348 | ch:append(":path", "/c") |
294 | 349 | assert(c:write_headers(ch, true)) |
295 | 350 | end) |
297 | 352 | assert.truthy(cq:empty()) |
298 | 353 | -- All requests read; now for responses |
299 | 354 | -- Don't want /a to be first. |
300 | local sync = cc.new() | |
301 | cq:wrap(function() | |
302 | if sync then sync:wait() end | |
355 | local server_sync = cc.new() | |
356 | cq:wrap(function() | |
357 | if server_sync then server_sync:wait() end | |
303 | 358 | local h = new_headers() |
304 | 359 | h:append(":status", "200") |
305 | 360 | assert(streams["/a"]:write_headers(h, true)) |
306 | 361 | end) |
307 | 362 | cq:wrap(function() |
308 | sync:signal(1); sync = nil; | |
363 | server_sync:signal(); server_sync = nil; | |
309 | 364 | local h = new_headers() |
310 | 365 | h:append(":status", "200") |
311 | 366 | assert(streams["/b"]:write_headers(h, true)) |
312 | 367 | end) |
313 | 368 | cq:wrap(function() |
369 | if server_sync then server_sync:wait() end | |
314 | 370 | local h = new_headers() |
315 | 371 | h:append(":status", "200") |
316 | 372 | assert(streams["/c"]:write_headers(h, true)) |
320 | 376 | server:close() |
321 | 377 | client:close() |
322 | 378 | end) |
	-- A single headers object `h` is reused and mutated between write_headers
	-- calls on three pipelined streams; what goes on the wire must be the value
	-- at the time of each write, not the final value.
	it("modifying pipelined headers doesn't affect what's sent", function()
		local server, client = new_pair(1.1)
		local cq = cqueues.new()
		cq:wrap(function()
			local a = client:new_stream()
			local b = client:new_stream()
			local c = client:new_stream()

			do
				local h = new_headers()
				h:append(":method", "POST")
				h:append(":scheme", "http")
				h:append(":authority", "myauthority")
				h:append(":path", "/")
				h:upsert("id", "a")
				assert(a:write_headers(h, false))
				-- `h` is mutated before each subsequent stream's write below
				cq:wrap(function()
					cq:wrap(function()
						cq:wrap(function()
							assert(a:write_chunk("a", true))
						end)
						h:upsert("id", "c")
						assert(c:write_headers(h, false))
						assert(c:write_chunk("c", true))
					end)
					h:upsert("id", "b")
					assert(b:write_headers(h, false))
					assert(b:write_chunk("b", true))
				end)
			end
			-- each response must echo the id that was current at send time
			do
				local h = assert(a:get_headers())
				assert.same("a", h:get "id")
			end
			do
				local h = assert(b:get_headers())
				assert.same("b", h:get "id")
			end
			do
				local h = assert(c:get_headers())
				assert.same("c", h:get "id")
			end
		end)
		cq:wrap(function()
			-- server side: echo the received "id" back, reusing `h` the same way
			local h = new_headers()
			h:append(":status", "200")

			local a = assert(server:get_next_incoming_stream())
			assert.same("a", assert(a:get_headers()):get "id")
			assert.same("a", a:get_body_as_string())
			cq:wrap(function()
				h:upsert("id", "a")
				assert(a:write_headers(h, true))
			end)

			local b = assert(server:get_next_incoming_stream())
			assert.same("b", assert(b:get_headers()):get "id")
			assert.same("b", b:get_body_as_string())
			h:upsert("id", "b")
			assert(b:write_headers(h, true))

			local c = assert(server:get_next_incoming_stream())
			assert.same("c", assert(c:get_headers()):get "id")
			assert.same("c", c:get_body_as_string())
			assert(c:get_headers()) -- NOTE(review): second get_headers call on c; presumably exercises a repeat/cached read — confirm
			h:upsert("id", "c")
			assert(c:write_headers(h, true))
		end)
		assert_loop(cq, TEST_TIMEOUT)
		assert.truthy(cq:empty())
		server:close()
		client:close()
	end)
323 | 452 | it("allows 100 continue", function() |
324 | 453 | local server, client = new_pair(1.1) |
325 | 454 | local cq = cqueues.new() |
326 | 455 | cq:wrap(function() |
327 | 456 | local a = client:new_stream() |
328 | 457 | local h = new_headers() |
458 | h:append(":method", "POST") | |
459 | h:append(":scheme", "http") | |
329 | 460 | h:append(":authority", "myauthority") |
330 | h:append(":method", "POST") | |
331 | 461 | h:append(":path", "/a") |
332 | 462 | h:append("expect", "100-continue") |
333 | 463 | assert(a:write_headers(h, false)) |
359 | 489 | cq:wrap(function() |
360 | 490 | local a = client:new_stream() |
361 | 491 | local h = new_headers() |
492 | h:append(":method", "GET") | |
493 | h:append(":scheme", "http") | |
362 | 494 | h:append(":authority", "myauthority") |
363 | h:append(":method", "GET") | |
364 | 495 | h:append(":path", "/") |
365 | 496 | assert(a:write_headers(h, true)) |
366 | 497 | end) |
53 | 53 | test_preface("PRI * HTTP/2.0\r\n\r\nSM\r\n\r") -- missing last \n |
54 | 54 | test_preface(("long string"):rep(1000)) |
55 | 55 | end) |
56 | it("Doesn't busy-loop looking for #preface", function() | |
57 | local s, c = ca.assert(cs.pair()) | |
58 | s = assert(h2_connection.new(s, "server")) | |
59 | assert(s:step(0)) | |
60 | assert.not_same(0, (s:timeout())) | |
61 | c:close() | |
62 | s:close() | |
63 | end) | |
56 | 64 | it("read_http2_frame fails with EILSEQ on corrupt frame", function() |
57 | local spack = string.pack or require "compat53.string".pack | |
65 | local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143 | |
58 | 66 | local s, c = ca.assert(cs.pair()) |
59 | 67 | local cq = cqueues.new() |
60 | 68 | cq:wrap(function() |
69 | 77 | end) |
70 | 78 | assert_loop(cq, TEST_TIMEOUT) |
71 | 79 | assert.truthy(cq:empty()) |
80 | end) | |
81 | it("read_http2_frame is cqueues thread-safe", function() | |
82 | local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143 | |
83 | local s, c = ca.assert(cs.pair()) | |
84 | c = assert(h2_connection.new(c, "client")) | |
85 | local cq = cqueues.new() | |
86 | cq:wrap(function() | |
87 | local typ, flags, id, payload = assert(c:read_http2_frame()) | |
88 | assert.same(0, typ) | |
89 | assert.same(0, flags) | |
90 | assert.same(0, id) | |
91 | assert.same("ninebytes", payload) | |
92 | end) | |
93 | cq:wrap(function() | |
94 | local frame_header = spack(">I3 B B I4", 9, 0, 0, 0) | |
95 | assert(s:xwrite(frame_header .. "nine", "bn")) | |
96 | cqueues.sleep(0.001) -- let other thread block on reading frame body | |
97 | assert(s:xwrite("chars", "bn")) | |
98 | local typ, flags, id, payload = assert(c:read_http2_frame()) | |
99 | assert.same(0, typ) | |
100 | assert.same(0, flags) | |
101 | assert.same(0, id) | |
102 | assert.same("ninechars", payload) | |
103 | -- send a 2nd frame | |
104 | assert(s:xwrite(frame_header .. "ninebytes", "bn")) | |
105 | s:close() | |
106 | end) | |
107 | assert_loop(cq, TEST_TIMEOUT) | |
108 | assert.truthy(cq:empty()) | |
109 | c:close() | |
72 | 110 | end) |
73 | 111 | it("Can #ping back and forth", function() |
74 | 112 | local s, c = new_pair() |
109 | 147 | assert_loop(cq, TEST_TIMEOUT) |
110 | 148 | assert.truthy(cq:empty()) |
111 | 149 | end) |
112 | it("can send a body", function() | |
113 | local s, c = new_pair() | |
114 | local cq = cqueues.new() | |
115 | cq:wrap(function() | |
116 | local client_stream = c:new_stream() | |
150 | it("streams used out of order", function() | |
151 | local s, c = new_pair() | |
152 | local cq = cqueues.new() | |
153 | cq:wrap(function() | |
154 | local client_stream1 = c:new_stream() | |
155 | local client_stream2 = c:new_stream() | |
117 | 156 | local req_headers = new_headers() |
118 | 157 | req_headers:append(":method", "GET") |
119 | 158 | req_headers:append(":scheme", "http") |
120 | req_headers:append(":path", "/") | |
121 | -- use non-integer timeouts to catch errors with integer vs number | |
122 | assert(client_stream:write_headers(req_headers, false, 1.1)) | |
123 | assert(client_stream:write_chunk("some body", false, 1.1)) | |
124 | assert(client_stream:write_chunk("more body", true, 1.1)) | |
125 | assert(c:close()) | |
126 | end) | |
127 | cq:wrap(function() | |
128 | local stream = assert(s:get_next_incoming_stream()) | |
129 | local body = assert(stream:get_body_as_string(1.1)) | |
130 | assert.same("some bodymore body", body) | |
159 | req_headers:append(":path", "/2") | |
160 | assert(client_stream2:write_headers(req_headers, true)) | |
161 | req_headers:upsert(":path", "/1") | |
162 | assert(client_stream1:write_headers(req_headers, true)) | |
163 | assert(c:close()) | |
164 | end) | |
165 | cq:wrap(function() | |
166 | for i=1, 2 do | |
167 | local stream = assert(s:get_next_incoming_stream()) | |
168 | local headers = assert(stream:get_headers()) | |
169 | assert(string.format("/%d", i), headers:get(":path")) | |
170 | end | |
131 | 171 | assert(s:close()) |
132 | 172 | end) |
133 | 173 | assert_loop(cq, TEST_TIMEOUT) |
148 | 188 | cq:wrap(function() |
149 | 189 | ok = ok + 1 |
150 | 190 | if ok == 2 then cond:signal() end |
151 | assert(c.peer_flow_credits_increase:wait(TEST_TIMEOUT/2), "no connection credits") | |
191 | assert(c.peer_flow_credits_change:wait(TEST_TIMEOUT/2), "no connection credits") | |
152 | 192 | end) |
153 | 193 | cq:wrap(function() |
154 | 194 | ok = ok + 1 |
155 | 195 | if ok == 2 then cond:signal() end |
156 | assert(client_stream.peer_flow_credits_increase:wait(TEST_TIMEOUT/2), "no stream credits") | |
196 | assert(client_stream.peer_flow_credits_change:wait(TEST_TIMEOUT/2), "no stream credits") | |
157 | 197 | end) |
158 | 198 | cond:wait() -- wait for above threads to get scheduled |
159 | 199 | assert(client_stream:write_chunk(("really long string"):rep(1e4), true)) |
251 | 291 | c:close() |
252 | 292 | end) |
253 | 293 | end) |
254 | describe("correct state transitions", function() | |
255 | it("closes a stream when writing headers to a half-closed stream", function() | |
256 | local s, c = new_pair() | |
257 | local cq = cqueues.new() | |
258 | cq:wrap(function() | |
259 | local client_stream = c:new_stream() | |
260 | local req_headers = new_headers() | |
261 | req_headers:append(":method", "GET") | |
262 | req_headers:append(":scheme", "http") | |
263 | req_headers:append(":path", "/") | |
264 | req_headers:append(":authority", "example.com") | |
265 | assert(client_stream:write_headers(req_headers, false)) | |
266 | assert(client_stream:get_headers()) | |
267 | assert(c:close()) | |
268 | end) | |
269 | cq:wrap(function() | |
270 | local stream = assert(s:get_next_incoming_stream()) | |
271 | assert(stream:get_headers()) | |
272 | local res_headers = new_headers() | |
273 | res_headers:append(":status", "200") | |
274 | assert(stream:write_headers(res_headers, true)) | |
275 | assert("closed", stream.state) | |
276 | assert(s:close()) | |
277 | end) | |
278 | assert_loop(cq, TEST_TIMEOUT) | |
279 | assert.truthy(cq:empty()) | |
280 | end) | |
281 | end) | |
282 | describe("push_promise", function() | |
283 | it("permits a simple push promise from server => client", function() | |
284 | local s, c = new_pair() | |
285 | local cq = cqueues.new() | |
286 | cq:wrap(function() | |
287 | local client_stream = c:new_stream() | |
288 | local req_headers = new_headers() | |
289 | req_headers:append(":method", "GET") | |
290 | req_headers:append(":scheme", "http") | |
291 | req_headers:append(":path", "/") | |
292 | req_headers:append(":authority", "example.com") | |
293 | assert(client_stream:write_headers(req_headers, true)) | |
294 | local pushed_stream = assert(c:get_next_incoming_stream()) | |
295 | do | |
296 | local h = assert(pushed_stream:get_headers()) | |
297 | assert.same("GET", h:get(":method")) | |
298 | assert.same("http", h:get(":scheme")) | |
299 | assert.same("/foo", h:get(":path")) | |
300 | assert.same(req_headers:get(":authority"), h:get(":authority")) | |
301 | assert.same(nil, pushed_stream:get_next_chunk()) | |
302 | end | |
303 | assert(c:close()) | |
304 | end) | |
305 | cq:wrap(function() | |
306 | local stream = assert(s:get_next_incoming_stream()) | |
307 | do | |
308 | local h = assert(stream:get_headers()) | |
309 | assert.same("GET", h:get(":method")) | |
310 | assert.same("http", h:get(":scheme")) | |
311 | assert.same("/", h:get(":path")) | |
312 | assert.same("example.com", h:get(":authority")) | |
313 | assert.same(nil, stream:get_next_chunk()) | |
314 | end | |
315 | local pushed_stream do | |
316 | local req_headers = new_headers() | |
317 | req_headers:append(":method", "GET") | |
318 | req_headers:append(":scheme", "http") | |
319 | req_headers:append(":path", "/foo") | |
320 | req_headers:append(":authority", "example.com") | |
321 | pushed_stream = assert(stream:push_promise(req_headers)) | |
322 | end | |
323 | do | |
324 | local req_headers = new_headers() | |
325 | req_headers:append(":status", "200") | |
326 | assert(pushed_stream:write_headers(req_headers, true)) | |
327 | end | |
328 | assert(s:close()) | |
329 | end) | |
330 | assert_loop(cq, TEST_TIMEOUT) | |
331 | assert.truthy(cq:empty()) | |
332 | end) | |
333 | end) | |
334 | 294 | end) |
0 | describe("http.h2_stream", function() | |
1 | local h2_connection = require "http.h2_connection" | |
2 | local h2_error = require "http.h2_error" | |
3 | local new_headers = require "http.headers".new | |
4 | local cqueues = require "cqueues" | |
5 | local ca = require "cqueues.auxlib" | |
6 | local cs = require "cqueues.socket" | |
-- Build a connected (server, client) pair of HTTP/2 connections
-- over an in-process socket pair.
local function new_pair()
	local server_sock, client_sock = ca.assert(cs.pair())
	local server = assert(h2_connection.new(server_sock, "server"))
	local client = assert(h2_connection.new(client_sock, "client"))
	return server, client
end
it("rejects header fields with uppercase characters", function()
	local server, client = new_pair()
	local stream = client:new_stream()
	-- Build a request whose field name contains an uppercase letter.
	local headers = new_headers()
	headers:append(":method", "GET")
	headers:append(":scheme", "http")
	headers:append(":path", "/")
	headers:append("Foo", "bar") -- capital 'F' must be refused
	assert.has.errors(function()
		stream:write_headers(headers, false, 0)
	end)
	client:close()
	server:close()
end)
it("breaks up a large header block into continuation frames", function()
	local server, client = new_pair()
	local cq = cqueues.new()
	-- One header value spanning at least 3 full frames (16384 bytes each)
	-- forces the encoded block into HEADERS + CONTINUATION frames.
	local sent_headers = new_headers()
	sent_headers:append(":method", "GET")
	sent_headers:append(":scheme", "http")
	sent_headers:append(":path", "/")
	sent_headers:append("unknown", ("a"):rep(16384*3))
	cq:wrap(function()
		local stream = client:new_stream()
		assert(stream:write_headers(sent_headers, true))
		assert(client:close())
	end)
	cq:wrap(function()
		local stream = assert(server:get_next_incoming_stream())
		-- The reassembled header block must round-trip unchanged.
		local received_headers = assert(stream:get_headers())
		assert.same(sent_headers, received_headers)
		assert(server:close())
	end)
	assert_loop(cq, TEST_TIMEOUT)
	assert.truthy(cq:empty())
end)
it("can send a body", function()
	local server, client = new_pair()
	local cq = cqueues.new()
	cq:wrap(function()
		local stream = client:new_stream()
		local headers = new_headers()
		headers:append(":method", "GET")
		headers:append(":scheme", "http")
		headers:append(":path", "/")
		-- Non-integer timeouts catch integer-vs-number handling errors.
		assert(stream:write_headers(headers, false, 1.1))
		assert(stream:write_chunk("some body", false, 1.1))
		assert(stream:write_chunk("more body", true, 1.1))
		assert(client:close())
	end)
	cq:wrap(function()
		local stream = assert(server:get_next_incoming_stream())
		-- Both chunks should be delivered, concatenated, to the peer.
		local body = assert(stream:get_body_as_string(1.1))
		assert.same("some bodymore body", body)
		assert(server:close())
	end)
	assert_loop(cq, TEST_TIMEOUT)
	assert.truthy(cq:empty())
end)
it("errors if content-length is exceeded", function()
	local server, client = new_pair()
	local cq = cqueues.new()
	cq:wrap(function()
		local stream = client:new_stream()
		local headers = new_headers()
		headers:append(":method", "GET")
		headers:append(":scheme", "http")
		headers:append(":path", "/")
		headers:append("content-length", "2")
		assert(stream:write_headers(headers, false))
		-- Deliberately send more data than content-length promised.
		assert(stream:write_chunk("body longer than 2 bytes", true))
	end)
	cq:wrap(function()
		local stream = assert(server:get_next_incoming_stream())
		-- The receiver must reject the over-long body as a protocol error.
		local body, err = stream:get_body_as_string()
		assert.falsy(body)
		assert.truthy(h2_error.is(err))
		assert.same(h2_error.errors.PROTOCOL_ERROR.code, err.code)
		assert.same("content-length exceeded", err.message)
		assert(server:close())
	end)
	assert_loop(cq, TEST_TIMEOUT)
	assert.truthy(cq:empty())
	client:close()
end)
describe("correct state transitions", function()
	it("closes a stream when writing headers to a half-closed stream", function()
		local s, c = new_pair()
		local cq = cqueues.new()
		cq:wrap(function()
			local client_stream = c:new_stream()
			local req_headers = new_headers()
			req_headers:append(":method", "GET")
			req_headers:append(":scheme", "http")
			req_headers:append(":path", "/")
			req_headers:append(":authority", "example.com")
			assert(client_stream:write_headers(req_headers, false))
			assert(client_stream:get_headers())
			assert(c:close())
		end)
		cq:wrap(function()
			local stream = assert(s:get_next_incoming_stream())
			assert(stream:get_headers())
			local res_headers = new_headers()
			res_headers:append(":status", "200")
			assert(stream:write_headers(res_headers, true))
			-- was `assert("closed", stream.state)`: a bare string is always
			-- truthy (the second argument is only the failure message), so
			-- the old form never compared anything. assert.same actually
			-- verifies the stream has reached the "closed" state.
			assert.same("closed", stream.state)
			assert(s:close())
		end)
		assert_loop(cq, TEST_TIMEOUT)
		assert.truthy(cq:empty())
	end)
	it("ignores delayed RST_STREAM on already closed stream", function()
		local s, c = new_pair()
		local cq = cqueues.new()
		cq:wrap(function()
			local client_stream = c:new_stream()
			local req_headers = new_headers()
			req_headers:append(":method", "GET")
			req_headers:append(":scheme", "http")
			req_headers:append(":path", "/")
			req_headers:append(":authority", "example.com")
			assert(client_stream:write_headers(req_headers, true))
			assert(client_stream:get_headers())
			-- see note above: fixed no-op assert to a real comparison
			assert.same("closed", client_stream.state)
			-- both sides now have stream in closed state
			-- send server a RST_STREAM: it should get ignored
			assert(client_stream:rst_stream("post-closed rst_stream"))
			assert(c:close())
		end)
		cq:wrap(function()
			local stream = assert(s:get_next_incoming_stream())
			assert(stream:get_headers())
			local res_headers = new_headers()
			res_headers:append(":status", "200")
			assert(stream:write_headers(res_headers, true))
			-- both sides now have stream in closed state
			assert.same("closed", stream.state)
			-- process incoming frames until EOF (i.e. drain RST_STREAM)
			-- the RST_STREAM frame should be ignored.
			assert(s:loop())
			assert(s:close())
		end)
		cq:wrap(function()
			-- NOTE(review): a second concurrent s:loop() as in the original;
			-- presumably needed to keep the connection serviced — confirm.
			assert(s:loop())
		end)
		assert_loop(cq, TEST_TIMEOUT)
		assert.truthy(cq:empty())
	end)
end)
describe("push_promise", function()
	-- Client side shared by both tests: send a plain GET, then validate
	-- the stream the server pushes back.
	local function run_client(c)
		local client_stream = c:new_stream()
		local req_headers = new_headers()
		req_headers:append(":method", "GET")
		req_headers:append(":scheme", "http")
		req_headers:append(":path", "/")
		req_headers:append(":authority", "example.com")
		assert(client_stream:write_headers(req_headers, true))
		local pushed_stream = assert(c:get_next_incoming_stream())
		local h = assert(pushed_stream:get_headers())
		assert.same("GET", h:get(":method"))
		assert.same("http", h:get(":scheme"))
		assert.same("/foo", h:get(":path"))
		assert.same(req_headers:get(":authority"), h:get(":authority"))
		assert.same(nil, pushed_stream:get_next_chunk())
		assert(c:close())
	end
	-- Server side shared by both tests: validate the request, push a
	-- promised stream with the given headers, answer it with a bare 200.
	local function run_server(s, promised_headers)
		local stream = assert(s:get_next_incoming_stream())
		local h = assert(stream:get_headers())
		assert.same("GET", h:get(":method"))
		assert.same("http", h:get(":scheme"))
		assert.same("/", h:get(":path"))
		assert.same("example.com", h:get(":authority"))
		assert.same(nil, stream:get_next_chunk())
		local pushed_stream = assert(stream:push_promise(promised_headers))
		local res_headers = new_headers()
		res_headers:append(":status", "200")
		assert(pushed_stream:write_headers(res_headers, true))
		assert(s:close())
	end
	-- Headers for the promised request; optionally padded with a value
	-- large enough to need several frames.
	local function promised_request_headers(large)
		local h = new_headers()
		h:append(":method", "GET")
		h:append(":scheme", "http")
		h:append(":path", "/foo")
		h:append(":authority", "example.com")
		if large then
			h:append("unknown", ("a"):rep(16384*3)) -- at least 3 frames worth
		end
		return h
	end
	it("permits a simple push promise from server => client", function()
		local s, c = new_pair()
		local cq = cqueues.new()
		cq:wrap(function()
			run_client(c)
		end)
		cq:wrap(function()
			run_server(s, promised_request_headers(false))
		end)
		assert_loop(cq, TEST_TIMEOUT)
		assert.truthy(cq:empty())
	end)
	it("handles large header blocks", function()
		local s, c = new_pair()
		local cq = cqueues.new()
		cq:wrap(function()
			run_client(c)
		end)
		cq:wrap(function()
			run_server(s, promised_request_headers(true))
		end)
		assert_loop(cq, TEST_TIMEOUT)
		assert.truthy(cq:empty())
	end)
end)
267 | end) |
18 | 18 | return wrap(self, func, ...) |
19 | 19 | end) |
20 | 20 | end |
21 | ||
-- Allow tests to pick up a locale configured via the LOCALE env var.
local locale = os.getenv("LOCALE")
if locale then
	-- os.setlocale returns the name of the new locale on success, or nil
	-- on failure; the previous code needlessly called it twice (once
	-- discarding the result). A single call is equivalent.
	if locale ~= os.setlocale(locale) then
		print("Locale " .. locale .. " is not available.")
		os.exit(1) -- busted doesn't fail if helper script throws errors: https://github.com/Olivine-Labs/busted/issues/549
	end
end
8 | 8 | end) |
it("can be cloned", function()
	local s = http_hsts.new_store()
	-- Deep-compare a fresh clone against the original while ignoring
	-- expiry_heap (its internal layout may legitimately differ between
	-- original and clone even when contents agree — TODO confirm).
	-- Extracted into a helper: the original repeated this do-block twice.
	local function assert_clone_same()
		local clone = s:clone()
		local old_heap = s.expiry_heap
		s.expiry_heap = nil
		clone.expiry_heap = nil
		assert.same(s, clone)
		s.expiry_heap = old_heap
	end
	assert_clone_same() -- empty store clones equal
	assert.truthy(s:store("foo.example.com", {
		["max-age"] = "100";
	}))
	assert_clone_same() -- populated store clones equal
	-- A clone must share the stored knowledge of the original.
	local clone = s:clone()
	assert.truthy(s:check("foo.example.com"))
	assert.truthy(clone:check("foo.example.com"))
end)
92 | 106 | assert.falsy(s:check("example.com")) |
93 | 107 | assert.truthy(s:check("keep.me")) |
94 | 108 | end) |
it("enforces .max_items", function()
	local s = http_hsts.new_store()
	-- With a zero cap no entry can be stored at all.
	s.max_items = 0
	assert.falsy(s:store("example.com", {
		["max-age"] = "100";
	}))
	-- With a cap of one, the first host fits and the second is refused.
	s.max_items = 1
	assert.truthy(s:store("example.com", {
		["max-age"] = "100";
	}))
	assert.falsy(s:store("other.com", {
		["max-age"] = "100";
	}))
	-- Removing an entry frees up room again. The hsts store is keyed by
	-- host only; the stray `"/" , "foo"` arguments previously passed here
	-- looked copied from the cookie-store API and were silently ignored.
	s:remove("example.com")
	assert.truthy(s:store("other.com", {
		["max-age"] = "100";
	}))
end)
95 | 127 | end) |
628 | 628 | stream:shutdown() |
629 | 629 | end) |
630 | 630 | end) |
it("works with a proxy server with a path component", function()
	test(function(stream)
		local req = assert(stream:get_headers())
		local _, host, port = stream:localname()
		local authority = http_util.to_authority(host, port, "http")
		-- A proxied request carries the absolute URI in :path.
		assert.same(authority, req:get(":authority"))
		assert.same(string.format("http://%s/", authority), req:get(":path"))
		local res = new_headers()
		res:append(":status", "200")
		assert(stream:write_headers(res, false))
		assert(stream:write_chunk("hello world", true))
	end, function(req)
		-- A path component in the proxy URI must not break proxying.
		req.proxy = {
			scheme = "http";
			host = req.host;
			port = req.port;
			path = "/path";
		}
		local headers, stream = assert(req:go())
		assert.same("200", headers:get(":status"))
		assert.same("hello world", assert(stream:get_body_as_string()))
		stream:shutdown()
	end)
end)
631 | 655 | it("works with http proxies on OPTIONS requests", function() |
632 | 656 | test(function(stream) |
633 | 657 | local h = assert(stream:get_headers()) |
708 | 732 | host = req.host; |
709 | 733 | port = req.port; |
710 | 734 | userinfo = "user:pass"; |
735 | } | |
736 | local headers, stream = assert(req:go()) | |
737 | assert.same("200", headers:get(":status")) | |
738 | assert.same("hello world", assert(stream:get_body_as_string())) | |
739 | stream:shutdown() | |
740 | end) | |
741 | end) | |
742 | it("CONNECT proxy with path component", function() | |
743 | test(function(stream, s) | |
744 | local h = assert(stream:get_headers()) | |
745 | local resp_headers = new_headers() | |
746 | resp_headers:append(":status", "200") | |
747 | assert(stream:write_headers(resp_headers, false)) | |
748 | if h:get(":method") == "CONNECT" then | |
749 | assert(stream.connection.version < 2) | |
750 | local sock = assert(stream.connection:take_socket()) | |
751 | s:add_socket(sock) | |
752 | return true | |
753 | else | |
754 | assert(stream:write_chunk("hello world", true)) | |
755 | end | |
756 | end, function(req) | |
757 | req.tls = true | |
758 | req.proxy = { | |
759 | scheme = "http"; | |
760 | host = req.host; | |
761 | port = req.port; | |
762 | path = "/path"; | |
711 | 763 | } |
712 | 764 | local headers, stream = assert(req:go()) |
713 | 765 | assert.same("200", headers:get(":status")) |
804 | 856 | end) |
805 | 857 | cq:wrap(function() -- SOCKS server |
806 | 858 | local sock = socks_server:accept() |
859 | sock:setmode("b", "b") | |
807 | 860 | assert.same("\5", sock:read(1)) |
808 | 861 | local n = assert(sock:read(1)):byte() |
809 | 862 | local available_auth = assert(sock:read(n)) |
-- This file is used for linting .tld files with typedlua
-- (run as `tlc -o /dev/null spec/require-all.lua` in CI).
-- Requiring every public module ensures each declaration file is checked.

require "http.bit"
require "http.client"
require "http.connection_common"
require "http.cookie"
require "http.h1_connection"
require "http.h1_reason_phrases"
require "http.h1_stream"
require "http.h2_connection"
require "http.h2_error"
require "http.h2_stream"
require "http.headers"
require "http.hpack"
require "http.hsts"
require "http.proxies"
require "http.request"
require "http.server"
require "http.socks"
require "http.stream_common"
require "http.tls"
require "http.util"
require "http.version"
require "http.websocket"
require "http.zlib"
require "http.compat.prosody"
require "http.compat.socket"
0 | 0 | describe("http.server module", function() |
1 | local server = require "http.server" | |
2 | local client = require "http.client" | |
1 | local http_server = require "http.server" | |
2 | local http_client = require "http.client" | |
3 | 3 | local http_tls = require "http.tls" |
4 | local new_headers = require "http.headers".new | |
4 | local http_headers = require "http.headers" | |
5 | 5 | local cqueues = require "cqueues" |
6 | 6 | local ca = require "cqueues.auxlib" |
7 | 7 | local ce = require "cqueues.errno" |
12 | 12 | it("rejects missing 'ctx' field", function() |
13 | 13 | local s, c = ca.assert(cs.pair()) |
14 | 14 | assert.has.errors(function() |
15 | server.new { | |
15 | http_server.new { | |
16 | 16 | socket = s; |
17 | 17 | onstream = error; |
18 | 18 | } |
23 | 23 | it("rejects invalid 'cq' field", function() |
24 | 24 | local s, c = ca.assert(cs.pair()) |
25 | 25 | assert.has.errors(function() |
26 | server.new { | |
26 | http_server.new { | |
27 | 27 | socket = s; |
28 | 28 | tls = false; |
29 | 29 | onstream = error; |
35 | 35 | end) |
36 | 36 | it("__tostring works", function() |
37 | 37 | local s, c = ca.assert(cs.pair()) |
38 | s = server.new { | |
38 | s = http_server.new { | |
39 | 39 | socket = s; |
40 | 40 | tls = false; |
41 | 41 | onstream = error; |
46 | 46 | end) |
47 | 47 | it(":onerror with no arguments doesn't clear", function() |
48 | 48 | local s, c = ca.assert(cs.pair()) |
49 | s = server.new { | |
49 | s = http_server.new { | |
50 | 50 | socket = s; |
51 | 51 | tls = false; |
52 | 52 | onstream = error; |
81 | 81 | s:close() |
82 | 82 | end) |
83 | 83 | options.onstream = onstream |
84 | local s = assert(server.listen(options)) | |
84 | local s = assert(http_server.listen(options)) | |
85 | 85 | assert(s:listen()) |
86 | 86 | cq:wrap(function() |
87 | 87 | assert_loop(s) |
102 | 102 | ctx = non_verifying_tls_context; |
103 | 103 | version = client_version; |
104 | 104 | } |
105 | local conn = assert(client.connect(client_options)) | |
105 | local conn = assert(http_client.connect(client_options)) | |
106 | 106 | local stream = conn:new_stream() |
107 | local headers = new_headers() | |
107 | local headers = http_headers.new() | |
108 | 108 | headers:append(":authority", "myauthority") |
109 | 109 | headers:append(":method", "GET") |
110 | 110 | headers:append(":path", "/") |
178 | 178 | end) |
179 | 179 | it("taking socket from underlying connection is handled well by server", function() |
180 | 180 | local cq = cqueues.new() |
181 | local onstream = spy.new(function(s, stream) | |
181 | local onstream = spy.new(function(server, stream) | |
182 | 182 | local sock = stream.connection:take_socket() |
183 | s:close() | |
183 | server:close() | |
184 | 184 | assert.same("test", sock:read("*a")) |
185 | 185 | sock:close() |
186 | 186 | end); |
187 | local s = assert(server.listen { | |
188 | host = "localhost"; | |
189 | port = 0; | |
187 | local server = assert(http_server.new { | |
188 | tls = false; | |
190 | 189 | onstream = onstream; |
191 | 190 | }) |
192 | assert(s:listen()) | |
193 | local _, host, port = s:localname() | |
194 | cq:wrap(function() | |
195 | assert_loop(s) | |
196 | end) | |
197 | cq:wrap(function() | |
198 | local sock = cs.connect { | |
199 | host = host; | |
200 | port = port; | |
201 | } | |
202 | assert(sock:write("test")) | |
203 | assert(sock:flush()) | |
204 | sock:close() | |
191 | local s, c = ca.assert(cs.pair()) | |
192 | server:add_socket(s) | |
193 | cq:wrap(function() | |
194 | assert_loop(server) | |
195 | end) | |
196 | cq:wrap(function() | |
197 | assert(c:write("test")) | |
198 | assert(c:flush()) | |
199 | c:close() | |
205 | 200 | end) |
206 | 201 | assert_loop(cq, TEST_TIMEOUT) |
207 | 202 | assert.truthy(cq:empty()) |
208 | 203 | assert.spy(onstream).was.called() |
209 | 204 | end) |
210 | 205 | it("an idle http2 stream doesn't block the server", function() |
211 | local s = assert(server.listen { | |
212 | host = "localhost"; | |
213 | port = 0; | |
206 | local server = assert(http_server.new { | |
207 | tls = false; | |
208 | version = 2; | |
214 | 209 | onstream = function(_, stream) |
215 | 210 | if stream.id == 1 then |
216 | 211 | stream:get_next_chunk() |
217 | 212 | else |
218 | 213 | assert.same(3, stream.id) |
219 | 214 | assert.same({}, {stream:get_next_chunk()}) |
220 | local headers = new_headers() | |
215 | local headers = http_headers.new() | |
221 | 216 | headers:append(":status", "200") |
222 | 217 | assert(stream:write_headers(headers, true)) |
223 | 218 | end |
224 | 219 | end; |
225 | 220 | }) |
226 | assert(s:listen()) | |
227 | local client_family, client_host, client_port = s:localname() | |
228 | local conn = assert(client.connect({ | |
229 | family = client_family; | |
230 | host = client_host; | |
231 | port = client_port; | |
232 | version = 2; | |
233 | })) | |
234 | local cq = cqueues.new() | |
235 | cq:wrap(function() | |
236 | assert_loop(s) | |
237 | end) | |
238 | cq:wrap(function() | |
239 | local headers = new_headers() | |
221 | local s, c = ca.assert(cs.pair()) | |
222 | server:add_socket(s) | |
223 | local cq = cqueues.new() | |
224 | cq:wrap(function() | |
225 | assert_loop(server) | |
226 | end) | |
227 | cq:wrap(function() | |
228 | local conn = assert(http_client.negotiate(c, { | |
229 | version = 2; | |
230 | })) | |
231 | local headers = http_headers.new() | |
240 | 232 | headers:append(":authority", "myauthority") |
241 | 233 | headers:append(":method", "GET") |
242 | 234 | headers:append(":path", "/") |
247 | 239 | assert(stream2:write_headers(headers, true)) |
248 | 240 | assert(stream2:get_headers()) |
249 | 241 | conn:close() |
250 | s:close() | |
242 | server:close() | |
243 | end) | |
244 | assert_loop(cq, TEST_TIMEOUT) | |
245 | assert.truthy(cq:empty()) | |
246 | end) | |
it("times out clients if intra_stream_timeout is exceeded", function()
	-- Server: answer every request with an empty 200 response.
	local function respond_200(_, stream)
		assert(stream:get_headers())
		local res = http_headers.new()
		res:append(":status", "200")
		assert(stream:write_headers(res, true))
	end
	local srv = assert(http_server.new {
		tls = false;
		onstream = respond_200;
		intra_stream_timeout = 0.1;
	})
	local server_sock, client_sock = ca.assert(cs.pair())
	srv:add_socket(server_sock)
	local cq = cqueues.new()
	cq:wrap(function()
		assert_loop(srv)
	end)
	cq:wrap(function()
		local conn = assert(http_client.negotiate(client_sock, {
			version = 1.1;
		}))
		local req = http_headers.new()
		req:append(":method", "GET")
		req:append(":scheme", "http")
		req:append(":path", "/")
		req:append(":authority", "foo")
		-- Issue a request and wait for the response headers.
		local function roundtrip()
			local stream = conn:new_stream()
			assert(stream:write_headers(req, true))
			assert(stream:get_headers())
		end
		roundtrip() -- normal request
		-- Wait for less than intra_stream_timeout: should work as normal
		cqueues.sleep(0.05)
		roundtrip()
		-- Wait for more than intra_stream_timeout: server should have closed connection
		cqueues.sleep(0.2)
		local stream = conn:new_stream()
		assert.same(ce.EPIPE, select(3, stream:write_headers(req, true)))
	end)
	assert_loop(cq, TEST_TIMEOUT)
	assert.truthy(cq:empty())
end)
255 | 290 | it("allows pausing+resuming the server", function() |
256 | local s = assert(server.listen { | |
291 | local s = assert(http_server.listen { | |
257 | 292 | host = "localhost"; |
258 | 293 | port = 0; |
259 | 294 | onstream = function(_, stream) |
260 | 295 | assert(stream:get_headers()) |
261 | local headers = new_headers() | |
296 | local headers = http_headers.new() | |
262 | 297 | headers:append(":status", "200") |
263 | 298 | assert(stream:write_headers(headers, true)) |
264 | 299 | end; |
270 | 305 | host = client_host; |
271 | 306 | port = client_port; |
272 | 307 | } |
273 | local headers = new_headers() | |
308 | local headers = http_headers.new() | |
274 | 309 | headers:append(":authority", "myauthority") |
275 | 310 | headers:append(":method", "GET") |
276 | 311 | headers:append(":path", "/") |
281 | 316 | assert_loop(s) |
282 | 317 | end) |
283 | 318 | local function do_req(timeout) |
284 | local conn = assert(client.connect(client_options)) | |
319 | local conn = assert(http_client.connect(client_options)) | |
285 | 320 | local stream = assert(conn:new_stream()) |
286 | 321 | assert(stream:write_headers(headers, true)) |
287 | 322 | local ok, err, errno = stream:get_headers(timeout) |
302 | 337 | assert_loop(cq, TEST_TIMEOUT) |
303 | 338 | assert.truthy(cq:empty()) |
304 | 339 | end) |
it("shouldn't throw an error calling :listen() after :close()", function()
	local srv = assert(http_server.listen {
		host = "localhost";
		port = 0;
		onstream = function() end;
	})
	srv:close()
	-- Calling :listen() on a closed server must be a no-op, not an error.
	srv:listen()
end)
it("shouldn't throw an error calling :localname() after :close()", function()
	local srv = assert(http_server.listen {
		host = "localhost";
		port = 0;
		onstream = function() end;
	})
	srv:close()
	-- Querying :localname() on a closed server must not raise.
	srv:localname()
end)
305 | 358 | end) |
9 | 9 | c = h1_connection.new(c, "client", version) |
10 | 10 | return s, c |
11 | 11 | end |
12 | local function new_request_headers() | |
13 | local headers = new_headers() | |
14 | headers:append(":method", "GET") | |
15 | headers:append(":scheme", "http") | |
16 | headers:append(":authority", "myauthority") | |
17 | headers:append(":path", "/") | |
18 | return headers | |
19 | end | |
12 | 20 | it("Can read a number of characters", function() |
13 | 21 | local server, client = new_pair(1.1) |
14 | 22 | local cq = cqueues.new() |
15 | 23 | cq:wrap(function() |
16 | 24 | local stream = client:new_stream() |
17 | local headers = new_headers() | |
18 | headers:append(":authority", "myauthority") | |
19 | headers:append(":method", "GET") | |
20 | headers:append(":path", "/") | |
21 | assert(stream:write_headers(headers, false)) | |
25 | assert(stream:write_headers(new_request_headers(), false)) | |
22 | 26 | assert(stream:write_chunk("foo", false)) |
23 | 27 | assert(stream:write_chunk("\nb", false)) |
24 | 28 | assert(stream:write_chunk("ar\n", true)) |
46 | 50 | local cq = cqueues.new() |
47 | 51 | cq:wrap(function() |
48 | 52 | local stream = client:new_stream() |
49 | local headers = new_headers() | |
50 | headers:append(":authority", "myauthority") | |
51 | headers:append(":method", "GET") | |
52 | headers:append(":path", "/") | |
53 | assert(stream:write_headers(headers, false)) | |
53 | assert(stream:write_headers(new_request_headers(), false)) | |
54 | 54 | assert(stream:write_chunk("foo", false)) |
55 | 55 | assert(stream:write_chunk("\nb", false)) |
56 | 56 | assert(stream:write_chunk("ar\n", true)) |
71 | 71 | local cq = cqueues.new() |
72 | 72 | cq:wrap(function() |
73 | 73 | local stream = client:new_stream() |
74 | local headers = new_headers() | |
75 | headers:append(":authority", "myauthority") | |
76 | headers:append(":method", "GET") | |
77 | headers:append(":path", "/") | |
78 | assert(stream:write_headers(headers, false)) | |
74 | assert(stream:write_headers(new_request_headers(), false)) | |
79 | 75 | assert(stream:write_chunk("hello world!", true)) |
80 | 76 | end) |
81 | 77 | cq:wrap(function() |
88 | 84 | client:close() |
89 | 85 | server:close() |
90 | 86 | end) |
91 | it("can write body from temporary file", function() | |
92 | local server, client = new_pair(1.1) | |
93 | local cq = cqueues.new() | |
94 | cq:wrap(function() | |
95 | local file = io.tmpfile() | |
96 | assert(file:write("hello world!")) | |
97 | assert(file:seek("set")) | |
98 | local stream = client:new_stream() | |
99 | local headers = new_headers() | |
100 | headers:append(":authority", "myauthority") | |
101 | headers:append(":method", "GET") | |
102 | headers:append(":path", "/") | |
103 | assert(stream:write_headers(headers, false)) | |
104 | assert(stream:write_body_from_file(file)) | |
105 | end) | |
106 | cq:wrap(function() | |
107 | local stream = assert(server:get_next_incoming_stream()) | |
108 | assert.same("hello world!", assert(stream:get_body_as_string())) | |
109 | end) | |
110 | assert_loop(cq, TEST_TIMEOUT) | |
111 | assert.truthy(cq:empty()) | |
112 | client:close() | |
113 | server:close() | |
87 | describe("write_body_from_file", function() | |
88 | it("works with a temporary file", function() | |
89 | local server, client = new_pair(1.1) | |
90 | local cq = cqueues.new() | |
91 | cq:wrap(function() | |
92 | local file = io.tmpfile() | |
93 | assert(file:write("hello world!")) | |
94 | assert(file:seek("set")) | |
95 | local stream = client:new_stream() | |
96 | assert(stream:write_headers(new_request_headers(), false)) | |
97 | assert(stream:write_body_from_file(file)) | |
98 | end) | |
99 | cq:wrap(function() | |
100 | local stream = assert(server:get_next_incoming_stream()) | |
101 | assert.same("hello world!", assert(stream:get_body_as_string())) | |
102 | end) | |
103 | assert_loop(cq, TEST_TIMEOUT) | |
104 | assert.truthy(cq:empty()) | |
105 | client:close() | |
106 | server:close() | |
107 | end) | |
108 | it("works using the options form", function() | |
109 | local server, client = new_pair(1.1) | |
110 | local cq = cqueues.new() | |
111 | cq:wrap(function() | |
112 | local file = io.tmpfile() | |
113 | assert(file:write("hello world!")) | |
114 | assert(file:seek("set")) | |
115 | local stream = client:new_stream() | |
116 | assert(stream:write_headers(new_request_headers(), false)) | |
117 | assert(stream:write_body_from_file({ | |
118 | file = file; | |
119 | })) | |
120 | end) | |
121 | cq:wrap(function() | |
122 | local stream = assert(server:get_next_incoming_stream()) | |
123 | assert.same("hello world!", assert(stream:get_body_as_string())) | |
124 | end) | |
125 | assert_loop(cq, TEST_TIMEOUT) | |
126 | assert.truthy(cq:empty()) | |
127 | client:close() | |
128 | server:close() | |
129 | end) | |
130 | it("validates .count option", function() | |
131 | local server, client = new_pair(1.1) | |
132 | local cq = cqueues.new() | |
133 | cq:wrap(function() | |
134 | local stream = client:new_stream() | |
135 | assert(stream:write_headers(new_request_headers(), false)) | |
136 | assert.has_error(function() | |
137 | stream:write_body_from_file({ | |
138 | file = io.tmpfile(); | |
139 | count = "invalid count field"; | |
140 | }) | |
141 | end) | |
142 | end) | |
143 | cq:wrap(function() | |
144 | assert(server:get_next_incoming_stream()) | |
145 | end) | |
146 | assert_loop(cq, TEST_TIMEOUT) | |
147 | assert.truthy(cq:empty()) | |
148 | client:close() | |
149 | server:close() | |
150 | end) | |
151 | it("limits number of bytes when using .count option", function() | |
152 | local server, client = new_pair(1.1) | |
153 | local cq = cqueues.new() | |
154 | cq:wrap(function() | |
155 | local file = io.tmpfile() | |
156 | assert(file:write("hello world!")) | |
157 | assert(file:seek("set")) | |
158 | local stream = client:new_stream() | |
159 | assert(stream:write_headers(new_request_headers(), false)) | |
160 | assert(stream:write_body_from_file({ | |
161 | file = file; | |
162 | count = 5; | |
163 | })) | |
164 | end) | |
165 | cq:wrap(function() | |
166 | local stream = assert(server:get_next_incoming_stream()) | |
167 | assert.same("hello", assert(stream:get_body_as_string())) | |
168 | end) | |
169 | assert_loop(cq, TEST_TIMEOUT) | |
170 | assert.truthy(cq:empty()) | |
171 | client:close() | |
172 | server:close() | |
173 | end) | |
174 | it("reports an error on early EOF", function() | |
175 | local server, client = new_pair(1.1) | |
176 | local cq = cqueues.new() | |
177 | cq:wrap(function() | |
178 | local file = io.tmpfile() | |
179 | assert(file:write("hello world!")) | |
180 | assert(file:seek("set")) | |
181 | local stream = client:new_stream() | |
182 | assert(stream:write_headers(new_request_headers(), false)) | |
183 | assert.has_error(function() | |
184 | assert(stream:write_body_from_file({ | |
185 | file = file; | |
186 | count = 50; -- longer than the file | |
187 | })) | |
188 | end) | |
189 | end) | |
190 | cq:wrap(function() | |
191 | assert(server:get_next_incoming_stream()) | |
192 | end) | |
193 | assert_loop(cq, TEST_TIMEOUT) | |
194 | assert.truthy(cq:empty()) | |
195 | client:close() | |
196 | server:close() | |
197 | end) | |
114 | 198 | end) |
115 | 199 | end) |
0 | 0 | describe("http.util module", function() |
1 | local unpack = table.unpack or unpack -- luacheck: ignore 113 | |
1 | local unpack = table.unpack or unpack -- luacheck: ignore 113 143 | |
2 | 2 | local util = require "http.util" |
3 | 3 | it("decodeURI works", function() |
4 | 4 | assert.same("Encoded string", util.decodeURI("Encoded%20string")) |
54 | 54 | end |
55 | 55 | assert.same(t, r) |
56 | 56 | end |
57 | end) | |
58 | it("is_safe_method works", function() | |
59 | assert.same(true, util.is_safe_method "GET") | |
60 | assert.same(true, util.is_safe_method "HEAD") | |
61 | assert.same(true, util.is_safe_method "OPTIONS") | |
62 | assert.same(true, util.is_safe_method "TRACE") | |
63 | assert.same(false, util.is_safe_method "POST") | |
64 | assert.same(false, util.is_safe_method "PUT") | |
65 | end) | |
66 | it("is_ip works", function() | |
67 | assert.same(true, util.is_ip "127.0.0.1") | |
68 | assert.same(true, util.is_ip "192.168.1.1") | |
69 | assert.same(true, util.is_ip "::") | |
70 | assert.same(true, util.is_ip "::1") | |
71 | assert.same(true, util.is_ip "2001:0db8:85a3:0042:1000:8a2e:0370:7334") | |
72 | assert.same(true, util.is_ip "::FFFF:204.152.189.116") | |
73 | assert.same(false, util.is_ip "not an ip") | |
74 | assert.same(false, util.is_ip "0x80") | |
75 | assert.same(false, util.is_ip "::FFFF:0.0.0") | |
57 | 76 | end) |
58 | 77 | it("split_authority works", function() |
59 | 78 | assert.same({"example.com", 80}, {util.split_authority("example.com", "http")}) |
91 | 91 | end |
92 | 92 | local correct_headers = http_headers.new() |
93 | 93 | correct_headers:append(":method", "GET") |
94 | correct_headers:append(":scheme", "http") | |
94 | 95 | correct_headers:append(":authority", "example.com") |
95 | 96 | correct_headers:append(":path", "/") |
96 | 97 | correct_headers:append("upgrade", "websocket") |
168 | 169 | end) |
169 | 170 | end) |
170 | 171 | describe("http.websocket module two sided tests", function() |
172 | local onerror = require "http.connection_common".onerror | |
171 | 173 | local server = require "http.server" |
172 | 174 | local util = require "http.util" |
173 | 175 | local websocket = require "http.websocket" |
174 | 176 | local cqueues = require "cqueues" |
175 | 177 | local ca = require "cqueues.auxlib" |
178 | local ce = require "cqueues.errno" | |
176 | 179 | local cs = require "cqueues.socket" |
177 | 180 | local function new_pair() |
178 | 181 | local s, c = ca.assert(cs.pair()) |
182 | s:onerror(onerror) | |
183 | c:onerror(onerror) | |
179 | 184 | local ws_server = websocket.new("server") |
180 | 185 | ws_server.socket = s |
181 | 186 | ws_server.readyState = 1 |
187 | 192 | it("works with a socketpair", function() |
188 | 193 | local cq = cqueues.new() |
189 | 194 | local c, s = new_pair() |
195 | cq:wrap(function() | |
196 | assert(c:send("hello")) | |
197 | assert.same("world", c:receive()) | |
198 | assert(c:close()) | |
199 | end) | |
200 | cq:wrap(function() | |
201 | assert.same("hello", s:receive()) | |
202 | assert(s:send("world")) | |
203 | assert(s:close()) | |
204 | end) | |
205 | assert_loop(cq, TEST_TIMEOUT) | |
206 | assert.truthy(cq:empty()) | |
207 | end) | |
208 | it("timeouts return nil, err, errno", function() | |
209 | local cq = cqueues.new() | |
210 | local c, s = new_pair() | |
211 | local ok, _, errno = c:receive(0) | |
212 | assert.same(nil, ok) | |
213 | assert.same(ce.ETIMEDOUT, errno) | |
214 | -- Check it still works afterwards | |
190 | 215 | cq:wrap(function() |
191 | 216 | assert(c:send("hello")) |
192 | 217 | assert.same("world", c:receive()) |
350 | 375 | port = 0; |
351 | 376 | onstream = function(s, stream) |
352 | 377 | local headers = assert(stream:get_headers()) |
378 | assert.same("http", headers:get(":scheme")) | |
353 | 379 | local ws = websocket.new_from_stream(stream, headers) |
354 | 380 | assert(ws:accept()) |
355 | 381 | assert(ws:close()) |