diff --git a/.gitignore b/.gitignore
index 950811f..9f43d73 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
 /luacov.report.out
 /luacov.stats.out
+/*.rock
diff --git a/.luacheckrc b/.luacheckrc
index e19f128..37fc879 100644
--- a/.luacheckrc
+++ b/.luacheckrc
@@ -6,3 +6,4 @@ files["spec"] = {
 		"assert_loop";
 	};
 }
+max_line_length = false
diff --git a/.travis.yml b/.travis.yml
index 4c70ad2..4e65d6d 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -14,6 +14,7 @@ env:
     - LUA="lua 5.3"    ZLIB=lzlib
     - LUA="lua 5.3"    ZLIB=lua-zlib
     - LUA="lua 5.3"    COMPAT53=no
+    - LUA="lua 5.3"    LUA_CFLAGS="-DLUA_INT_TYPE=LUA_INT_INT"
     - LUA="luajit @"
     - LUA="luajit @"   ZLIB=lzlib
     - LUA="luajit @"   ZLIB=lua-zlib
@@ -30,12 +31,11 @@ branches:
 
 before_install:
   - pip install hererocks
-  - hererocks ~/hererocks -r^ --$LUA # Install latest LuaRocks version
-                              # plus the Lua version for this build job
-                              # into 'here' subdirectory
-  - export PATH=$PATH:~/hererocks/bin # Add directory with all installed binaries to PATH
-  - eval `luarocks path --bin`
+  - hererocks ~/hererocks -r^ --$LUA --cflags=$LUA_CFLAGS
+  - export PATH=$PATH:~/hererocks/bin
+  - eval $(luarocks path --bin)
   - luarocks install luacheck
+  - luarocks install https://raw.githubusercontent.com/andremm/typedlua/master/typedlua-scm-1.rockspec
   - luarocks install luacov-coveralls
   - luarocks install busted
 
@@ -47,6 +47,7 @@ install:
 
 script:
   - luacheck .
+  - tlc -o /dev/null spec/require-all.lua
   - busted -c
 
 after_success:
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..a0f2de4
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,77 @@
+Hello and thank-you for considering contributing to lua-http!
+
+If you haven't already, see the [getting started](https://github.com/daurnimator/lua-http#getting-started) section of the main readme.
+
+# Contributing
+
+To submit your code for inclusion, please [send a "pull request" using github](https://github.com/daurnimator/lua-http/pulls).
+For a speedy approval, please:
+
+  - Follow the [coding style](#coding-style)
+  - Run [`luacheck`](https://github.com/mpeterv/luacheck) to lint your code
+  - Include [tests](#tests)
+	  - Bug fixes should add a test exhibiting the issue
+	  - Enhancements must add tests for the new feature
+  - [Sign off](#dco) your code
+
+
+If you are requested by a project maintainer to fix an issue with your pull request, please edit your existing commits (using e.g. `git commit --amend` or [`git fixup`](https://github.com/hashbang/dotfiles/blob/master/git/.local/bin/git-fixup)) rather than pushing new commits on top of the old ones.
+
+All commits *should* have the project in an operational state.
+
+
+# Coding Style
+
+When editing an existing file, please follow the coding style used in that file.
+If not clear from context or if you're starting a new file:
+
+  - Indent with tabs
+  - Alignment should not be done; when unavoidable, align with spaces
+  - Remove any trailing whitespace (unless whitespace is significant as it can be in e.g. markdown)
+  - Things (e.g. table fields) should be ordered by:
+	 1. Required vs optional
+	 2. Importance
+	 3. Lexicographically (alphabetically)
+
+
+## Lua conventions
+
+  - Add a `__name` field to metatables
+  - Use a table separate from the metatable itself for `__index`
+  - Single-line table definitions should use commas (`,`) for delimiting elements
+  - Multi-line table definitions should use semicolons (`;`) for delimiting elements
+
+
+## Markdown conventions
+
+  - Files should have two blank lines at the end of a section
+  - Repository information files (e.g. README.md/CONTRIBUTING.md) should use github compatible markdown features
+  - Files used to generate documentation can use any `pandoc` features they want
+
+
+# Tests
+
+The project has a test suite using the [`busted`](https://github.com/Olivine-Labs/busted) framework.
+Coverage is measured using [`luacov`](https://github.com/keplerproject/luacov).
+
+Tests can be found in the `spec/` directory at the root of the repository. Each source file should have its own file full of tests.
+
+Tests should avoid running any external processes. Use `cqueues` to start up various test servers and clients in-process.
+
+A successful test should close any file handles and sockets to avoid resource exhaustion.
+
+
+# Legal
+
+All code in the repository is covered by `LICENSE.md`.
+
+## DCO
+
+A git `Signed-off-by` statement in a commit message in this repository refers to the [Developer Certificate of Origin](https://developercertificate.org/) (DCO).
+By signing off your commit you are making a legal statement that the work is contributed under the license of this project.
+You can add the statement to your commit by passing `-s` to `git commit`.
+
+
+# Security
+
+If you find a security vulnerability in the project and do not wish to file it publicly on the [issue tracker](https://github.com/daurnimator/lua-http/issues) then you may email [lua-http-security@daurnimator.com](mailto:lua-http-security@daurnimator.com). You may encrypt your mail using PGP to the key with fingerprint [954A3772D62EF90E4B31FBC6C91A9911192C187A](https://daurnimator.com/post/109075829529/gpg-key).
diff --git a/LICENSE.md b/LICENSE.md
index 0adb5f6..5db52c0 100644
--- a/LICENSE.md
+++ b/LICENSE.md
@@ -1,6 +1,6 @@
 The MIT License (MIT)
 
-Copyright (c) 2015-2016 Daurnimator
+Copyright (c) 2015-2019 Daurnimator
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/NEWS b/NEWS
index 78cb496..d8e4129 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,27 @@
+0.3 - 2019-02-13
+
+  - Fix incorrect Sec-WebSocket-Protocol negotiation
+  - Fix incorrect timeout handling in `websocket:receive()`
+  - Add workaround to allow being required in openresty (#98)
+  - Add http.tls.old_cipher_list (#112)
+  - Add http.cookie module (#117)
+  - Improvements to http.hsts module (#119)
+  - Add `options` argument form to `stream:write_body_from_file()` (#125)
+
+
+0.2 - 2017-05-28
+
+  - Remove broken http.server `.client_timeout` option (replaced with `.connection_setup_timeout`)
+  - Fix http1 pipelining locks
+  - Miscellaneous http2 fixes
+  - HTTP 2 streams no longer have to be used in order of creation
+  - No longer raise decode errors in hpack module
+  - Fix `hpack:lookup_index()` to treat static entries without values as empty string
+  - Fix HTTP 1 client in locales with non-"." decimal separator
+  - Add h1_stream.max_header_lines property to prevent infinite list of headers
+  - New '.bind' option for requests and http.client module
+
+
 0.1 - 2016-12-17
 
   - Support for HTTP versions 1, 1.1 and 2
diff --git a/README.md b/README.md
index 3180c1a..dbf9883 100644
--- a/README.md
+++ b/README.md
@@ -5,6 +5,7 @@
   - Optionally asynchronous (including DNS lookups and TLS)
   - Supports HTTP(S) version 1.0, 1.1 and 2
   - Functionality for both client and server
+  - Cookie Management
   - Websockets
   - Compatible with Lua 5.1, 5.2, 5.3 and [LuaJIT](http://luajit.org/)
 
@@ -18,6 +19,7 @@ Can be found at [https://daurnimator.github.io/lua-http/](https://daurnimator.gi
 
 [![Build Status](https://travis-ci.org/daurnimator/lua-http.svg)](https://travis-ci.org/daurnimator/lua-http)
 [![Coverage Status](https://coveralls.io/repos/daurnimator/lua-http/badge.svg?branch=master&service=github)](https://coveralls.io/github/daurnimator/lua-http?branch=master)
+[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/108/badge)](https://bestpractices.coreinfrastructure.org/projects/108)
 
 
 # Installation
@@ -29,17 +31,22 @@ This will automatically install run-time lua dependencies for you.
 
 ## Dependencies
 
-  - [cqueues](http://25thandclement.com/~william/projects/cqueues.html) >= 20161214
+  - [cqueues](http://25thandclement.com/~william/projects/cqueues.html) >= 20161214 (Note: cqueues currently doesn't support Microsoft Windows operating systems)
   - [luaossl](http://25thandclement.com/~william/projects/luaossl.html) >= 20161208
   - [basexx](https://github.com/aiq/basexx/) >= 0.2.0
   - [lpeg](http://www.inf.puc-rio.br/~roberto/lpeg/lpeg.html)
-  - [lpeg_patterns](https://github.com/daurnimator/lpeg_patterns) >= 0.3
+  - [lpeg_patterns](https://github.com/daurnimator/lpeg_patterns) >= 0.5
+  - [binaryheap](https://github.com/Tieske/binaryheap.lua) >= 0.3
   - [fifo](https://github.com/daurnimator/fifo.lua)
 
 To use gzip compression you need **one** of:
 
   - [lzlib](https://github.com/LuaDist/lzlib) or [lua-zlib](https://github.com/brimworks/lua-zlib)
 
+To check cookies against a public suffix list:
+
+  - [lua-psl](https://github.com/daurnimator/lua-psl)
+
 If using lua < 5.3 you will need
 
   - [compat-5.3](https://github.com/keplerproject/lua-compat-5.3) >= 0.3
diff --git a/debian/changelog b/debian/changelog
index b9a316b..3432eae 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+lua-http (0.1+git20190916.47225d0-1) UNRELEASED; urgency=medium
+
+  * New upstream snapshot.
+
+ -- Debian Janitor <janitor@jelmer.uk>  Sun, 24 Nov 2019 23:04:41 +0000
+
 lua-http (0.1-2) unstable; urgency=medium
 
   * New lua-http breaks knot-resolver-module-http and not knot-resolver
diff --git a/doc/Makefile b/doc/Makefile
index 5faff7a..e65f9f3 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -5,6 +5,7 @@ INTERFACES = \
 MODULES = \
 	http.bit.md \
 	http.client.md \
+	http.cookie.md \
 	http.h1_connection.md \
 	http.h1_reason_phrases.md \
 	http.h1_stream.md \
diff --git a/doc/interfaces.md b/doc/interfaces.md
index 8147692..8452ab9 100644
--- a/doc/interfaces.md
+++ b/doc/interfaces.md
@@ -1,3 +1,5 @@
 # Interfaces
 
+lua-http has separate modules for HTTP 1 vs HTTP 2 protocols, yet the different versions share many common concepts. lua-http provides a common interface for operations that make sense for both protocol versions (as well as any future developments).
+
 The following sections outline the interfaces exposed by the lua-http library.
diff --git a/doc/interfaces/connection.md b/doc/interfaces/connection.md
index 0c14618..433a34b 100644
--- a/doc/interfaces/connection.md
+++ b/doc/interfaces/connection.md
@@ -1,14 +1,15 @@
 ## connection
 
-lua-http has separate libraries for both HTTP 1 and HTTP 2 type communications. Future protocols will also be supported and exposed as new modules. As HTTP 1 and 2 share common concepts at the connection and stream level, the _[connection](#connection)_ and _[stream](#stream)_ modules have been written to contain common interfaces wherever possible. All _[connection](#connection)_ types expose the following fields:
+A connection encapsulates a socket and provides protocol specific operations. A connection may have [*streams*](#stream) which encapsulate the requests/responses happening over a connection. Alternatively, you can ignore streams entirely and use low level protocol specific operations to read and write to the socket.
 
+All *connection* types expose the following fields:
 
 ### `connection.type` <!-- --> {#connection.type}
 
 The mode of use for the connection object. Valid values are:
 
-  - `"client"` - Connects to a remote URI
-  - `"server"` - Listens for connection on a local URI
+  - `"client"`: Acts as a client; this connection type is used by entities who want to make requests
+  - `"server"`: Acts as a server; this connection type is used by entities who want to respond to requests
 
 
 ### `connection.version` <!-- --> {#connection.version}
@@ -16,6 +17,15 @@ The mode of use for the connection object. Valid values are:
 The HTTP version number of the connection as a number.
 
 
+### `connection:pollfd()` <!-- --> {#connection:pollfd}
+
+
+### `connection:events()` <!-- --> {#connection:events}
+
+
+### `connection:timeout()` <!-- --> {#connection:timeout}
+
+
 ### `connection:connect(timeout)` <!-- --> {#connection:connect}
 
 Completes the connection to the remote server using the address specified, HTTP version and any options specified in the `connection.new` constructor. The `connect` function will yield until the connection attempt finishes (success or failure) or until `timeout` is exceeded. Connecting may include DNS lookups, TLS negotiation and HTTP2 settings exchange. Returns `true` on success. On error, returns `nil`, an error message and an error number.
@@ -40,7 +50,7 @@ Returns the connection information for the socket *peer* (as in, the next hop).
 
 ### `connection:flush(timeout)` <!-- --> {#connection:flush}
 
-Flushes all buffered outgoing data on the socket. Returns `true` on success. Returns `false` and the error if the socket fails to flush.
+Flushes buffered outgoing data on the socket to the operating system. Returns `true` on success. On error, returns `nil`, an error message and an error number.
 
 
 ### `connection:shutdown()` <!-- --> {#connection:shutdown}
diff --git a/doc/interfaces/stream.md b/doc/interfaces/stream.md
index f8f2e5b..348025d 100644
--- a/doc/interfaces/stream.md
+++ b/doc/interfaces/stream.md
@@ -89,7 +89,12 @@ Writes the string `chunk` to the stream. If `end_stream` is true, the body will
 Writes the string `str` to the stream and ends the stream. On error, returns `nil`, an error message and an error number.
 
 
-### `stream:write_body_from_file(file, timeout)` <!-- --> {#stream:write_body_from_file}
+### `stream:write_body_from_file(options|file, timeout)` <!-- --> {#stream:write_body_from_file}
+
+  - `options` is a table containing:
+	- `.file` (file)
+	- `.count` (positive integer): number of bytes of `file` to write  
+	  defaults to infinity (the whole file will be written)
 
 Writes the contents of file `file` to the stream and ends the stream. `file` will not be automatically seeked, so ensure it is at the correct offset before calling. On error, returns `nil`, an error message and an error number.
 
diff --git a/doc/introduction.md b/doc/introduction.md
index 7b39344..db2764b 100644
--- a/doc/introduction.md
+++ b/doc/introduction.md
@@ -99,6 +99,12 @@ The following is a list of API conventions and general reference:
   - Some HTTP 2 operations return/throw special [http 2 error objects](#http.h2_error).
 
 
+### Timeouts
+
+All operations that may block the current thread take a `timeout` argument.
+This argument is always the number of seconds to allow before returning `nil, err_msg, ETIMEDOUT` where `err_msg` is a localised error message such as `"connection timed out"`.
+
+
 ## Terminology
 
 Much lua-http terminology is borrowed from HTTP 2.
diff --git a/doc/modules/http.client.md b/doc/modules/http.client.md
index 76a357c..9f0995f 100644
--- a/doc/modules/http.client.md
+++ b/doc/modules/http.client.md
@@ -37,6 +37,7 @@ This function returns a new connection to an HTTP server. Once a connection has
 		e.g. `"80"` or `80`
 	  - `path` (string): path to connect to (UNIX sockets)
 	  - `v6only` (boolean, optional): if the `IPV6_V6ONLY` flag should be set on the underlying socket.
+	  - `bind` (string, optional): the local outgoing address and optionally port to bind in the form of `"address[:port]"`, IPv6 addresses may be specified via square bracket notation. e.g. `"127.0.0.1"`, `"127.0.0.1:50000"`, `"[::1]:30000"`.
   - `timeout` (optional) is the maximum amount of time (in seconds) to allow for connection to be established.  
 	This includes time for DNS lookup, connection, TLS negotiation (if TLS enabled) and in the case of HTTP 2: settings exchange.
 
diff --git a/doc/modules/http.cookie.md b/doc/modules/http.cookie.md
new file mode 100644
index 0000000..a6cd9f5
--- /dev/null
+++ b/doc/modules/http.cookie.md
@@ -0,0 +1,175 @@
+## http.cookie
+
+A module for working with cookies.
+
+### `bake(name, value, expiry_time, domain, path, secure_only, http_only, same_site)` <!-- --> {#http.cookie.bake}
+
+Returns a string suitable for use in a `Set-Cookie` header with the passed parameters.
+
+
+### `parse_cookie(cookie)` <!-- --> {#http.cookie.parse_cookie}
+
+Parses the `Cookie` header contents `cookie`.
+
+Returns a table containing `name` and `value` pairs as strings.
+
+
+### `parse_cookies(req_headers)` <!-- --> {#http.cookie.parse_cookies}
+
+Parses all `Cookie` headers in the [*http.headers*](#http.headers) object `req_headers`.
+
+Returns a table containing `name` and `value` pairs as strings.
+
+
+### `parse_setcookie(setcookie)` <!-- --> {#http.cookie.parse_setcookie}
+
+Parses the `Set-Cookie` header contents `setcookie`.
+
+Returns `name`, `value` and `params` where:
+
+  - `name` is a string containing the cookie name
+  - `value` is a string containing the cookie value
+  - `params` is a table where the keys are cookie attribute names and values are cookie attribute values
+
+
+### `new_store()` <!-- --> {#http.cookie.new_store}
+
+Creates a new cookie store.
+
+Cookies are unique for a tuple of domain, path and name;
+although multiple cookies with the same name may exist in a request due to overlapping paths or domains.
+
+
+### `store.psl` <!-- --> {#http.cookie.store.psl}
+
+A [lua-psl](https://github.com/daurnimator/lua-psl) object to use for checking against the Public Suffix List.
+Set the field to `false` to skip checking the suffix list.
+
+Defaults to the [latest](https://rockdaboot.github.io/libpsl/libpsl-Public-Suffix-List-functions.html#psl-latest) PSL on the system. If lua-psl is not installed then it will be `nil`.
+
+
+### `store.time()` <!-- --> {#http.cookie.store.time}
+
+A function used by the `store` to get the current time for expiries and such.
+
+Defaults to a function based on [`os.time`](https://www.lua.org/manual/5.3/manual.html#pdf-os.time).
+
+
+### `store.max_cookie_length` <!-- --> {#http.cookie.store.max_cookie_length}
+
+The maximum length (in bytes) of cookies in the store; this value is also used as default maximum cookie length for `:lookup()`.
+Decreasing this value will only prevent new cookies from being added, it will not remove old cookies.
+
+Defaults to infinity (no maximum size).
+
+
+### `store.max_cookies` <!-- --> {#http.cookie.store.max_cookies}
+
+The maximum number of cookies allowed in the `store`.
+Decreasing this value will only prevent new cookies from being added, it will not remove old cookies.
+
+Defaults to infinity (any number of cookies is allowed).
+
+
+### `store.max_cookies_per_domain` <!-- --> {#http.cookie.store.max_cookies_per_domain}
+
+The maximum number of cookies allowed in the `store` per domain.
+Decreasing this value will only prevent new cookies from being added, it will not remove old cookies.
+
+Defaults to infinity (any number of cookies is allowed).
+
+
+### `store:store(req_domain, req_path, req_is_http, req_is_secure, req_site_for_cookies, name, value, params)` <!-- --> {#http.cookie.store:store}
+
+Attempts to add a cookie to the `store`.
+
+  - `req_domain` is the domain that the cookie was obtained from
+  - `req_path` is the path that the cookie was obtained from
+  - `req_is_http` is a boolean flag indicating if the cookie was obtained from a "non-HTTP" API
+  - `req_is_secure` is a boolean flag indicating if the cookie was obtained from a "secure" protocol
+  - `req_site_for_cookies` is a string containing the host that should be considered as the "site for cookies" (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)), can be `nil` if unknown.
+  - `name` is a string containing the cookie name
+  - `value` is a string containing the cookie value
+  - `params` is a table where the keys are cookie attribute names and values are cookie attribute values
+
+Returns a boolean indicating if a cookie was stored.
+
+
+### `store:store_from_request(req_headers, resp_headers, req_host, req_site_for_cookies)` <!-- --> {#http.cookie.store:store_from_request}
+
+Attempt to store any cookies found in the response headers.
+
+  - `req_headers` is the [*http.headers*](#http.headers) object for the outgoing request
+  - `resp_headers` is the [*http.headers*](#http.headers) object received in response
+  - `req_host` is the host that your query was directed at (only used if `req_headers` is missing a `Host` header)
+  - `req_site_for_cookies` is a string containing the host that should be considered as the "site for cookies" (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)), can be `nil` if unknown.
+
+
+### `store:get(domain, path, name)` <!-- --> {#http.cookie.store:get}
+
+Returns the cookie value for the cookie stored for the passed `domain`, `path` and `name`.
+
+
+### `store:remove(domain, path, name)` <!-- --> {#http.cookie.store:remove}
+
+Deletes the cookie stored for the passed `domain`, `path` and `name`.
+
+If `name` is `nil` or not passed then all cookies for the `domain` and `path` are removed.
+
+If `path` is `nil` or not passed (in addition to `name`) then all cookies for the `domain` are removed.
+
+
+### `store:lookup(domain, path, is_http, is_secure, is_safe_method, site_for_cookies, is_top_level, max_cookie_length)` <!-- --> {#http.cookie.store:lookup}
+
+Finds cookies suitable for passing to an entity.
+
+  - `domain` is the domain that will be sent the cookie
+  - `path` is the path that will be sent the cookie
+  - `is_http` is a boolean flag indicating if the destination is a "non-HTTP" API
+  - `is_secure` is a boolean flag indicating if the destination will be communicated with over a "secure" protocol
+  - `is_safe_method` is a boolean flag indicating if the cookie will be sent via a safe HTTP method (See also [http.util.is_safe_method](#http.util.is_safe_method))
+  - `site_for_cookies` is a string containing the host that should be considered as the "site for cookies" (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)), can be `nil` if unknown.
+  - `is_top_level` is a boolean flag indicating if this request is a "top level" request (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2))
+  - `max_cookie_length` is the maximum cookie length to allow (See also [`store.max_cookie_length`](#http.cookie.store.max_cookie_length))
+
+Returns a string suitable for use in a `Cookie` header.
+
+
+### `store:lookup_for_request(headers, host, site_for_cookies, is_top_level, max_cookie_length)` <!-- --> {#http.cookie.store:lookup_for_request}
+
+Finds cookies suitable for adding to a request.
+
+  - `headers` is the [*http.headers*](#http.headers) object for the outgoing request
+  - `host` is the host that your query was directed at (only used if `headers` is missing a `Host` header)
+  - `site_for_cookies` is a string containing the host that should be considered as the "site for cookies" (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)), can be `nil` if unknown.
+  - `is_top_level` is a boolean flag indicating if this request is a "top level" request (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2))
+  - `max_cookie_length` is the maximum cookie length to allow (See also [`store.max_cookie_length`](#http.cookie.store.max_cookie_length))
+
+Returns a string suitable for use in a `Cookie` header.
+
+
+### `store:clean_due()` <!-- --> {#http.cookie.store:clean_due}
+
+Returns the number of seconds until the next cookie in the `store` expires.
+
+
+### `store:clean()` <!-- --> {#http.cookie.store:clean}
+
+Remove all expired cookies from the `store`.
+
+
+### `store:load_from_file(file)` <!-- --> {#http.cookie.store:load_from_file}
+
+Loads cookie data from the file object `file` into `store`.
+The file should be in the Netscape Cookiejar format.
+Invalid lines in the file are ignored.
+
+Returns `true` on success or passes along `nil, err, errno` if a `:read` call fails.
+
+
+### `store:save_to_file(file)` <!-- --> {#http.cookie.store:save_to_file}
+
+Writes the cookie data from `store` into the file object `file` in the Netscape Cookiejar format.
+`file` is not `seek`-ed or truncated before writing.
+
+Returns `true` on success or passes along `nil, err, errno` if a `:write` call fails.
diff --git a/doc/modules/http.h1_connection.md b/doc/modules/http.h1_connection.md
index e90ed65..e3fdd69 100644
--- a/doc/modules/http.h1_connection.md
+++ b/doc/modules/http.h1_connection.md
@@ -17,6 +17,21 @@ Specifies the HTTP version used for the connection handshake. Valid values are:
 See [`connection.version`](#connection.version)
 
 
+### `h1_connection:pollfd()` <!-- --> {#http.h1_connection:pollfd}
+
+See [`connection:pollfd()`](#connection:pollfd)
+
+
+### `h1_connection:events()` <!-- --> {#http.h1_connection:events}
+
+See [`connection:events()`](#connection:events)
+
+
+### `h1_connection:timeout()` <!-- --> {#http.h1_connection:timeout}
+
+See [`connection:timeout()`](#connection:timeout)
+
+
 ### `h1_connection:connect(timeout)` <!-- --> {#http.h1_connection:connect}
 
 See [`connection:connect(timeout)`](#connection:connect)
@@ -42,7 +57,11 @@ See [`connection:peername()`](#connection:peername)
 See [`connection:flush(timeout)`](#connection:flush)
 
 
-### `h1_connection:shutdown()` <!-- --> {#http.h1_connection:shutdown}
+### `h1_connection:shutdown(dir)` <!-- --> {#http.h1_connection:shutdown}
+
+Shut down is as graceful as possible: pipelined streams are [shutdown](#http.h1_stream:shutdown), then the underlying socket is shut down in the appropriate direction(s).
+
+`dir` is a string representing the direction of communication to shut down communication in. If it contains `"r"` it will shut down reading, if it contains `"w"` it will shut down writing. The default is `"rw"`, i.e. to shutdown communication in both directions.
 
 See [`connection:shutdown()`](#connection:shutdown)
 
@@ -69,6 +88,14 @@ See [`connection:get_next_incoming_stream(timeout)`](#connection:get_next_incomi
 See [`connection:onidle(new_handler)`](#connection:onidle)
 
 
+### `h1_connection:setmaxline(read_length)` <!-- --> {#http.h1_connection:setmaxline}
+
+Sets the maximum read buffer size (in bytes) to `read_length`. i.e. sets the maximum length lines (such as headers).
+
+The default comes from the underlying socket, which gets the (changeable) cqueues default at time of construction.
+The default cqueues default is 4096 bytes.
+
+
 ### `h1_connection:clearerr(...)` <!-- --> {#http.h1_connection:clearerr}
 
 Clears errors to allow for further read or write operations on the connection. Returns the error number of existing errors. This function is used to recover from known errors.
@@ -86,7 +113,7 @@ Used to hand the reference of the connection socket to another object. Resets th
 
 ### `h1_connection:read_request_line(timeout)` <!-- --> {#http.h1_connection:read_request_line}
 
-Reads a request line from the socket. Returns the request method, requested path and HTTP version for an incoming request. `:read_request_line()` yields until a `"\r\n"` terminated chunk is received, or `timeout` is exceeded. If the incoming chunk is not a valid HTTP request line, `nil` is returned. On error, returns `nil`, an error message and an error number.
+Reads a request line from the socket. Returns the request method, request target and HTTP version for an incoming request. `:read_request_line()` yields until a `"\r\n"` terminated chunk is received, or `timeout` is exceeded. If the incoming chunk is not a valid HTTP request line, `nil` is returned. On error, returns `nil`, an error message and an error number.
 
 
 ### `h1_connection:read_status_line(timeout)` <!-- --> {#http.h1_connection:read_status_line}
@@ -119,7 +146,7 @@ Reads the entire request body. This function will yield until the body is comple
 Reads the next available line of data from the request and returns the chunk and any chunk extensions. This function will yield until chunk size is received or `timeout` is exceeded. If the chunk size is indicated as `0` then `false` and any chunk extensions are returned. Returns `nil`, an error message and an error number if there was an error reading reading the chunk header or the socket.
 
 
-### `h1_connection:write_request_line(method, path, httpversion, timeout)` <!-- --> {#http.h1_connection:write_request_line}
+### `h1_connection:write_request_line(method, target, httpversion, timeout)` <!-- --> {#http.h1_connection:write_request_line}
 
 Writes the opening HTTP 1.x request line for a new request to the socket buffer. Yields until success or `timeout`. If the write fails, returns `nil`, an error message and an error number.
 
@@ -149,12 +176,12 @@ Terminates a header block by writing a blank line (`"\r\n"`) to the socket. This
 
 Writes a chunk of data to the socket. `chunk_ext` must be `nil` as chunk extensions are not supported. Will yield until complete or `timeout` is exceeded. Returns true on success. Returns `nil`, an error message and an error number if the write fails.
 
-*Note that `chunk` will not be flushed to the remote server until [`write_body_last_chunk`](#http.h1_connection:write_body_last_chunk) is called.*
-
 
 ### `h1_connection:write_body_last_chunk(chunk_ext, timeout)` <!-- --> {#http.h1_connection:write_body_last_chunk}
 
-Writes the chunked body terminator `"0\r\n"` to the socket and flushes the socket output buffer. `chunk_ext` must be `nil` as chunk extensions are not supported. Will yield until complete or `timeout` is exceeded. Returns `nil`, an error message and an error number if the write fails.
+Writes the chunked body terminator `"0\r\n"` to the socket. `chunk_ext` must be `nil` as chunk extensions are not supported. Will yield until complete or `timeout` is exceeded. Returns `nil`, an error message and an error number if the write fails.
+
+*Note that the connection will not be immediately flushed to the remote server; normally this will occur when trailers are written.*
 
 
 ### `h1_connection:write_body_plain(body, timeout)` <!-- --> {#http.h1_connection:write_body_plain}
diff --git a/doc/modules/http.h1_stream.md b/doc/modules/http.h1_stream.md
index 1023c0f..eca29c5 100644
--- a/doc/modules/http.h1_stream.md
+++ b/doc/modules/http.h1_stream.md
@@ -4,97 +4,102 @@ The *h1_stream* module adheres to the [*stream*](#stream) interface and provides
 
 The gzip transfer encoding is supported transparently.
 
-### `h1_stream.connection` <!-- --> {#h1_stream.connection}
+### `h1_stream.connection` <!-- --> {#http.h1_stream.connection}
 
 See [`stream.connection`](#stream.connection)
 
 
-### `h1_stream:checktls()` <!-- --> {#h1_stream:checktls}
+### `h1_stream.max_header_lines` <!-- --> {#http.h1_stream.max_header_lines}
+
+The maximum number of header lines to read. Default is `100`.
+
+
+### `h1_stream:checktls()` <!-- --> {#http.h1_stream:checktls}
 
 See [`stream:checktls()`](#stream:checktls)
 
 
-### `h1_stream:localname()` <!-- --> {#h1_stream:localname}
+### `h1_stream:localname()` <!-- --> {#http.h1_stream:localname}
 
 See [`stream:localname()`](#stream:localname)
 
 
-### `h1_stream:peername()` <!-- --> {#h1_stream:peername}
+### `h1_stream:peername()` <!-- --> {#http.h1_stream:peername}
 
 See [`stream:peername()`](#stream:peername)
 
 
-### `h1_stream:get_headers(timeout)` <!-- --> {#h1_stream:get_headers}
+### `h1_stream:get_headers(timeout)` <!-- --> {#http.h1_stream:get_headers}
 
 See [`stream:get_headers(timeout)`](#stream:get_headers)
 
 
-### `h1_stream:write_headers(headers, end_stream, timeout)` <!-- --> {#h1_stream:write_headers}
+### `h1_stream:write_headers(headers, end_stream, timeout)` <!-- --> {#http.h1_stream:write_headers}
 
 See [`stream:write_headers(headers, end_stream, timeout)`](#stream:write_headers)
 
 
-### `h1_stream:write_continue(timeout)` <!-- --> {#h1_stream:write_continue}
+### `h1_stream:write_continue(timeout)` <!-- --> {#http.h1_stream:write_continue}
 
 See [`stream:write_continue(timeout)`](#stream:write_continue)
 
 
-### `h1_stream:get_next_chunk(timeout)` <!-- --> {#h1_stream:get_next_chunk}
+### `h1_stream:get_next_chunk(timeout)` <!-- --> {#http.h1_stream:get_next_chunk}
 
 See [`stream:get_next_chunk(timeout)`](#stream:get_next_chunk)
 
 
-### `h1_stream:each_chunk()` <!-- --> {#h1_stream:each_chunk}
+### `h1_stream:each_chunk()` <!-- --> {#http.h1_stream:each_chunk}
 
 See [`stream:each_chunk()`](#stream:each_chunk)
 
 
-### `h1_stream:get_body_as_string(timeout)` <!-- --> {#h1_stream:get_body_as_string}
+### `h1_stream:get_body_as_string(timeout)` <!-- --> {#http.h1_stream:get_body_as_string}
 
 See [`stream:get_body_as_string(timeout)`](#stream:get_body_as_string)
 
 
-### `h1_stream:get_body_chars(n, timeout)` <!-- --> {#h1_stream:get_body_chars}
+### `h1_stream:get_body_chars(n, timeout)` <!-- --> {#http.h1_stream:get_body_chars}
 
 See [`stream:get_body_chars(n, timeout)`](#stream:get_body_chars)
 
 
-### `h1_stream:get_body_until(pattern, plain, include_pattern, timeout)` <!-- --> {#h1_stream:get_body_until}
+### `h1_stream:get_body_until(pattern, plain, include_pattern, timeout)` <!-- --> {#http.h1_stream:get_body_until}
 
 See [`stream:get_body_until(pattern, plain, include_pattern, timeout)`](#stream:get_body_until)
 
 
-### `h1_stream:save_body_to_file(file, timeout)` <!-- --> {#h1_stream:save_body_to_file}
+### `h1_stream:save_body_to_file(file, timeout)` <!-- --> {#http.h1_stream:save_body_to_file}
 
 See [`stream:save_body_to_file(file, timeout)`](#stream:save_body_to_file)
 
 
-### `h1_stream:get_body_as_file(timeout)` <!-- --> {#h1_stream:get_body_as_file}
+### `h1_stream:get_body_as_file(timeout)` <!-- --> {#http.h1_stream:get_body_as_file}
 
 See [`stream:get_body_as_file(timeout)`](#stream:get_body_as_file)
 
 
-### `h1_stream:unget(str)` <!-- --> {#h1_stream:unget}
+### `h1_stream:unget(str)` <!-- --> {#http.h1_stream:unget}
 
 See [`stream:unget(str)`](#stream:unget)
 
 
-### `h1_stream:write_chunk(chunk, end_stream, timeout)` <!-- --> {#h1_stream:write_chunk}
+### `h1_stream:write_chunk(chunk, end_stream, timeout)` <!-- --> {#http.h1_stream:write_chunk}
 
 See [`stream:write_chunk(chunk, end_stream, timeout)`](#stream:write_chunk)
 
 
-### `h1_stream:write_body_from_string(str, timeout)` <!-- --> {#h1_stream:write_body_from_string}
+### `h1_stream:write_body_from_string(str, timeout)` <!-- --> {#http.h1_stream:write_body_from_string}
 
 See [`stream:write_body_from_string(str, timeout)`](#stream:write_body_from_string)
 
 
-### `h1_stream:write_body_from_file(file, timeout)` <!-- --> {#h1_stream:write_body_from_file}
+### `h1_stream:write_body_from_file(options|file, timeout)` <!-- --> {#http.h1_stream:write_body_from_file}
 
-See [`stream:write_body_from_file(file, timeout)`](#stream:write_body_from_file)
+See [`stream:write_body_from_file(options|file, timeout)`](#stream:write_body_from_file)
 
 
-### `h1_stream:shutdown()` <!-- --> {#h1_stream:shutdown}
+### `h1_stream:shutdown()` <!-- --> {#http.h1_stream:shutdown}
 
 See [`stream:shutdown()`](#stream:shutdown)
 
@@ -113,6 +118,13 @@ Not all state transitions are allowed.
 
 ### `h1_stream:read_headers(timeout)` <!-- --> {#http.h1_stream:read_headers}
 
-Reads and returns a table containing the request line and all HTTP headers as key value pairs.
+Reads and returns a [header block](#http.headers) from the underlying connection. Does *not* take into account buffered header blocks. On error, returns `nil`, an error message and an error number.
+
+This function should rarely be used, you're probably looking for [`:get_headers()`](#http.h1_stream:get_headers).
+
+
+### `h1_stream:read_next_chunk(timeout)` <!-- --> {#http.h1_stream:read_next_chunk}
+
+Reads and returns the next chunk as a string from the underlying connection. Does *not* take into account buffered chunks. On error, returns `nil`, an error message and an error number.
 
-This function should rarely be used, you're probably looking for [`:get_headers()`](#h1_stream:get_headers).
+This function should rarely be used, you're probably looking for [`:get_next_chunk()`](#http.h1_stream:get_next_chunk).
diff --git a/doc/modules/http.h2_connection.md b/doc/modules/http.h2_connection.md
index feccd33..77de2c1 100644
--- a/doc/modules/http.h2_connection.md
+++ b/doc/modules/http.h2_connection.md
@@ -17,12 +17,18 @@ See [`connection.version`](#connection.version)
 
 ### `h2_connection:pollfd()` <!-- --> {#http.h2_connection:pollfd}
 
+See [`connection:pollfd()`](#connection:pollfd)
+
 
 ### `h2_connection:events()` <!-- --> {#http.h2_connection:events}
 
+See [`connection:events()`](#connection:events)
+
 
 ### `h2_connection:timeout()` <!-- --> {#http.h2_connection:timeout}
 
+See [`connection:timeout()`](#connection:timeout)
+
 
 ### `h2_connection:empty()` <!-- --> {#http.h2_connection:empty}
 
@@ -89,7 +95,7 @@ See [`connection:onidle(new_handler)`](#connection:onidle)
 ### `h2_connection:read_http2_frame(timeout)` <!-- --> {#http.h2_connection:read_http2_frame}
 
 
-### `h2_connection:write_http2_frame(typ, flags, streamid, payload, timeout)` <!-- --> {#http.h2_connection:write_http2_frame}
+### `h2_connection:write_http2_frame(typ, flags, streamid, payload, timeout, flush)` <!-- --> {#http.h2_connection:write_http2_frame}
 
 
 ### `h2_connection:ping(timeout)` <!-- --> {#http.h2_connection:ping}
diff --git a/doc/modules/http.h2_stream.md b/doc/modules/http.h2_stream.md
index 9889894..3b55976 100644
--- a/doc/modules/http.h2_stream.md
+++ b/doc/modules/http.h2_stream.md
@@ -2,130 +2,136 @@
 
 An h2_stream represents an HTTP 2 stream. The module follows the [*stream*](#stream) interface as well as HTTP 2 specific functions.
 
-### `h2_stream.connection` <!-- --> {#h2_stream.connection}
+### `h2_stream.connection` <!-- --> {#http.h2_stream.connection}
 
 See [`stream.connection`](#stream.connection)
 
 
-### `h2_stream:checktls()` <!-- --> {#h2_stream:checktls}
+### `h2_stream:checktls()` <!-- --> {#http.h2_stream:checktls}
 
 See [`stream:checktls()`](#stream:checktls)
 
 
-### `h2_stream:localname()` <!-- --> {#h2_stream:localname}
+### `h2_stream:localname()` <!-- --> {#http.h2_stream:localname}
 
 See [`stream:localname()`](#stream:localname)
 
 
-### `h2_stream:peername()` <!-- --> {#h2_stream:peername}
+### `h2_stream:peername()` <!-- --> {#http.h2_stream:peername}
 
 See [`stream:peername()`](#stream:peername)
 
 
-### `h2_stream:get_headers(timeout)` <!-- --> {#h2_stream:get_headers}
+### `h2_stream:get_headers(timeout)` <!-- --> {#http.h2_stream:get_headers}
 
 See [`stream:get_headers(timeout)`](#stream:get_headers)
 
 
-### `h2_stream:write_headers(headers, end_stream, timeout)` <!-- --> {#h2_stream:write_headers}
+### `h2_stream:write_headers(headers, end_stream, timeout)` <!-- --> {#http.h2_stream:write_headers}
 
 See [`stream:write_headers(headers, end_stream, timeout)`](#stream:write_headers)
 
 
-### `h2_stream:write_continue(timeout)` <!-- --> {#h2_stream:write_continue}
+### `h2_stream:write_continue(timeout)` <!-- --> {#http.h2_stream:write_continue}
 
 See [`stream:write_continue(timeout)`](#stream:write_continue)
 
 
-### `h2_stream:get_next_chunk(timeout)` <!-- --> {#h2_stream:get_next_chunk}
+### `h2_stream:get_next_chunk(timeout)` <!-- --> {#http.h2_stream:get_next_chunk}
 
 See [`stream:get_next_chunk(timeout)`](#stream:get_next_chunk)
 
 
-### `h2_stream:each_chunk()` <!-- --> {#h2_stream:each_chunk}
+### `h2_stream:each_chunk()` <!-- --> {#http.h2_stream:each_chunk}
 
 See [`stream:each_chunk()`](#stream:each_chunk)
 
 
-### `h2_stream:get_body_as_string(timeout)` <!-- --> {#h2_stream:get_body_as_string}
+### `h2_stream:get_body_as_string(timeout)` <!-- --> {#http.h2_stream:get_body_as_string}
 
 See [`stream:get_body_as_string(timeout)`](#stream:get_body_as_string)
 
 
-### `h2_stream:get_body_chars(n, timeout)` <!-- --> {#h2_stream:get_body_chars}
+### `h2_stream:get_body_chars(n, timeout)` <!-- --> {#http.h2_stream:get_body_chars}
 
 See [`stream:get_body_chars(n, timeout)`](#stream:get_body_chars)
 
 
-### `h2_stream:get_body_until(pattern, plain, include_pattern, timeout)` <!-- --> {#h2_stream:get_body_until}
+### `h2_stream:get_body_until(pattern, plain, include_pattern, timeout)` <!-- --> {#http.h2_stream:get_body_until}
 
 See [`stream:get_body_until(pattern, plain, include_pattern, timeout)`](#stream:get_body_until)
 
 
-### `h2_stream:save_body_to_file(file, timeout)` <!-- --> {#h2_stream:save_body_to_file}
+### `h2_stream:save_body_to_file(file, timeout)` <!-- --> {#http.h2_stream:save_body_to_file}
 
 See [`stream:save_body_to_file(file, timeout)`](#stream:save_body_to_file)
 
 
-### `h2_stream:get_body_as_file(timeout)` <!-- --> {#h2_stream:get_body_as_file}
+### `h2_stream:get_body_as_file(timeout)` <!-- --> {#http.h2_stream:get_body_as_file}
 
 See [`stream:get_body_as_file(timeout)`](#stream:get_body_as_file)
 
 
-### `h2_stream:unget(str)` <!-- --> {#h2_stream:unget}
+### `h2_stream:unget(str)` <!-- --> {#http.h2_stream:unget}
 
 See [`stream:unget(str)`](#stream:unget)
 
 
-### `h2_stream:write_chunk(chunk, end_stream, timeout)` <!-- --> {#h2_stream:write_chunk}
+### `h2_stream:write_chunk(chunk, end_stream, timeout)` <!-- --> {#http.h2_stream:write_chunk}
 
 See [`stream:write_chunk(chunk, end_stream, timeout)`](#stream:write_chunk)
 
 
-### `h2_stream:write_body_from_string(str, timeout)` <!-- --> {#h2_stream:write_body_from_string}
+### `h2_stream:write_body_from_string(str, timeout)` <!-- --> {#http.h2_stream:write_body_from_string}
 
 See [`stream:write_body_from_string(str, timeout)`](#stream:write_body_from_string)
 
 
-### `h2_stream:write_body_from_file(file, timeout)` <!-- --> {#h2_stream:write_body_from_file}
+### `h2_stream:write_body_from_file(options|file, timeout)` <!-- --> {#http.h2_stream:write_body_from_file}
 
-See [`stream:write_body_from_file(file, timeout)`](#stream:write_body_from_file)
+See [`stream:write_body_from_file(options|file, timeout)`](#stream:write_body_from_file)
 
 
-### `h2_stream:shutdown()` <!-- --> {#h2_stream:shutdown}
+### `h2_stream:shutdown()` <!-- --> {#http.h2_stream:shutdown}
 
 See [`stream:shutdown()`](#stream:shutdown)
 
 
+### `h2_stream:pick_id(id)` <!-- --> {#http.h2_stream:pick_id}
+
+
 ### `h2_stream:set_state(new)` <!-- --> {#http.h2_stream:set_state}
 
 
 ### `h2_stream:reprioritise(child, exclusive)` <!-- --> {#http.h2_stream:reprioritise}
 
 
-### `h2_stream:write_http2_frame(typ, flags, payload, timeout)` <!-- --> {#http.h2_stream:write_http2_frame}
+### `h2_stream:write_http2_frame(typ, flags, payload, timeout, flush)` <!-- --> {#http.h2_stream:write_http2_frame}
 
 Writes a frame with `h2_stream`'s stream id.
 
-See [`h2_connection:write_http2_frame(typ, flags, streamid, payload, timeout)`](#http.h2_connection:write_http2_frame)
+See [`h2_connection:write_http2_frame(typ, flags, streamid, payload, timeout, flush)`](#http.h2_connection:write_http2_frame)
+
+
+### `h2_stream:write_data_frame(payload, end_stream, padded, timeout, flush)` <!-- --> {#http.h2_stream:write_data_frame}
 
 
-### `h2_stream:write_data_frame(payload, end_stream, padded, timeout)` <!-- --> {#http.h2_stream:write_data_frame}
+### `h2_stream:write_headers_frame(payload, end_stream, end_headers, padded, exclusive, stream_dep, weight, timeout, flush)` <!-- --> {#http.h2_stream:write_headers_frame}
 
 
-### `h2_stream:write_headers_frame(payload, end_stream, end_headers, padded, exclusive, stream_dep, weight, timeout)` <!-- --> {#http.h2_stream:write_headers_frame}
+### `h2_stream:write_priority_frame(exclusive, stream_dep, weight, timeout, flush)` <!-- --> {#http.h2_stream:write_priority_frame}
 
 
-### `h2_stream:write_priority_frame(exclusive, stream_dep, weight, timeout)` <!-- --> {#http.h2_stream:write_priority_frame}
+### `h2_stream:write_rst_stream_frame(err_code, timeout, flush)` <!-- --> {#http.h2_stream:write_rst_stream_frame}
 
 
-### `h2_stream:write_rst_stream(err_code, timeout)` <!-- --> {#http.h2_stream:write_rst_stream}
+### `h2_stream:rst_stream(err, timeout)` <!-- --> {#http.h2_stream:rst_stream}
 
 
-### `h2_stream:write_settings_frame(ACK, settings, timeout)` <!-- --> {#http.h2_stream:write_settings_frame}
+### `h2_stream:write_settings_frame(ACK, settings, timeout, flush)` <!-- --> {#http.h2_stream:write_settings_frame}
 
 
-### `h2_stream:write_push_promise_frame(promised_stream_id, payload, end_headers, padded, timeout)` <!-- --> {#http.h2_stream:write_push_promise_frame}
+### `h2_stream:write_push_promise_frame(promised_stream_id, payload, end_headers, padded, timeout, flush)` <!-- --> {#http.h2_stream:write_push_promise_frame}
 
 
 ### `h2_stream:push_promise(headers, timeout)` <!-- --> {#http.h2_stream:push_promise}
@@ -135,16 +141,16 @@ Pushes a new promise to the client.
 Returns the new stream as a [h2_stream](#http.h2_stream).
 
 
-### `h2_stream:write_ping_frame(ACK, payload, timeout)` <!-- --> {#http.h2_stream:write_ping_frame}
+### `h2_stream:write_ping_frame(ACK, payload, timeout, flush)` <!-- --> {#http.h2_stream:write_ping_frame}
 
 
-### `h2_stream:write_goaway_frame(last_streamid, err_code, debug_msg, timeout)` <!-- --> {#http.h2_stream:write_goaway_frame}
+### `h2_stream:write_goaway_frame(last_streamid, err_code, debug_msg, timeout, flush)` <!-- --> {#http.h2_stream:write_goaway_frame}
 
 
-### `h2_stream:write_window_update_frame(inc, timeout)` <!-- --> {#http.h2_stream:write_window_update_frame}
+### `h2_stream:write_window_update_frame(inc, timeout, flush)` <!-- --> {#http.h2_stream:write_window_update_frame}
 
 
 ### `h2_stream:write_window_update(inc, timeout)` <!-- --> {#http.h2_stream:write_window_update}
 
 
-### `h2_stream:write_continuation_frame(payload, end_headers, timeout)` <!-- --> {#http.h2_stream:write_continuation_frame}
+### `h2_stream:write_continuation_frame(payload, end_headers, timeout, flush)` <!-- --> {#http.h2_stream:write_continuation_frame}
diff --git a/doc/modules/http.hpack.md b/doc/modules/http.hpack.md
index 2c49b08..46d14e6 100644
--- a/doc/modules/http.hpack.md
+++ b/doc/modules/http.hpack.md
@@ -39,7 +39,7 @@
 ### `hpack_context:lookup_name_index(name)` <!-- --> {#http.hpack:lookup_name_index}
 
 
-### `hpack_context:lookup_index(index, allow_single)` <!-- --> {#http.hpack:lookup_index}
+### `hpack_context:lookup_index(index)` <!-- --> {#http.hpack:lookup_index}
 
 
 ### `hpack_context:add_header_indexed(name, value, huffman)` <!-- --> {#http.hpack:add_header_indexed}
diff --git a/doc/modules/http.hsts.md b/doc/modules/http.hsts.md
index ac893c4..857d6ae 100644
--- a/doc/modules/http.hsts.md
+++ b/doc/modules/http.hsts.md
@@ -7,6 +7,14 @@ Data structures useful for HSTS (HTTP Strict Transport Security)
 Creates and returns a new HSTS store.
 
 
+### `hsts_store.max_items` <!-- --> {#http.hsts.max_items}
+
+The maximum number of items allowed in the store.
+Decreasing this value will only prevent new items from being added; it will not remove old items.
+
+Defaults to infinity (any number of items is allowed).
+
+
 ### `hsts_store:clone()` <!-- --> {#http.hsts:clone}
 
 Creates and returns a copy of a store.
@@ -16,12 +24,24 @@ Creates and returns a copy of a store.
 
 Add new directives to the store about the given `host`. `directives` should be a table of directives, which *must* include the key `"max-age"`.
 
+Returns a boolean indicating if the item was accepted.
+
+
+### `hsts_store:remove(host)` <!-- --> {#http.hsts:remove}
+
+Removes the entry for `host` from the store (if it exists).
+
 
 ### `hsts_store:check(host)` <!-- --> {#http.hsts:check}
 
 Returns a boolean indicating if the given `host` is a known HSTS host.
 
 
+### `hsts_store:clean_due()` <!-- --> {#http.hsts:clean_due}
+
+Returns the number of seconds until the next item in the store expires.
+
+
 ### `hsts_store:clean()` <!-- --> {#http.hsts:clean}
 
 Removes expired entries from the store.
diff --git a/doc/modules/http.request.md b/doc/modules/http.request.md
index b4f1171..d1055ef 100644
--- a/doc/modules/http.request.md
+++ b/doc/modules/http.request.md
@@ -22,6 +22,15 @@ The host this request should be sent to.
 The port this request should be sent to.
 
 
+### `request.bind` <!-- --> {#http.request.bind}
+
+The local outgoing address and optionally port to bind in the form of `"address[:port]"`. Default is to allow the kernel to choose an address+port.
+
+IPv6 addresses may be specified via square bracket notation. e.g. `"127.0.0.1"`, `"127.0.0.1:50000"`, `"[::1]:30000"`.
+
+This option is rarely needed. Supplying an address can be used to manually select the network interface to make the request from, while supplying a port is only really used to interoperate with firewalls or devices that demand use of a certain port.
+
+
 ### `request.tls` <!-- --> {#http.request.tls}
 
 A boolean indicating if TLS should be used.
@@ -54,6 +63,42 @@ The value should be a URI or `false` to turn off proxying for the request.
 A [*http.headers*](#http.headers) object of headers that will be sent in the request.
 
 
+### `request.hsts` <!-- --> {#http.request.hsts}
+
+The [*http.hsts*](#http.hsts) store that will be used to enforce HTTP strict transport security.
+An attempt will be made to add strict transport headers from a response to the store.
+
+Defaults to a shared store.
+
+
+### `request.proxies` <!-- --> {#http.request.proxies}
+
+The [*http.proxies*](#http.proxies) object used to select a proxy for the request.
+Only consulted if `request.proxy` is `nil`.
+
+
+### `request.cookie_store` <!-- --> {#http.request.cookie_store}
+
+The [*http.cookie.store*](#http.cookie.store) that will be used to find cookies for the request.
+An attempt will be made to add cookies from a response to the store.
+
+Defaults to a shared store.
+
+
+### `request.is_top_level` <!-- --> {#http.request.is_top_level}
+
+A boolean flag indicating if this request is a "top level" request (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2))
+
+Defaults to `true`.
+
+
+### `request.site_for_cookies` <!-- --> {#http.request.site_for_cookies}
+
+A string containing the host that should be considered as the "site for cookies" (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)), can be `nil` if unknown.
+
+Defaults to `nil`.
+
+
 ### `request.follow_redirects` <!-- --> {#http.request.follow_redirects}
 
 Boolean indicating if `:go()` should follow redirects.
diff --git a/doc/modules/http.server.md b/doc/modules/http.server.md
index 9fd6bd7..135b826 100644
--- a/doc/modules/http.server.md
+++ b/doc/modules/http.server.md
@@ -1,6 +1,6 @@
 ## http.server
 
-*http.server* objects are used to encapsulate the accept() and dispatch of http clients. Each client request triggers `onstream` which is called from an independent cqueue, providing an independent process for each request. `onstream` can also be used for testing and upgrading a request, with HTTP 1.1 to WebSockets being the notable example.
+*http.server* objects are used to encapsulate the `accept()` and dispatch of http clients. Each new client request will invoke the `onstream` callback in a new cqueues managed coroutine. In addition to constructing and returning a HTTP response, an `onstream` handler may decide to take ownership of the connection for other purposes, e.g. upgrade from a HTTP 1.1 connection to a WebSocket connection.
 
 For examples of how to use the server library, please see the [examples directory](https://github.com/daurnimator/lua-http/tree/master/examples) in the source tree.
 
@@ -16,7 +16,8 @@ Creates a new instance of an HTTP server listening on the given socket.
 	  - `true`: Allows tls connections only
 	  - `false`: Allows non-tls connections only
   - `.ctx` (*context object*): An `openssl.ssl.context` object to use for tls connections. If `nil` is passed, a self-signed context will be generated.
-  - `.client_timeout` (*number*): Timeout (in seconds) to wait for client to send first bytes and/or complete TLS handshake. Default is 10 seconds.
+  - `.connection_setup_timeout` (*number*): Timeout (in seconds) to wait for client to send first bytes and/or complete TLS handshake. Default is 10 seconds.
+  - `.intra_stream_timeout` (*number*): Timeout (in seconds) to wait for a new [*stream*](#stream) on an idle connection before giving up and closing the connection.
   - `.version` (*number*): The http version to allow to connect (default: any)
   - `.cq` (*cqueue*): A cqueues controller to use as a main loop. The default is a new controller for the server.
   - `.max_concurrent` (*number*): Maximum number of connections to allow live at a time. Default is infinity.
@@ -116,3 +117,8 @@ Add a new connection socket to the server for processing. The server will use th
   - Another cqueues thread with some other master socket.
   - From inetd for start on demand daemons.
   - A Unix socket with `SCM_RIGHTS`.
+
+
+### `server:add_stream(stream)` <!-- --> {#http.server:add_stream}
+
+Add an existing stream to the server for processing.
diff --git a/doc/modules/http.socks.md b/doc/modules/http.socks.md
index fee94ff..380c63d 100644
--- a/doc/modules/http.socks.md
+++ b/doc/modules/http.socks.md
@@ -11,7 +11,7 @@ Returns a *http.socks* object.
 
 ### `fdopen(socket)` <!-- --> {#http.socks.fdopen}
 
-This function takes an existing cqueues.socket as a parameter and returns a *http.socks* object with `socket` as it's base.
+This function takes an existing cqueues.socket as a parameter and returns a *http.socks* object with `socket` as its base.
 
 
 ### `socks.needs_resolve` <!-- --> {#http.socks.needs_resolve}
diff --git a/doc/modules/http.tls.md b/doc/modules/http.tls.md
index 9b8a02a..a2723d1 100644
--- a/doc/modules/http.tls.md
+++ b/doc/modules/http.tls.md
@@ -24,6 +24,11 @@ The [Mozilla "Modern" cipher list](https://wiki.mozilla.org/Security/Server_Side
 The [Mozilla "Intermediate" cipher list](https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29) as a colon separated list, ready to pass to OpenSSL
 
 
+### `old_cipher_list` <!-- --> {#http.tls.old_cipher_list}
+
+The [Mozilla "Old" cipher list](https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility) as a colon separated list, ready to pass to OpenSSL
+
+
 ### `banned_ciphers` <!-- --> {#http.tls.banned_ciphers}
 
 A set (table with string keys and values of `true`) of the [ciphers banned in HTTP 2](https://http2.github.io/http2-spec/#BadCipherSuites) where the keys are OpenSSL cipher names.
diff --git a/doc/modules/http.util.md b/doc/modules/http.util.md
index 606bcc0..4826764 100644
--- a/doc/modules/http.util.md
+++ b/doc/modules/http.util.md
@@ -45,6 +45,17 @@ print(http_util.dict_to_query({foo = "bar"; baz = "qux"})) --> "baz=qux&foo=bar"
 ### `resolve_relative_path(orig_path, relative_path)` <!-- --> {#http.util.resolve_relative_path}
 
 
+### `is_safe_method(method)` <!-- --> {#http.util.is_safe_method}
+
+Returns a boolean indicating if the passed string `method` is a "safe" method.
+See [RFC 7231 section 4.2.1](https://tools.ietf.org/html/rfc7231#section-4.2.1) for more information.
+
+
+### `is_ip(str)` <!-- --> {#http.util.is_ip}
+
+Returns a boolean indicating if the passed string `str` is a valid IP.
+
+
 ### `scheme_to_port` <!-- --> {#http.util.scheme_to_port}
 
 Map from schemes (as strings) to default ports (as integers).
diff --git a/doc/modules/http.websocket.md b/doc/modules/http.websocket.md
index 0b0d62e..18498c9 100644
--- a/doc/modules/http.websocket.md
+++ b/doc/modules/http.websocket.md
@@ -64,7 +64,8 @@ Low level function to send a raw frame.
 Send the given `data` as a data frame.
 
   - `data` should be a string
-  - `opcode` can be a numeric opcode, `"text"` or `"binary"`. If `nil`, defaults to a text frame
+  - `opcode` can be a numeric opcode, `"text"` or `"binary"`. If `nil`, defaults to a text frame.
+    Note this `opcode` is the websocket frame opcode, not an application specific opcode. The opcode should be one from the [IANA registry](https://www.iana.org/assignments/websocket/websocket.xhtml#opcode).
 
 
 ### `websocket:send_ping(data, timeout)` <!-- --> {#http.websocket:send_ping}
diff --git a/doc/template.html b/doc/template.html
index ea3f54f..bbdeceb 100644
--- a/doc/template.html
+++ b/doc/template.html
@@ -96,7 +96,7 @@ li.collapsible > ul {
     element.className = 'collapsible';
     element.setAttribute('state', 'collapsed');
     element.onclick = function(event) {
-      if (event.toElement != this) return;
+      if (event.target != this) return;
       if (this.getAttribute('state') == 'collapsed') {
         this.removeAttribute('state');
       } else {
diff --git a/examples/h2_streaming.lua b/examples/h2_streaming.lua
old mode 100644
new mode 100755
index 9485c3c..27d073b
--- a/examples/h2_streaming.lua
+++ b/examples/h2_streaming.lua
@@ -1,3 +1,4 @@
+#!/usr/bin/env lua
 --[[
 Makes a request to an HTTP2 endpoint that has an infinite length response.
 
diff --git a/examples/serve_dir.lua b/examples/serve_dir.lua
old mode 100644
new mode 100755
index 4563694..442f85d
--- a/examples/serve_dir.lua
+++ b/examples/serve_dir.lua
@@ -1,3 +1,4 @@
+#!/usr/bin/env lua
 --[=[
 This example serves a file/directory browser
 It defaults to serving the current directory.
@@ -92,6 +93,12 @@ local function reply(myserver, stream) -- luacheck: ignore 212
 	res_headers:append("server", default_server)
 	res_headers:append("date", http_util.imf_date())
 
+	if req_method ~= "GET" and req_method ~= "HEAD" then
+		res_headers:upsert(":status", "405")
+		assert(stream:write_headers(res_headers, true))
+		return
+	end
+
 	local path = req_headers:get(":path")
 	local uri_t = assert(uri_reference:match(path), "invalid path")
 	path = http_util.resolve_relative_path("/", uri_t.path)
@@ -145,7 +152,8 @@ local function reply(myserver, stream) -- luacheck: ignore 212
 		</tr></thead>
 		<tbody>
 ]], xml_escape(path), xml_escape(path)), false))
-			-- lfs doesn't provide a way to get an errno for attempting to open a directory https://github.com/keplerproject/luafilesystem/issues/87
+			-- lfs doesn't provide a way to get an errno for attempting to open a directory
+			-- See https://github.com/keplerproject/luafilesystem/issues/87
 			for filename in lfs.dir(real_path) do
 				if not (filename == ".." and path == "/") then -- Exclude parent directory entry listing from top level
 					local stats = lfs.attributes(real_path .. "/" .. filename)
diff --git a/examples/server_hello.lua b/examples/server_hello.lua
old mode 100644
new mode 100755
index 54c684c..d0f8afb
--- a/examples/server_hello.lua
+++ b/examples/server_hello.lua
@@ -1,3 +1,4 @@
+#!/usr/bin/env lua
 --[[
 A simple HTTP server
 
diff --git a/examples/server_sent_events.lua b/examples/server_sent_events.lua
old mode 100644
new mode 100755
index 14e1be5..1571229
--- a/examples/server_sent_events.lua
+++ b/examples/server_sent_events.lua
@@ -1,3 +1,4 @@
+#!/usr/bin/env lua
 --[[
 A server that responds with an infinite server-side-events format.
 https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format
@@ -21,6 +22,11 @@ local myserver = assert(http_server.listen {
 
 		-- Build response headers
 		local res_headers = http_headers.new()
+		if req_method ~= "GET" and req_method ~= "HEAD" then
+			res_headers:upsert(":status", "405")
+			assert(stream:write_headers(res_headers, true))
+			return
+		end
 		if req_headers:get ":path" == "/" then
 			res_headers:append(":status", "200")
 			res_headers:append("content-type", "text/html")
diff --git a/examples/simple_request.lua b/examples/simple_request.lua
old mode 100644
new mode 100755
index fa91178..082f9dc
--- a/examples/simple_request.lua
+++ b/examples/simple_request.lua
@@ -1,3 +1,4 @@
+#!/usr/bin/env lua
 --[[
 Verbosely fetches an HTTP resource
 If a body is given, use a POST request
diff --git a/examples/websocket_client.lua b/examples/websocket_client.lua
old mode 100644
new mode 100755
index f485a94..30a21bb
--- a/examples/websocket_client.lua
+++ b/examples/websocket_client.lua
@@ -1,8 +1,9 @@
+#!/usr/bin/env lua
 --[[
 Example of websocket client usage
 
-  - Connects to the coinbase feed.
-	Documentation of feed: https://docs.exchange.coinbase.com/#websocket-feed
+  - Connects to the gdax market data feed.
+	Documentation of feed: https://docs.gdax.com/#websocket-feed
   - Sends a subscribe message
   - Prints off 5 messages
   - Close the socket and clean up.
@@ -10,7 +11,7 @@ Example of websocket client usage
 
 local websocket = require "http.websocket"
 
-local ws = websocket.new_from_uri("wss://ws-feed.exchange.coinbase.com")
+local ws = websocket.new_from_uri("wss://ws-feed.gdax.com")
 assert(ws:connect())
 assert(ws:send([[{"type": "subscribe", "product_id": "BTC-USD"}]]))
 for _=1, 5 do
diff --git a/http-0.1-0.rockspec b/http-scm-0.rockspec
similarity index 88%
rename from http-0.1-0.rockspec
rename to http-scm-0.rockspec
index f698435..5c9dafd 100644
--- a/http-0.1-0.rockspec
+++ b/http-scm-0.rockspec
@@ -1,5 +1,5 @@
 package = "http"
-version = "0.1-0"
+version = "scm-0"
 
 description = {
 	summary = "HTTP library for Lua";
@@ -8,8 +8,7 @@ description = {
 }
 
 source = {
-	url = "https://github.com/daurnimator/lua-http/archive/v0.1.zip";
-	dir = "lua-http-0.1";
+	url = "git+https://github.com/daurnimator/lua-http.git";
 }
 
 dependencies = {
@@ -20,8 +19,10 @@ dependencies = {
 	"luaossl >= 20161208";
 	"basexx >= 0.2.0";
 	"lpeg";
-	"lpeg_patterns >= 0.3";
+	"lpeg_patterns >= 0.5";
+	"binaryheap >= 0.3";
 	"fifo";
+	-- "psl"; -- Optional
 }
 
 build = {
@@ -30,6 +31,7 @@ build = {
 		["http.bit"] = "http/bit.lua";
 		["http.client"] = "http/client.lua";
 		["http.connection_common"] = "http/connection_common.lua";
+		["http.cookie"] = "http/cookie.lua";
 		["http.h1_connection"] = "http/h1_connection.lua";
 		["http.h1_reason_phrases"] = "http/h1_reason_phrases.lua";
 		["http.h1_stream"] = "http/h1_stream.lua";
diff --git a/http/bit.tld b/http/bit.tld
new file mode 100644
index 0000000..886b0b2
--- /dev/null
+++ b/http/bit.tld
@@ -0,0 +1,3 @@
+band: (integer, integer) -> (integer)
+bor: (integer, integer) -> (integer)
+bxor: (integer, integer) -> (integer)
diff --git a/http/client.lua b/http/client.lua
index 5000d67..4949fb1 100644
--- a/http/client.lua
+++ b/http/client.lua
@@ -1,6 +1,7 @@
 local ca = require "cqueues.auxlib"
 local cs = require "cqueues.socket"
 local http_tls = require "http.tls"
+local http_util = require "http.util"
 local connection_common = require "http.connection_common"
 local onerror = connection_common.onerror
 local new_h1_connection = require "http.h1_connection".new
@@ -9,11 +10,6 @@ local openssl_ssl = require "openssl.ssl"
 local openssl_ctx = require "openssl.ssl.context"
 local openssl_verify_param = require "openssl.x509.verify_param"
 
-local EOF = require "lpeg".P(-1)
-local IPv4address = require "lpeg_patterns.IPv4".IPv4address
-local IPv6addrz = require "lpeg_patterns.IPv6".IPv6addrz
-local IPaddress = (IPv4address + IPv6addrz) * EOF
-
 -- Create a shared 'default' TLS context
 local default_ctx = http_tls.new_client_context()
 
@@ -24,13 +20,14 @@ local function negotiate(s, options, timeout)
 	if tls then
 		local ctx = options.ctx or default_ctx
 		local ssl = openssl_ssl.new(ctx)
-		local ip = options.host and IPaddress:match(options.host)
-		if options.sendname ~= nil then
-			if options.sendname then -- false indicates no sendname wanted
-				ssl:setHostName(options.sendname)
-			end
-		elseif options.host and not ip then
-			ssl:setHostName(options.host)
+		local host = options.host
+		local host_is_ip = host and http_util.is_ip(host)
+		local sendname = options.sendname
+		if sendname == nil and not host_is_ip and host then
+			sendname = host
+		end
+		if sendname then -- false indicates no sendname wanted
+			ssl:setHostName(sendname)
 		end
 		if http_tls.has_alpn then
 			if version == nil then
@@ -44,12 +41,12 @@ local function negotiate(s, options, timeout)
 		if version == 2 then
 			ssl:setOptions(openssl_ctx.OP_NO_TLSv1 + openssl_ctx.OP_NO_TLSv1_1)
 		end
-		if options.host and http_tls.has_hostname_validation then
+		if host and http_tls.has_hostname_validation then
 			local params = openssl_verify_param.new()
-			if ip then
-				params:setIP(options.host)
+			if host_is_ip then
+				params:setIP(host)
 			else
-				params:setHost(options.host)
+				params:setHost(host)
 			end
 			-- Allow user defined params to override
 			local old = ssl:getParam()
@@ -84,11 +81,30 @@ local function negotiate(s, options, timeout)
 end
 
 local function connect(options, timeout)
+	local bind = options.bind
+	if bind ~= nil then
+		assert(type(bind) == "string")
+		local bind_address, bind_port = bind:match("^(.-):(%d+)$")
+		if bind_address then
+			bind_port = tonumber(bind_port, 10)
+		else
+			bind_address = bind
+		end
+		local ipv6 = bind_address:match("^%[([:%x]+)%]$")
+		if ipv6 then
+			bind_address = ipv6
+		end
+		bind = {
+			address = bind_address;
+			port = bind_port;
+		}
+	end
 	local s, err, errno = ca.fileresult(cs.connect {
 		family = options.family;
 		host = options.host;
 		port = options.port;
 		path = options.path;
+		bind = bind;
 		sendname = false;
 		v6only = options.v6only;
 		nodelay = true;
diff --git a/http/connection_common.lua b/http/connection_common.lua
index d881319..432acfe 100644
--- a/http/connection_common.lua
+++ b/http/connection_common.lua
@@ -25,6 +25,27 @@ local function onerror(socket, op, why, lvl) -- luacheck: ignore 212
 	return err, why
 end
 
+function connection_methods:pollfd()
+	if self.socket == nil then
+		return nil
+	end
+	return self.socket:pollfd()
+end
+
+function connection_methods:events()
+	if self.socket == nil then
+		return nil
+	end
+	return self.socket:events()
+end
+
+function connection_methods:timeout()
+	if self.socket == nil then
+		return nil
+	end
+	return self.socket:timeout()
+end
+
 function connection_methods:onidle_() -- luacheck: ignore 212
 end
 
diff --git a/http/connection_common.tld b/http/connection_common.tld
new file mode 100644
index 0000000..9bee61c
--- /dev/null
+++ b/http/connection_common.tld
@@ -0,0 +1,21 @@
+interface connection
+    -- implements cqueues polling interface
+    const pollfd: (self) -> (nil)|(integer) -- TODO: cqueues condition
+    const events: (self) -> (nil)|(string|integer)
+    const timeout: (self) -> (nil)|(number)
+
+    const checktls: (self) -> (nil)|(any) -- TODO: luaossl SSL object
+    const localname: (self) -> (integer, string, integer?)|(nil)|(nil, string, number)
+    const peername: (self) -> (integer, string, integer?)|(nil)|(nil, string, number)
+    const onidle: (self, (connection)->()) -> ((connection)->())
+    const connect: (self) -> (true)|(nil)|(nil, string, number)
+    const flush: (self, number) -> (true)|(nil, string, number)
+    const close: (self) -> (true)
+
+    -- Not in connection_common.lua
+    const version: integer
+    -- XXX: needs circular require https://github.com/andremm/typedlua/issues/120
+    -- const new_stream: (self) -> (stream)|(nil) -- Note: in http2 this takes optional id argument
+    -- const get_next_incoming_stream: (self, number?) -> (stream)|(nil)|(nil, string, number)
+    const shutdown: (self) -> (true)
+end
diff --git a/http/cookie.lua b/http/cookie.lua
new file mode 100644
index 0000000..0c2f7e3
--- /dev/null
+++ b/http/cookie.lua
@@ -0,0 +1,867 @@
+--[[
+Data structures useful for Cookies
+RFC 6265
+]]
+
+local http_patts = require "lpeg_patterns.http"
+local binaryheap = require "binaryheap"
+local http_util = require "http.util"
+local has_psl, psl = pcall(require, "psl")
+
+local EOF = require "lpeg".P(-1)
+local sane_cookie_date = http_patts.IMF_fixdate * EOF
+local Cookie = http_patts.Cookie * EOF
+local Set_Cookie = http_patts.Set_Cookie * EOF
+
+local function bake(name, value, expiry_time, domain, path, secure_only, http_only, same_site)
+	-- This function is optimised to only do one concat operation at the end
+	local cookie = { name, "=", value }
+	local n = 3
+	if expiry_time and expiry_time ~= (1e999) then
+		-- Prefer Expires over Max-age unless it is a deletion request
+		if expiry_time == (-1e999) then
+			n = n + 1
+			cookie[n] = "; Max-Age=0"
+		else
+			n = n + 2
+			cookie[n-1] = "; Expires="
+			cookie[n] = http_util.imf_date(expiry_time)
+		end
+	end
+	if domain then
+		n = n + 2
+		cookie[n-1] = "; Domain="
+		cookie[n] = domain
+	end
+	if path then
+		n = n + 2
+		cookie[n-1] = "; Path="
+		cookie[n] = http_util.encodeURI(path)
+	end
+	if secure_only then
+		n = n + 1
+		cookie[n] = "; Secure"
+	end
+	if http_only then
+		n = n + 1
+		cookie[n] = "; HttpOnly"
+	end
+	-- https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2
+	if same_site then
+		local v
+		if same_site == "strict" then
+			v = "; SameSite=Strict"
+		elseif same_site == "lax" then
+			v = "; SameSite=Lax"
+		else
+			error('invalid value for same_site, expected "strict" or "lax"')
+		end
+		n = n + 1
+		cookie[n] = v
+	end
+	return table.concat(cookie, "", 1, n)
+end
+
+local function parse_cookie(cookie_header)
+	return Cookie:match(cookie_header)
+end
+
+local function parse_cookies(req_headers)
+	local cookie_headers = req_headers:get_as_sequence("cookie")
+	local cookies
+	for i=1, cookie_headers.n do
+		local header_cookies = parse_cookie(cookie_headers[i])
+		if header_cookies then
+			if cookies then
+				for k, v in pairs(header_cookies) do
+					cookies[k] = v
+				end
+			else
+				cookies = header_cookies
+			end
+		end
+	end
+	return cookies or {}
+end
+
+local function parse_setcookie(setcookie_header)
+	return Set_Cookie:match(setcookie_header)
+end
+
+local canonicalise_host
+if has_psl then
+	canonicalise_host = psl.str_to_utf8lower
+else
+	canonicalise_host = function(str)
+		-- fail on non-ascii chars
+		if str:find("[^%p%w]") then
+			return nil
+		end
+		return str:lower()
+	end
+end
+
+--[[
+A string domain-matches a given domain string if at least one of the following
+conditions hold:
+  - The domain string and the string are identical. (Note that both the domain
+    string and the string will have been canonicalized to lower case at this point.)
+  - All of the following conditions hold:
+	  - The domain string is a suffix of the string.
+	  - The last character of the string that is not included in the domain string
+	    is a %x2E (".") character.
+	  - The string is a host name (i.e., not an IP address).
+]]
+local function domain_match(domain_string, str)
+	return str == domain_string or (
+		str:sub(-#domain_string) == domain_string
+		and str:sub(-#domain_string-1, -#domain_string-1) == "."
+		and not http_util.is_ip(str)
+	)
+end
+
+--[[ A request-path path-matches a given cookie-path if at least one of the following conditions holds:
+  - The cookie-path and the request-path are identical.
+  - The cookie-path is a prefix of the request-path, and the last
+    character of the cookie-path is %x2F ("/").
+  - The cookie-path is a prefix of the request-path, and the first
+    character of the request-path that is not included in the cookie-path is a %x2F ("/") character.
+]]
+local function path_match(path, req_path)
+	if path == req_path then
+		return true
+	elseif path == req_path:sub(1, #path) then
+		if path:sub(-1, -1) == "/" then
+			return true
+		elseif req_path:sub(#path + 1, #path + 1) == "/" then
+			return true
+		end
+	end
+	return false
+end
+
+local cookie_methods = {}
+local cookie_mt = {
+	__name = "http.cookie.cookie";
+	__index = cookie_methods;
+}
+
+function cookie_methods:netscape_format()
+	return string.format("%s%s\t%s\t%s\t%s\t%d\t%s\t%s\n",
+		self.http_only and "#HttpOnly_" or "",
+		self.domain or "unknown",
+		self.host_only and "TRUE" or "FALSE",
+		self.path,
+		self.secure_only and "TRUE" or "FALSE",
+		math.max(0, math.min(2147483647, self.expiry_time)),
+		self.name,
+		self.value)
+end
+
+
+local default_psl
+if has_psl and psl.latest then
+	default_psl = psl.latest()
+elseif has_psl then
+	default_psl = psl.builtin()
+end
+local store_methods = {
+	psl = default_psl;
+	time = function() return os.time() end;
+	max_cookie_length = (1e999);
+	max_cookies = (1e999);
+	max_cookies_per_domain = (1e999);
+}
+
+local store_mt = {
+	__name = "http.cookie.store";
+	__index = store_methods;
+}
+
+local function new_store()
+	return setmetatable({
+		domains = {};
+		expiry_heap = binaryheap.minUnique();
+		n_cookies = 0;
+		n_cookies_per_domain = {};
+	}, store_mt)
+end
+
+local function add_to_store(self, cookie, req_is_http, now)
+	if cookie.expiry_time < now then
+		-- This was all just a trigger to delete the old cookie
+		self:remove(cookie.domain, cookie.path, cookie.name)
+	else
+		local name = cookie.name
+		local cookie_length = #name + 1 + #cookie.value
+		if cookie_length > self.max_cookie_length then
+			return false
+		end
+
+		local domain = cookie.domain
+		local domain_cookies = self.domains[domain]
+		local path_cookies
+		local old_cookie
+		if domain_cookies ~= nil then
+			path_cookies = domain_cookies[cookie.path]
+			if path_cookies ~= nil then
+				old_cookie = path_cookies[name]
+			end
+		end
+
+		-- If the cookie store contains a cookie with the same name,
+		-- domain, and path as the newly created cookie:
+		if old_cookie then
+			-- If the newly created cookie was received from a "non-HTTP"
+			-- API and the old-cookie's http-only-flag is set, abort these
+			-- steps and ignore the newly created cookie entirely.
+			if not req_is_http and old_cookie.http_only then
+				return false
+			end
+
+			-- Update the creation-time of the newly created cookie to
+			-- match the creation-time of the old-cookie.
+			cookie.creation_time = old_cookie.creation_time
+
+			-- Remove the old-cookie from the cookie store.
+			self.expiry_heap:remove(old_cookie)
+		else
+			if self.n_cookies >= self.max_cookies or self.max_cookies_per_domain < 1 then
+				return false
+			end
+
+			-- Cookie will be added
+			if domain_cookies == nil then
+				path_cookies = {}
+				domain_cookies = {
+					[cookie.path] = path_cookies;
+				}
+				self.domains[domain] = domain_cookies
+				self.n_cookies_per_domain[domain] = 1
+			else
+				local n_cookies_per_domain = self.n_cookies_per_domain[domain]
+				if n_cookies_per_domain >= self.max_cookies_per_domain then
+					return false
+				end
+				path_cookies = domain_cookies[cookie.path]
+				if path_cookies == nil then
+					path_cookies = {}
+					domain_cookies[cookie.path] = path_cookies
+				end
+				self.n_cookies_per_domain[domain] = n_cookies_per_domain + 1 -- account for the cookie added below
+			end
+
+			self.n_cookies = self.n_cookies + 1
+		end
+
+		path_cookies[name] = cookie
+		self.expiry_heap:insert(cookie.expiry_time, cookie)
+	end
+
+	return true
+end
+
+function store_methods:store(req_domain, req_path, req_is_http, req_is_secure, req_site_for_cookies, name, value, params)
+	assert(type(req_domain) == "string")
+	assert(type(req_path) == "string")
+	assert(type(name) == "string")
+	assert(type(value) == "string")
+	assert(type(params) == "table")
+
+	local now = self.time()
+
+	req_domain = assert(canonicalise_host(req_domain), "invalid request domain")
+
+	-- Clean now so that we can assume there are no expired cookies in store
+	self:clean()
+
+	-- RFC 6265 Section 5.3
+	local cookie = setmetatable({
+		name = name;
+		value = value;
+		expiry_time = (1e999);
+		domain = req_domain;
+		path = nil;
+		creation_time = now;
+		last_access_time = now;
+		persistent = false;
+		host_only = true;
+		secure_only = not not params.secure;
+		http_only = not not params.httponly;
+		same_site = nil;
+	}, cookie_mt)
+
+	-- If a cookie has both the Max-Age and the Expires attribute, the Max-
+	-- Age attribute has precedence and controls the expiration date of the
+	-- cookie.
+	local max_age = params["max-age"]
+	if max_age and max_age:find("^%-?[0-9]+$") then
+		max_age = tonumber(max_age, 10)
+		cookie.persistent = true
+		if max_age <= 0 then
+			cookie.expiry_time = (-1e999)
+		else
+			cookie.expiry_time = now + max_age
+		end
+	elseif params.expires then
+		local date = sane_cookie_date:match(params.expires)
+		if date then
+			cookie.persistent = true
+			cookie.expiry_time = os.time(date)
+		end
+	end
+
+	local domain = params.domain or "";
+
+	-- If the first character of the attribute-value string is %x2E ("."):
+	-- Let cookie-domain be the attribute-value without the leading %x2E (".") character.
+	if domain:sub(1, 1) == "." then
+		domain = domain:sub(2)
+	end
+
+	-- Convert the cookie-domain to lower case.
+	domain = canonicalise_host(domain)
+	if not domain then
+		return false
+	end
+
+	-- If the user agent is configured to reject "public suffixes" and
+	-- the domain-attribute is a public suffix:
+	if domain ~= "" and self.psl and self.psl:is_public_suffix(domain) then
+		-- If the domain-attribute is identical to the canonicalized request-host:
+		if domain == req_domain then
+			-- Let the domain-attribute be the empty string.
+			domain = ""
+		else
+			-- Ignore the cookie entirely and abort these steps.
+			return false
+		end
+	end
+
+	-- If the domain-attribute is non-empty:
+	if domain ~= "" then
+		-- If the canonicalized request-host does not domain-match the
+		-- domain-attribute:
+		if not domain_match(domain, req_domain) then
+			-- Ignore the cookie entirely and abort these steps.
+			return false
+		else
+			-- Set the cookie's host-only-flag to false.
+			cookie.host_only = false
+			-- Set the cookie's domain to the domain-attribute.
+			cookie.domain = domain
+		end
+	end
+
+	-- RFC 6265 Section 5.2.4
+	-- If the attribute-value is empty or if the first character of the
+	-- attribute-value is not %x2F ("/")
+	local path = params.path or ""
+	if path:sub(1, 1) ~= "/" then
+		-- Let cookie-path be the default-path.
+		local default_path
+		-- RFC 6265 Section 5.1.4
+		-- Let uri-path be the path portion of the request-uri if such a
+		-- portion exists (and empty otherwise).  For example, if the
+		-- request-uri contains just a path (and optional query string),
+		-- then the uri-path is that path (without the %x3F ("?") character
+		-- or query string), and if the request-uri contains a full
+		-- absoluteURI, the uri-path is the path component of that URI.
+
+		-- If the uri-path is empty or if the first character of the uri-
+		-- path is not a %x2F ("/") character, output %x2F ("/") and skip
+		-- the remaining steps.
+		-- If the uri-path contains no more than one %x2F ("/") character,
+		-- output %x2F ("/") and skip the remaining step.
+		if req_path:sub(1, 1) ~= "/" or not req_path:find("/", 2, true) then
+			default_path = "/"
+		else
+			-- Output the characters of the uri-path from the first character up
+			-- to, but not including, the right-most %x2F ("/").
+			default_path = req_path:match("^([^?]*)/")
+		end
+		cookie.path = default_path
+	else
+		cookie.path = path
+	end
+
+	-- If the scheme component of the request-uri does not denote a
+	-- "secure" protocol (as defined by the user agent), and the
+	-- cookie's secure-only-flag is true, then abort these steps and
+	-- ignore the cookie entirely.
+	if not req_is_secure and cookie.secure_only then
+		return false
+	end
+
+	-- If the cookie was received from a "non-HTTP" API and the
+	-- cookie's http-only-flag is set, abort these steps and ignore the
+	-- cookie entirely.
+	if not req_is_http and cookie.http_only then
+		return false
+	end
+
+	-- If the cookie's secure-only-flag is not set, and the scheme
+	-- component of request-uri does not denote a "secure" protocol,
+	if not req_is_secure and not cookie.secure_only then
+		-- then abort these steps and ignore the cookie entirely if the
+		-- cookie store contains one or more cookies that meet all of the
+		-- following criteria:
+		for d, domain_cookies in pairs(self.domains) do
+			-- See '3' below
+			if domain_match(cookie.domain, d) or domain_match(d, cookie.domain) then
+				for p, path_cookies in pairs(domain_cookies) do
+					local cmp_cookie = path_cookies[name]
+					-- 1. Their name matches the name of the newly-created cookie.
+					if cmp_cookie
+						-- 2. Their secure-only-flag is true.
+						and cmp_cookie.secure_only
+						-- 3. Their domain domain-matches the domain of the newly-created
+						-- cookie, or vice-versa.
+						-- Note: already checked above in domain_match
+						-- 4. The path of the newly-created cookie path-matches the path
+						-- of the existing cookie.
+						and path_match(p, cookie.path)
+					then
+						return false
+					end
+				end
+			end
+		end
+	end
+
+	-- If the cookie-attribute-list contains an attribute with an
+	-- attribute-name of "SameSite", set the cookie's same-site-flag to
+	-- attribute-value (i.e. either "Strict" or "Lax").  Otherwise, set
+	-- the cookie's same-site-flag to "None".
+	local same_site = params.samesite
+	if same_site then
+		same_site = same_site:lower()
+		if same_site == "lax" or same_site == "strict" then
+			-- If the cookie's "same-site-flag" is not "None", and the cookie
+			-- is being set from a context whose "site for cookies" is not an
+			-- exact match for request-uri's host's registered domain, then
+			-- abort these steps and ignore the newly created cookie entirely.
+			if req_domain ~= req_site_for_cookies then
+				return false
+			end
+
+			cookie.same_site = same_site
+		end
+	end
+
+	-- If the cookie-name begins with a case-sensitive match for the
+	-- string "__Secure-", abort these steps and ignore the cookie
+	-- entirely unless the cookie's secure-only-flag is true.
+	if not cookie.secure_only and name:sub(1, 9) == "__Secure-" then
+		return false
+	end
+
+	-- If the cookie-name begins with a case-sensitive match for the
+	-- string "__Host-", abort these steps and ignore the cookie
+	-- entirely unless the cookie meets all the following criteria:
+	-- 1.  The cookie's secure-only-flag is true.
+	-- 2.  The cookie's host-only-flag is true.
+	-- 3.  The cookie-attribute-list contains an attribute with an
+	--     attribute-name of "Path", and the cookie's path is "/".
+	if not (cookie.secure_only and cookie.host_only and cookie.path == "/") and name:sub(1, 7) == "__Host-" then
+		return false
+	end
+
+	return add_to_store(self, cookie, req_is_http, now)
+end
+
+function store_methods:store_from_request(req_headers, resp_headers, req_host, req_site_for_cookies)
+	local set_cookies = resp_headers:get_as_sequence("set-cookie")
+	local n = set_cookies.n
+	if n == 0 then
+		return true
+	end
+
+	local req_scheme = req_headers:get(":scheme")
+	local req_authority = req_headers:get(":authority")
+	local req_domain
+	if req_authority then
+		req_domain = http_util.split_authority(req_authority, req_scheme)
+	else -- :authority can be missing for HTTP/1.0 requests; fall back to req_host
+		req_domain = req_host
+	end
+	local req_path = req_headers:get(":path")
+	local req_is_secure = req_scheme == "https"
+
+	for i=1, n do
+		local name, value, params = parse_setcookie(set_cookies[i])
+		if name then
+			self:store(req_domain, req_path, true, req_is_secure, req_site_for_cookies, name, value, params)
+		end
+	end
+	return true
+end
+
+function store_methods:get(domain, path, name)
+	assert(type(domain) == "string")
+	assert(type(path) == "string")
+	assert(type(name) == "string")
+
+	-- Clean now so that we can assume there are no expired cookies in store
+	self:clean()
+
+	local domain_cookies = self.domains[domain]
+	if domain_cookies then
+		local path_cookies = domain_cookies[path]
+		if path_cookies then
+			local cookie = path_cookies[name]
+			if cookie then
+				return cookie.value
+			end
+		end
+	end
+	return nil
+end
+
+function store_methods:remove(domain, path, name)
+	assert(type(domain) == "string")
+	assert(type(path) == "string" or (path == nil and name == nil))
+	assert(type(name) == "string" or name == nil)
+	local domain_cookies = self.domains[domain]
+	if not domain_cookies then
+		return
+	end
+	local n_cookies = self.n_cookies
+	if path == nil then
+		-- Delete whole domain
+		for _, path_cookies in pairs(domain_cookies) do
+			for _, cookie in pairs(path_cookies) do
+				self.expiry_heap:remove(cookie)
+				n_cookies = n_cookies - 1
+			end
+		end
+		self.domains[domain] = nil
+		self.n_cookies_per_domain[domain] = nil
+	else
+		local path_cookies = domain_cookies[path]
+		if path_cookies then
+			if name == nil then
+				-- Delete all names at path
+				local cookies_deleted = 0 -- counts cookies removed, not domains
+				for _, cookie in pairs(path_cookies) do
+					self.expiry_heap:remove(cookie)
+					cookies_deleted = cookies_deleted + 1
+				end
+				domain_cookies[path] = nil
+				n_cookies = n_cookies - cookies_deleted
+				if next(domain_cookies) == nil then
+					self.domains[domain] = nil
+					self.n_cookies_per_domain[domain] = nil
+				else
+					self.n_cookies_per_domain[domain] = self.n_cookies_per_domain[domain] - cookies_deleted
+				end
+			else
+				-- Delete singular cookie
+				local cookie = path_cookies[name]
+				if cookie then
+					self.expiry_heap:remove(cookie)
+					n_cookies = n_cookies - 1
+					self.n_cookies_per_domain[domain] = self.n_cookies_per_domain[domain] - 1
+					path_cookies[name] = nil
+					if next(path_cookies) == nil then
+						domain_cookies[path] = nil
+						if next(domain_cookies) == nil then
+							self.domains[domain] = nil
+							self.n_cookies_per_domain[domain] = nil
+						end
+					end
+				end
+			end
+		end
+	end
+	self.n_cookies = n_cookies
+end
+
+--[[ The user agent SHOULD sort the cookie-list in the following order:
+  - Cookies with longer paths are listed before cookies with shorter paths.
+  - Among cookies that have equal-length path fields, cookies with earlier
+	creation-times are listed before cookies with later creation-times.
+]]
+local function cookie_cmp(a, b)
+	if #a.path ~= #b.path then
+		return #a.path > #b.path
+	end
+	if a.creation_time ~= b.creation_time then
+		return a.creation_time < b.creation_time
+	end
+	-- Now order doesn't matter, but have to be consistent for table.sort:
+	-- use the fields that make a cookie unique
+	if a.domain ~= b.domain then
+		return a.domain < b.domain
+	end
+	return a.name < b.name
+end
+
+local function cookie_match(cookie, req_domain, req_is_http, req_is_secure, req_is_safe_method, req_site_for_cookies, req_is_top_level)
+	-- req_domain should be already canonicalized
+
+	if cookie.host_only then -- Either:
+		-- The cookie's host-only-flag is true and the canonicalized
+		-- request-host is identical to the cookie's domain.
+		if cookie.domain ~= req_domain then
+			return false
+		end
+	end
+	-- Or:
+	-- The cookie's host-only-flag is false and the canonicalized
+	-- request-host domain-matches the cookie's domain.
+
+	-- already done domain_match and path_match
+
+	-- If the cookie's http-only-flag is true, then exclude the
+	-- cookie if the cookie-string is being generated for a "non-
+	-- HTTP" API (as defined by the user agent).
+	if cookie.http_only and not req_is_http then
+		return false
+	end
+
+	if cookie.secure_only and not req_is_secure then
+		return false
+	end
+
+	-- If the cookie's same-site-flag is not "None", and the HTTP
+	-- request is cross-site (as defined in Section 5.2) then exclude
+	-- the cookie unless all of the following statements hold:
+	if cookie.same_site and req_site_for_cookies ~= req_domain and not (
+		-- 1. The same-site-flag is "Lax"
+		cookie.same_site == "lax"
+		-- 2. The HTTP request's method is "safe".
+		and req_is_safe_method
+		-- 3. The HTTP request's target browsing context is a top-level browsing context.
+		and req_is_top_level
+	) then
+		return false
+	end
+
+	return true
+end
+
+function store_methods:lookup(req_domain, req_path, req_is_http, req_is_secure, req_is_safe_method, req_site_for_cookies, req_is_top_level, max_cookie_length)
+	req_domain = assert(type(req_domain) == "string" and canonicalise_host(req_domain), "invalid request domain")
+	assert(type(req_path) == "string")
+	if max_cookie_length ~= nil then
+		assert(type(max_cookie_length) == "number")
+	else
+		max_cookie_length = self.max_cookie_length
+	end
+
+	local now = self.time()
+
+	-- Clean now so that we can assume there are no expired cookies in store
+	self:clean()
+
+	local list = {}
+	local n = 0
+	for domain, domain_cookies in pairs(self.domains) do
+		if domain_match(domain, req_domain) then
+			for path, path_cookies in pairs(domain_cookies) do
+				if path_match(path, req_path) then
+					for _, cookie in pairs(path_cookies) do
+						if cookie_match(cookie, req_domain, req_is_http, req_is_secure, req_is_safe_method, req_site_for_cookies, req_is_top_level) then
+							cookie.last_access_time = now
+							n = n + 1
+							list[n] = cookie
+						end
+					end
+				end
+			end
+		end
+	end
+	table.sort(list, cookie_cmp)
+	local cookie_length = -2 -- length of separator ("; ")
+	for i=1, n do
+		local cookie = list[i]
+		local cookie_pair = cookie.name .. "=" .. cookie.value
+		local new_length = cookie_length + #cookie_pair + 2
+		if new_length > max_cookie_length then
+			n = i - 1 -- truncate so concat below doesn't include un-serialized cookie objects
+			break
+		end
+		list[i] = cookie_pair
+		cookie_length = new_length
+	end
+	return table.concat(list, "; ", 1, n)
+end
+
+function store_methods:lookup_for_request(req_headers, req_host, req_site_for_cookies, req_is_top_level, max_cookie_length)
+	local req_method = req_headers:get(":method")
+	if req_method == "CONNECT" then
+		return ""
+	end
+	local req_scheme = req_headers:get(":scheme")
+	local req_authority = req_headers:get(":authority")
+	local req_domain
+	if req_authority then
+		req_domain = http_util.split_authority(req_authority, req_scheme)
+	else -- :authority can be missing for HTTP/1.0 requests; fall back to req_host
+		req_domain = req_host
+	end
+	local req_path = req_headers:get(":path")
+	local req_is_secure = req_scheme == "https"
+	local req_is_safe_method = http_util.is_safe_method(req_method)
+	return self:lookup(req_domain, req_path, true, req_is_secure, req_is_safe_method, req_site_for_cookies, req_is_top_level, max_cookie_length)
+end
+
+function store_methods:clean_due()
+	local next_expiring = self.expiry_heap:peek()
+	if not next_expiring then
+		return (1e999)
+	end
+	return next_expiring.expiry_time
+end
+
+function store_methods:clean()
+	local now = self.time()
+	while self:clean_due() < now do
+		local cookie = self.expiry_heap:pop()
+		self.n_cookies = self.n_cookies - 1
+		local domain = cookie.domain
+		local domain_cookies = self.domains[domain]
+		if domain_cookies then
+			self.n_cookies_per_domain[domain] = self.n_cookies_per_domain[domain] - 1
+			local path_cookies = domain_cookies[cookie.path]
+			if path_cookies then
+				path_cookies[cookie.name] = nil
+				if next(path_cookies) == nil then
+					domain_cookies[cookie.path] = nil
+					if next(domain_cookies) == nil then
+						self.domains[domain] = nil
+						self.n_cookies_per_domain[domain] = nil
+					end
+				end
+			end
+		end
+	end
+	return true
+end
+
+-- Files in 'netscape format'
+-- curl's lib/cookie.c is best reference for the format
+local function parse_netscape_format(line, now)
+	if line == "" then
+		return
+	end
+	local i = 1
+	local http_only = false
+	if line:sub(1, 1) == "#" then
+		if line:sub(1, 10) == "#HttpOnly_" then
+			http_only = true
+			i = 11
+		else
+			return
+		end
+	end
+
+	local domain, host_only, path, secure_only, expiry, name, value =
+		line:match("^%.?([^\t]+)\t([^\t]+)\t([^\t]+)\t([^\t]+)\t(%d+)\t([^\t]+)\t(.+)", i)
+	if not domain then
+		return
+	end
+	domain = canonicalise_host(domain)
+	if domain == nil then
+		return
+	end
+
+	if host_only == "TRUE" then
+		host_only = true
+	elseif host_only == "FALSE" then
+		host_only = false
+	else
+		return
+	end
+
+	if secure_only == "TRUE" then
+		secure_only = true
+	elseif secure_only == "FALSE" then
+		secure_only = false
+	else
+		return
+	end
+
+	expiry = tonumber(expiry, 10)
+
+	return setmetatable({
+		name = name;
+		value = value;
+		expiry_time = expiry;
+		domain = domain;
+		path = path;
+		creation_time = now;
+		last_access_time = now;
+		persistent = expiry ~= 0; -- an expiry of 0 marks a session (non-persistent) cookie
+		host_only = host_only;
+		secure_only = secure_only;
+		http_only = http_only;
+		same_site = nil;
+	}, cookie_mt)
+end
+
+function store_methods:load_from_file(file)
+	local now = self.time()
+
+	-- Clean now so that we don't hit storage limits
+	self:clean()
+
+	local cookies = {}
+	local n = 0
+	while true do
+		local line, err, errno = file:read()
+		if not line then
+			if err ~= nil then
+				return nil, err, errno
+			end
+			break
+		end
+		local cookie = parse_netscape_format(line, now)
+		if cookie then
+			n = n + 1
+			cookies[n] = cookie
+		end
+	end
+	for i=1, n do
+		local cookie = cookies[i]
+		add_to_store(self, cookie, cookie.http_only, now)
+	end
+	return true
+end
+
+function store_methods:save_to_file(file)
+	do -- write a preamble
+		local ok, err, errno = file:write [[
+# Netscape HTTP Cookie File
+# This file was generated by lua-http
+
+]]
+		if not ok then
+			return nil, err, errno
+		end
+	end
+	for _, domain_cookies in pairs(self.domains) do
+		for _, path_cookies in pairs(domain_cookies) do
+			for _, cookie in pairs(path_cookies) do
+				local ok, err, errno = file:write(cookie:netscape_format())
+				if not ok then
+					return nil, err, errno
+				end
+			end
+		end
+	end
+	return true
+end
+
+return {
+	bake = bake;
+
+	parse_cookie = parse_cookie;
+	parse_cookies = parse_cookies;
+	parse_setcookie = parse_setcookie;
+
+	new_store = new_store;
+	store_mt = store_mt;
+	store_methods = store_methods;
+}
diff --git a/http/cookie.tld b/http/cookie.tld
new file mode 100644
index 0000000..55eaf24
--- /dev/null
+++ b/http/cookie.tld
@@ -0,0 +1,28 @@
+require "http.headers"
+
+bake: (string, string, number?, string?, string?, true?, true?, string?) -> (string)
+
+parse_cookie: (string) -> ({string:string})
+parse_cookies: (headers) -> ({{string:string}})
+parse_setcookie: (string) -> (string, string, {string:string})
+
+interface cookie_store
+	psl: any|false -- TODO: use psl type
+	time: () -> (number)
+	max_cookie_length: number
+	max_cookies: number
+	max_cookies_per_domain: number
+
+    const store: (self, string, string, boolean, boolean, string?, string, string, {string:string}) -> (boolean)
+    const store_from_request: (self, headers, headers, string, string?) -> (boolean)
+    const get: (self, string, string, string) -> (string)
+    const remove: (self, string, string?, string?) -> ()
+    const lookup: (self, string, string, boolean?, boolean?, boolean?, string?, boolean?, integer?) -> ()
+    const lookup_for_request: (self, headers, string, string?, boolean?, integer?) -> ()
+    const clean_due: (self) -> (number)
+    const clean: (self) -> (boolean)
+    const load_from_file: (self, file) -> (true) | (nil, string, integer)
+    const save_to_file: (self, file) -> (true) | (nil, string, integer)
+end
+
+new_store: () -> (cookie_store)
diff --git a/http/h1_connection.lua b/http/h1_connection.lua
index 1dd5def..c5c0a64 100644
--- a/http/h1_connection.lua
+++ b/http/h1_connection.lua
@@ -50,11 +50,20 @@ local function new_connection(socket, conn_type, version)
 		-- A function that will be called if the connection becomes idle
 		onidle_ = nil;
 	}, connection_mt)
+	socket:setvbuf("full", math.huge) -- 'infinite' buffering; no write locks needed
 	socket:setmode("b", "bf")
 	socket:onerror(onerror)
 	return self
 end
 
+function connection_methods:setmaxline(read_length)
+	if self.socket == nil then
+		return nil
+	end
+	self.socket:setmaxline(read_length)
+	return true
+end
+
 function connection_methods:clearerr(...)
 	if self.socket == nil then
 		return nil
@@ -109,25 +118,30 @@ end
 -- this function *should never throw*
 function connection_methods:get_next_incoming_stream(timeout)
 	assert(self.type == "server")
-	local deadline = timeout and (monotime()+timeout)
 	-- Make sure we don't try and read before the previous request has been fully read
-	repeat
-		-- Wait until previous requests have been fully read
-		if self.req_locked then
-			if not self.req_cond:wait(deadline and deadline - monotime()) then
-				return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT
-			end
-			assert(self.req_locked == nil)
-		end
-		if self.socket == nil then
-			return nil
+	if self.req_locked then
+		local deadline = timeout and monotime()+timeout
+		assert(cqueues.running(), "cannot wait for condition if not within a cqueues coroutine")
+		if cqueues.poll(self.req_cond, timeout) == timeout then
+			return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT
 		end
-		-- Wait for at least one byte
-		local ok, err, errno = self.socket:fill(1, deadline and deadline-monotime())
-		if not ok then
-			return nil, err, errno
+		timeout = deadline and deadline-monotime()
+		assert(self.req_locked == nil)
+	end
+	if self.socket == nil then
+		return nil
+	end
+	-- Wait for at least one byte
+	local ok, err, errno = self.socket:fill(1, 0)
+	if not ok then
+		if errno == ce.ETIMEDOUT then
+			local deadline = timeout and monotime()+timeout
+			if cqueues.poll(self.socket, timeout) ~= timeout then
+				return self:get_next_incoming_stream(deadline and deadline-monotime())
+			end
 		end
-	until not self.req_locked
+		return nil, err, errno
+	end
 	local stream = h1_stream.new(self)
 	self.pipeline:push(stream)
 	self.req_locked = stream
@@ -145,19 +159,15 @@ function connection_methods:read_request_line(timeout)
 		line, err, errno = self.socket:xread("*L", deadline and (deadline-monotime()))
 	end
 	if line == nil then
-		if err == nil and self.socket:pending() > 0 then
-			self.socket:seterror("r", ce.EILSEQ)
-			if preline then
-				local ok, errno2 = self.socket:unget(preline)
-				if not ok then
-					return nil, onerror(self.socket, "unget", errno2)
-				end
+		if preline then
+			local ok, errno2 = self.socket:unget(preline)
+			if not ok then
+				return nil, onerror(self.socket, "unget", errno2)
 			end
-			return nil, onerror(self.socket, "read_request_line", ce.EILSEQ)
 		end
 		return nil, err, errno
 	end
-	local method, path, httpversion = line:match("^(%w+) (%S+) HTTP/(1%.[01])\r\n$")
+	local method, target, httpversion = line:match("^(%w+) (%S+) HTTP/(1%.[01])\r\n$")
 	if not method then
 		self.socket:seterror("r", ce.EILSEQ)
 		local ok, errno2 = self.socket:unget(line)
@@ -173,16 +183,12 @@ function connection_methods:read_request_line(timeout)
 		return nil, onerror(self.socket, "read_request_line", ce.EILSEQ)
 	end
 	httpversion = httpversion == "1.0" and 1.0 or 1.1 -- Avoid tonumber() due to locale issues
-	return method, path, httpversion
+	return method, target, httpversion
 end
 
 function connection_methods:read_status_line(timeout)
 	local line, err, errno = self.socket:xread("*L", timeout)
 	if line == nil then
-		if err == nil and self.socket:pending() > 0 then
-			self.socket:seterror("r", ce.EILSEQ)
-			return nil, onerror(self.socket, "read_status_line", ce.EILSEQ)
-		end
 		return nil, err, errno
 	end
 	local httpversion, status_code, reason_phrase = line:match("^HTTP/(1%.[01]) (%d%d%d) (.*)\r\n$")
@@ -274,10 +280,6 @@ function connection_methods:read_body_chunk(timeout)
 	local deadline = timeout and (monotime()+timeout)
 	local chunk_header, err, errno = self.socket:xread("*L", timeout)
 	if chunk_header == nil then
-		if err == nil and self.socket:pending() > 0 then
-			self.socket:seterror("r", ce.EILSEQ)
-			return nil, onerror(self.socket, "read_body_chunk", ce.EILSEQ)
-		end
 		return nil, err, errno
 	end
 	local chunk_size, chunk_ext = chunk_header:match("^(%x+) *(.-)\r\n")
@@ -300,12 +302,22 @@ function connection_methods:read_body_chunk(timeout)
 		-- you MUST read trailers after this!
 		return false, chunk_ext
 	else
-		local ok, err2, errno2 = self.socket:fill(chunk_size+2, deadline and deadline-monotime())
+		local ok, err2, errno2 = self.socket:fill(chunk_size+2, 0)
 		if not ok then
 			local unget_ok1, unget_errno1 = self.socket:unget(chunk_header)
 			if not unget_ok1 then
 				return nil, onerror(self.socket, "unget", unget_errno1)
 			end
+			if errno2 == ce.ETIMEDOUT then
+				timeout = deadline and deadline-monotime()
+				if cqueues.poll(self.socket, timeout) ~= timeout then
+					-- retry
+					return self:read_body_chunk(deadline and deadline-monotime())
+				end
+			elseif err2 == nil then
+				self.socket:seterror("r", ce.EILSEQ)
+				return nil, onerror(self.socket, "read_body_chunk", ce.EILSEQ)
+			end
 			return nil, err2, errno2
 		end
 		-- if `fill` succeeded these shouldn't be able to fail
@@ -332,11 +344,11 @@ function connection_methods:read_body_chunk(timeout)
 	end
 end
 
-function connection_methods:write_request_line(method, path, httpversion, timeout)
+function connection_methods:write_request_line(method, target, httpversion, timeout)
 	assert(method:match("^[^ \r\n]+$"))
-	assert(path:match("^[^ \r\n]+$"))
+	assert(target:match("^[^ \r\n]+$"))
 	assert(httpversion == 1.0 or httpversion == 1.1)
-	local line = string.format("%s %s HTTP/%1.1f\r\n", method, path, httpversion)
+	local line = string.format("%s %s HTTP/%s\r\n", method, target, httpversion == 1.0 and "1.0" or "1.1")
 	local ok, err, errno = self.socket:xwrite(line, "f", timeout)
 	if not ok then
 		return nil, err, errno
@@ -348,7 +360,7 @@ function connection_methods:write_status_line(httpversion, status_code, reason_p
 	assert(httpversion == 1.0 or httpversion == 1.1)
 	assert(status_code:match("^[1-9]%d%d$"), "invalid status code")
 	assert(type(reason_phrase) == "string" and reason_phrase:match("^[^\r\n]*$"), "invalid reason phrase")
-	local line = string.format("HTTP/%1.1f %s %s\r\n", httpversion, status_code, reason_phrase)
+	local line = string.format("HTTP/%s %s %s\r\n", httpversion == 1.0 and "1.0" or "1.1", status_code, reason_phrase)
 	local ok, err, errno = self.socket:xwrite(line, "f", timeout)
 	if not ok then
 		return nil, err, errno
diff --git a/http/h1_reason_phrases.lua b/http/h1_reason_phrases.lua
index bb19fff..5ea5e8c 100644
--- a/http/h1_reason_phrases.lua
+++ b/http/h1_reason_phrases.lua
@@ -5,6 +5,7 @@ local reason_phrases = setmetatable({
 	["100"] = "Continue";
 	["101"] = "Switching Protocols";
 	["102"] = "Processing";
+	["103"] = "Early Hints";
 
 	["200"] = "OK";
 	["201"] = "Created";
diff --git a/http/h1_reason_phrases.tld b/http/h1_reason_phrases.tld
new file mode 100644
index 0000000..ee58405
--- /dev/null
+++ b/http/h1_reason_phrases.tld
@@ -0,0 +1 @@
+reason_phrases: {string:string}
diff --git a/http/h1_stream.lua b/http/h1_stream.lua
index 4e824ca..b2469a1 100644
--- a/http/h1_stream.lua
+++ b/http/h1_stream.lua
@@ -46,6 +46,7 @@ end
 
 local stream_methods = {
 	use_zlib = has_zlib;
+	max_header_lines = 100;
 }
 for k,v in pairs(stream_common.methods) do
 	stream_methods[k] = v
@@ -73,9 +74,12 @@ local function new_stream(connection)
 
 		req_method = nil; -- string
 		peer_version = nil; -- 1.0 or 1.1
+		has_main_headers = false;
+		headers_in_progress = nil;
 		headers_fifo = new_fifo();
 		headers_cond = cc.new();
-		body_buffer = nil;
+		chunk_fifo = new_fifo();
+		chunk_cond = cc.new();
 		body_write_type = nil; -- "closed", "chunked", "length" or "missing"
 		body_write_left = nil; -- integer: only set when body_write_type == "length"
 		body_write_deflate_encoding = nil;
@@ -160,48 +164,87 @@ bad_request_headers:append(":status", "400")
 local server_error_headers = new_headers()
 server_error_headers:append(":status", "503")
 function stream_methods:shutdown()
-	if self.type == "server" and (self.state == "open" or self.state == "half closed (remote)") then
-		-- Make sure we're at the front of the pipeline
-		if self.connection.pipeline:peek() ~= self then
-			self.pipeline_cond:wait() -- wait without a timeout should never fail
-			assert(self.connection.pipeline:peek() == self)
-		end
-		if not self.body_write_type then
-			-- Can send an automatic error response
-			local error_headers
-			if self.connection:error("r") == ce.EILSEQ then
-				error_headers = bad_request_headers
-			else
-				error_headers = server_error_headers
+	if self.state == "idle" then
+		self:set_state("closed")
+	else
+		if self.type == "server" and (self.state == "open" or self.state == "half closed (remote)") then
+			-- Make sure we're at the front of the pipeline
+			if self.connection.pipeline:peek() ~= self then
+				-- FIXME: shouldn't have time-taking operation here
+				self.pipeline_cond:wait() -- wait without a timeout should never fail
+				assert(self.connection.pipeline:peek() == self)
+			end
+			if not self.body_write_type then
+				-- Can send an automatic error response
+				local error_headers
+				if self.connection:error("r") == ce.EILSEQ then
+					error_headers = bad_request_headers
+				else
+					error_headers = server_error_headers
+				end
+				self:write_headers(error_headers, true, 0)
 			end
-			self:write_headers(error_headers, true)
 		end
-	end
-	if self.state == "half closed (local)" then
-		-- we'd like to finishing reading any remaining response so that we get out of the way
+		-- read any remaining available response and get out of the way
 		local start = self.stats_recv
-		repeat
-			-- don't bother continuing if we're reading until connection is closed
-			if self.body_read_type == "close" then
+		while (self.state == "open" or self.state == "half closed (local)") and (self.stats_recv - start) < clean_shutdown_limit do
+			if not self:step(0) then
 				break
 			end
-			if self:get_next_chunk(0) == nil then
-				break -- ignore errors
+		end
+
+		if self.state ~= "closed" then
+			-- This is a bad situation: we are trying to shutdown a connection that has the body partially sent
+			-- Especially in the case of Connection: close, where closing indicates EOF,
+			-- this will result in a client only getting a partial response.
+			-- Could also end up here if a client sending headers fails.
+			if self.connection.socket then
+				self.connection.socket:shutdown()
 			end
-		until (self.stats_recv - start) >= clean_shutdown_limit
-		-- state may still be "half closed (local)" (but hopefully moved on to "closed")
+			self:set_state("closed")
+		end
 	end
-	if self.state == "idle" then
-		self:set_state("closed")
-	elseif self.state ~= "closed" then
-		-- This is a bad situation: we are trying to shutdown a connection that has the body partially sent
-		-- Especially in the case of Connection: close, where closing indicates EOF,
-		-- this will result in a client only getting a partial response.
-		-- Could also end up here if a client sending headers fails.
-		if self.connection.socket then
-			self.connection.socket:shutdown()
+	return true
+end
+
+function stream_methods:step(timeout)
+	if self.state == "open" or self.state == "half closed (local)" or (self.state == "idle" and self.type == "server") then
+		if self.connection.socket == nil then
+			return nil, ce.strerror(ce.EPIPE), ce.EPIPE
+		end
+		if not self.has_main_headers then
+			local headers, err, errno = self:read_headers(timeout)
+			if headers == nil then
+				return nil, err, errno
+			end
+			self.headers_fifo:push(headers)
+			self.headers_cond:signal(1)
+			return true
+		end
+		if self.body_read_left ~= 0 then
+			local chunk, err, errno = self:read_next_chunk(timeout)
+			if chunk == nil then
+				if err == nil then
+					return true
+				end
+				return nil, err, errno
+			end
+			self.chunk_fifo:push(chunk)
+			self.chunk_cond:signal()
+			return true
+		end
+		if self.body_read_type == "chunked" then
+			local trailers, err, errno = self:read_headers(timeout)
+			if trailers == nil then
+				return nil, err, errno
+			end
+			self.headers_fifo:push(trailers)
+			self.headers_cond:signal(1)
+			return true
 		end
-		self:set_state("closed")
+	end
+	if self.state == "half closed (remote)" then
+		return nil, ce.strerror(ce.EIO), ce.EIO
 	end
 	return true
 end
@@ -214,55 +257,85 @@ function stream_methods:read_headers(timeout)
 	if self.state == "closed" or self.state == "half closed (remote)" then
 		return nil
 	end
-	local headers = new_headers()
 	local status_code
 	local is_trailers = self.body_read_type == "chunked"
-	if is_trailers then -- luacheck: ignore 542
-	elseif self.type == "server" then
-		if self.state == "half closed (local)" then
-			return nil
-		end
-		local method, path, httpversion =
-			self.connection:read_request_line(deadline and (deadline-monotime()))
-		if method == nil then
-			return nil, path, httpversion
-		end
-		self.req_method = method
-		self.peer_version = httpversion
-		headers:append(":method", method)
-		if method == "CONNECT" then
-			headers:append(":authority", path)
-		else
-			headers:append(":path", path)
-		end
-		headers:append(":scheme", self:checktls() and "https" or "http")
-		self:set_state("open")
-	else -- client
-		-- Make sure we're at front of connection pipeline
-		if self.connection.pipeline:peek() ~= self then
-			if not self.pipeline_cond:wait(deadline and (deadline-monotime)) then
-				return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT
+	local headers = self.headers_in_progress
+	if not headers then
+		if is_trailers then
+			headers = new_headers()
+		elseif self.type == "server" then
+			if self.state == "half closed (local)" then
+				return nil
 			end
-			assert(self.connection.pipeline:peek() == self)
-		end
-		local httpversion, reason_phrase
-		httpversion, status_code, reason_phrase =
-			self.connection:read_status_line(deadline and (deadline-monotime()))
-		if httpversion == nil then
-			if status_code == nil then
-				return nil, ce.strerror(ce.EPIPE), ce.EPIPE
+			local method, target, httpversion = self.connection:read_request_line(0)
+			if method == nil then
+				if httpversion == ce.ETIMEDOUT then
+					timeout = deadline and deadline-monotime()
+					if cqueues.poll(self.connection.socket, timeout) ~= timeout then
+						return self:read_headers(deadline and deadline-monotime())
+					end
+				end
+				return nil, target, httpversion
+			end
+			self.req_method = method
+			self.peer_version = httpversion
+			headers = new_headers()
+			headers:append(":method", method)
+			if method == "CONNECT" then
+				headers:append(":authority", target)
+			else
+				headers:append(":path", target)
+			end
+			headers:append(":scheme", self:checktls() and "https" or "http")
+			self:set_state("open")
+		else -- client
+			-- Make sure we're at front of connection pipeline
+			if self.connection.pipeline:peek() ~= self then
+				assert(cqueues.running(), "cannot wait for condition if not within a cqueues coroutine")
+				if cqueues.poll(self.pipeline_cond, timeout) == timeout then
+					return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT
+				end
+				assert(self.connection.pipeline:peek() == self)
+			end
+			local httpversion, reason_phrase
+			httpversion, status_code, reason_phrase = self.connection:read_status_line(0)
+			if httpversion == nil then
+				if reason_phrase == ce.ETIMEDOUT then
+					timeout = deadline and deadline-monotime()
+					if cqueues.poll(self.connection.socket, timeout) ~= timeout then
+						return self:read_headers(deadline and deadline-monotime())
+					end
+				elseif status_code == nil then
+					return nil, ce.strerror(ce.EPIPE), ce.EPIPE
+				end
+				return nil, status_code, reason_phrase
 			end
-			return nil, status_code, reason_phrase
+			self.peer_version = httpversion
+			headers = new_headers()
+			headers:append(":status", status_code)
+			-- reason phrase intentionally does not exist in HTTP2; discard for consistency
+		end
+		self.headers_in_progress = headers
+	else
+		if not is_trailers and self.type == "client" then
+			status_code = headers:get(":status")
 		end
-		self.peer_version = httpversion
-		headers:append(":status", status_code)
-		-- reason phase intentionally does not exist in HTTP2; discard for consistency
 	end
+
 	-- Use while loop for lua 5.1 compatibility
 	while true do
-		local k, v, errno = self.connection:read_header(deadline and (deadline-monotime()))
+		if headers:len() >= self.max_header_lines then
+			return nil, ce.strerror(ce.E2BIG), ce.E2BIG
+		end
+		local k, v, errno = self.connection:read_header(0)
 		if k == nil then
 			if v ~= nil then
+				if errno == ce.ETIMEDOUT then
+					timeout = deadline and deadline-monotime()
+					if cqueues.poll(self.connection.socket, timeout) ~= timeout then
+						return self:read_headers(deadline and deadline-monotime())
+					end
+				end
 				return nil, v, errno
 			end
 			break -- Success: End of headers.
@@ -275,13 +348,20 @@ function stream_methods:read_headers(timeout)
 	end
 
 	do
-		local ok, err, errno = self.connection:read_headers_done(deadline and (deadline-monotime()))
+		local ok, err, errno = self.connection:read_headers_done(0)
 		if ok == nil then
-			if err == nil then
+			if errno == ce.ETIMEDOUT then
+				timeout = deadline and deadline-monotime()
+				if cqueues.poll(self.connection.socket, timeout) ~= timeout then
+					return self:read_headers(deadline and deadline-monotime())
+				end
+			elseif err == nil then
 				return nil, ce.strerror(ce.EPIPE), ce.EPIPE
 			end
 			return nil, err, errno
 		end
+		self.headers_in_progress = nil
+		self.has_main_headers = status_code == nil or status_code:sub(1,1) ~= "1" or status_code == "101"
 	end
 
 	do -- if client is sends `Connection: close`, server knows it can close at end of response
@@ -383,24 +463,17 @@ end
 function stream_methods:get_headers(timeout)
 	if self.headers_fifo:length() > 0 then
 		return self.headers_fifo:pop()
+	else
+		if self.state == "closed" or self.state == "half closed (remote)" then
+			return nil
+		end
+		local deadline = timeout and monotime()+timeout
+		local ok, err, errno = self:step(timeout)
+		if not ok then
+			return nil, err, errno
+		end
+		return self:get_headers(deadline and deadline-monotime())
 	end
-	if self.body_read_type == "chunked" then
-		-- wait for signal from trailers
-		-- XXX: what if nothing is reading body?
-		local deadline = timeout and monotime() + timeout
-		repeat
-			if self.state == "closed" or self.state == "half closed (remote)" then
-				return nil
-			end
-			if not self.headers_cond:wait(timeout) then
-				return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT
-			end
-			timeout = deadline and deadline-monotime()
-		until self.headers_fifo:length() > 0
-		return self.headers_fifo:pop()
-	end
-	-- TODO: locking?
-	return self:read_headers(timeout)
 end
 
 local ignore_fields = {
@@ -409,6 +482,7 @@ local ignore_fields = {
 	[":path"] = true;
 	[":scheme"] = true;
 	[":status"] = true;
+	[":protocol"] = true; -- from RFC 8441
 	-- fields written manually in :write_headers
 	["connection"] = true;
 	["content-length"] = true;
@@ -455,7 +529,7 @@ function stream_methods:write_headers(headers, end_stream, timeout)
 	if self.body_write_type == "chunked" then
 		-- we are writing trailers; close off body
 		is_trailers = true
-		local ok, err, errno = self.connection:write_body_last_chunk(nil, deadline and deadline-monotime())
+		local ok, err, errno = self.connection:write_body_last_chunk(nil, 0)
 		if not ok then
 			return nil, err, errno
 		end
@@ -471,7 +545,9 @@ function stream_methods:write_headers(headers, end_stream, timeout)
 		end
 		-- Make sure we're at the front of the pipeline
 		if self.connection.pipeline:peek() ~= self then
-			if not self.pipeline_cond:wait(deadline and (deadline-monotime)) then
+			assert(cqueues.running(), "cannot wait for condition if not within a cqueues coroutine")
+			headers = headers:clone() -- don't want user to edit it and send wrong headers
+			if cqueues.poll(self.pipeline_cond, timeout) == timeout then
 				return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT
 			end
 			assert(self.connection.pipeline:peek() == self)
@@ -480,7 +556,7 @@ function stream_methods:write_headers(headers, end_stream, timeout)
 			-- Should send status line
 			local reason_phrase = reason_phrases[status_code]
 			local version = math.min(self.connection.version, self.peer_version)
-			local ok, err, errno = self.connection:write_status_line(version, status_code, reason_phrase, deadline and deadline-monotime())
+			local ok, err, errno = self.connection:write_status_line(version, status_code, reason_phrase, 0)
 			if not ok then
 				return nil, err, errno
 			end
@@ -489,26 +565,28 @@ function stream_methods:write_headers(headers, end_stream, timeout)
 		if self.state == "idle" then
 			method = assert(headers:get(":method"), "missing method")
 			self.req_method = method
-			local path
+			local target
 			if method == "CONNECT" then
-				path = assert(headers:get(":authority"), "missing authority")
+				target = assert(headers:get(":authority"), "missing authority")
 				assert(not headers:has(":path"), "CONNECT requests should not have a path")
 			else
 				-- RFC 7230 Section 5.4: A client MUST send a Host header field in all HTTP/1.1 request messages.
 				assert(self.connection.version < 1.1 or headers:has(":authority"), "missing authority")
-				path = assert(headers:get(":path"), "missing path")
+				target = assert(headers:get(":path"), "missing path")
 			end
-			if self.req_locked then
-				-- Wait until previous responses have been fully written
-				if not self.connection.req_cond:wait(deadline and (deadline-monotime())) then
+			if self.connection.req_locked then
+				-- Wait until previous request has been fully written
+				assert(cqueues.running(), "cannot wait for condition if not within a cqueues coroutine")
+				headers = headers:clone() -- don't want user to edit it and send wrong headers
+				if cqueues.poll(self.connection.req_cond, timeout) == timeout then
 					return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT
 				end
-				assert(self.req_locked == nil)
+				assert(self.connection.req_locked == nil)
 			end
 			self.connection.pipeline:push(self)
 			self.connection.req_locked = self
 			-- write request line
-			local ok, err, errno = self.connection:write_request_line(method, path, self.connection.version, deadline and (deadline-monotime()))
+			local ok, err, errno = self.connection:write_request_line(method, target, self.connection.version, 0)
 			if not ok then
 				return nil, err, errno
 			end
@@ -532,7 +610,8 @@ function stream_methods:write_headers(headers, end_stream, timeout)
 		end
 	elseif self.type == "server" and status_code and status_code:sub(1, 1) == "1" then
 		assert(not end_stream, "cannot end stream directly after 1xx status code")
-		-- A server MUST NOT send a Content-Length header field in any response with a status code of 1xx (Informational) or 204 (No Content)
+		-- A server MUST NOT send a Content-Length header field in any response
+		-- with a status code of 1xx (Informational) or 204 (No Content)
 		if cl then
 			error("Content-Length not allowed in response with 1xx status code")
 		end
@@ -549,11 +628,13 @@ function stream_methods:write_headers(headers, end_stream, timeout)
 		end
 		if cl then
 			-- RFC 7230 Section 3.3.2:
-			-- A sender MUST NOT send a Content-Length header field in any message that contains a Transfer-Encoding header field.
+			-- A sender MUST NOT send a Content-Length header field in any
+			-- message that contains a Transfer-Encoding header field.
 			if transfer_encoding_header then
 				error("Content-Length not allowed in message with a transfer-encoding")
 			elseif self.type == "server" then
-				-- A server MUST NOT send a Content-Length header field in any response with a status code of 1xx (Informational) or 204 (No Content)
+				-- A server MUST NOT send a Content-Length header field in any response
+				-- with a status code of 1xx (Informational) or 204 (No Content)
 				if status_code == "204" then
 					error("Content-Length not allowed in response with 204 status code")
 				end
@@ -656,7 +737,7 @@ function stream_methods:write_headers(headers, end_stream, timeout)
 
 	for name, value in headers:each() do
 		if not ignore_fields[name] then
-			local ok, err, errno = self.connection:write_header(name, value, deadline and (deadline-monotime()))
+			local ok, err, errno = self.connection:write_header(name, value, 0)
 			if not ok then
 				return nil, err, errno
 			end
@@ -664,7 +745,7 @@ function stream_methods:write_headers(headers, end_stream, timeout)
 			-- for CONNECT requests, :authority is the path
 			if self.req_method ~= "CONNECT" then
 				-- otherwise it's the Host header
-				local ok, err, errno = self.connection:write_header("host", value, deadline and (deadline-monotime()))
+				local ok, err, errno = self.connection:write_header("host", value, 0)
 				if not ok then
 					return nil, err, errno
 				end
@@ -677,7 +758,7 @@ function stream_methods:write_headers(headers, end_stream, timeout)
 		if not has(connection_header, "te") then
 			table.insert(connection_header, "te")
 		end
-		local ok, err, errno = self.connection:write_header("te", "gzip, deflate", deadline and deadline-monotime())
+		local ok, err, errno = self.connection:write_header("te", "gzip, deflate", 0)
 		if not ok then
 			return nil, err, errno
 		end
@@ -699,19 +780,19 @@ function stream_methods:write_headers(headers, end_stream, timeout)
 			value[i] = table.concat(params, ";")
 		end
 		value = table.concat(value, ",")
-		local ok, err, errno = self.connection:write_header("transfer-encoding", value, deadline and (deadline-monotime()))
+		local ok, err, errno = self.connection:write_header("transfer-encoding", value, 0)
 		if not ok then
 			return nil, err, errno
 		end
 	elseif cl then
-		local ok, err, errno = self.connection:write_header("content-length", cl, deadline and (deadline-monotime()))
+		local ok, err, errno = self.connection:write_header("content-length", cl, 0)
 		if not ok then
 			return nil, err, errno
 		end
 	end
 	if connection_header and connection_header[1] then
 		local value = table.concat(connection_header, ",")
-		local ok, err, errno = self.connection:write_header("connection", value, deadline and (deadline-monotime()))
+		local ok, err, errno = self.connection:write_header("connection", value, 0)
 		if not ok then
 			return nil, err, errno
 		end
@@ -742,30 +823,28 @@ function stream_methods:write_headers(headers, end_stream, timeout)
 	return true
 end
 
-function stream_methods:get_next_chunk(timeout)
-	local chunk = self.body_buffer
-	if chunk then
-		self.body_buffer = nil
-		return chunk
-	end
+function stream_methods:read_next_chunk(timeout)
 	if self.state == "closed" or self.state == "half closed (remote)" then
 		return nil
 	end
 	local end_stream
-	local err, errno
+	local chunk, err, errno
 	if self.body_read_type == "chunked" then
 		local deadline = timeout and (monotime()+timeout)
-		chunk, err, errno = self.connection:read_body_chunk(timeout)
+		if self.body_read_left == 0 then
+			chunk = false
+		else
+			chunk, err, errno = self.connection:read_body_chunk(timeout)
+		end
 		if chunk == false then
-			-- read trailers
-			local trailers
-			trailers, err, errno = self:read_headers(deadline and (deadline-monotime()))
-			if not trailers then
+			-- last chunk, :read_headers should be called to get trailers
+			self.body_read_left = 0
+			-- for API compat: attempt to read trailers
+			local ok
+			ok, err, errno = self:step(deadline and deadline-monotime())
+			if not ok then
 				return nil, err, errno
 			end
-			self.headers_fifo:push(trailers)
-			self.headers_cond:signal(1)
-			-- :read_headers has already closed connection; return immediately
 			return nil
 		else
 			end_stream = false
@@ -823,13 +902,16 @@ function stream_methods:get_next_chunk(timeout)
 	return chunk, err, errno
 end
 
-function stream_methods:unget(str)
-	local chunk = self.body_buffer
-	if chunk then
-		self.body_buffer = str .. chunk
-	else
-		self.body_buffer = str
+function stream_methods:get_next_chunk(timeout)
+	if self.chunk_fifo:length() > 0 then
+		return self.chunk_fifo:pop()
 	end
+	return self:read_next_chunk(timeout)
+end
+
+function stream_methods:unget(str)
+	self.chunk_fifo:insert(1, str)
+	self.chunk_cond:signal()
 	return true
 end
 
diff --git a/http/h2_connection.lua b/http/h2_connection.lua
index 11704d3..7e9c31e 100644
--- a/http/h2_connection.lua
+++ b/http/h2_connection.lua
@@ -9,10 +9,11 @@ local connection_common = require "http.connection_common"
 local onerror = connection_common.onerror
 local h2_error = require "http.h2_error"
 local h2_stream = require "http.h2_stream"
+local known_settings = h2_stream.known_settings
 local hpack = require "http.hpack"
 local h2_banned_ciphers = require "http.tls".banned_ciphers
-local spack = string.pack or require "compat53.string".pack
-local sunpack = string.unpack or require "compat53.string".unpack
+local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143
+local sunpack = string.unpack or require "compat53.string".unpack -- luacheck: ignore 143
 
 local assert = assert
 if _VERSION:match("%d+%.?%d*") < "5.3" then
@@ -26,23 +27,23 @@ end
 local preface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
 
 local default_settings = {
-	[0x1] = 4096; -- HEADER_TABLE_SIZE
-	[0x2] = true; -- ENABLE_PUSH
-	[0x3] = math.huge; -- MAX_CONCURRENT_STREAMS
-	[0x4] = 65535; -- INITIAL_WINDOW_SIZE
-	[0x5] = 16384; -- MAX_FRAME_SIZE
-	[0x6] = math.huge;  -- MAX_HEADER_LIST_SIZE
+	[known_settings.HEADER_TABLE_SIZE] = 4096;
+	[known_settings.ENABLE_PUSH] = true;
+	[known_settings.MAX_CONCURRENT_STREAMS] = math.huge;
+	[known_settings.INITIAL_WINDOW_SIZE] = 65535;
+	[known_settings.MAX_FRAME_SIZE] = 16384;
+	[known_settings.MAX_HEADER_LIST_SIZE] = math.huge;
+	[known_settings.SETTINGS_ENABLE_CONNECT_PROTOCOL] = false;
+	[known_settings.TLS_RENEG_PERMITTED] = 0;
 }
 
-local function merge_settings(new, old)
-	return {
-		[0x1] = new[0x1] or old[0x1];
-		[0x2] = new[0x2] or old[0x2];
-		[0x3] = new[0x3] or old[0x3];
-		[0x4] = new[0x4] or old[0x4];
-		[0x5] = new[0x5] or old[0x5];
-		[0x6] = new[0x6] or old[0x6];
-	}
+local function merge_settings(tbl, new)
+	for i=0x1, 0x6 do
+		local v = new[i]
+		if v ~= nil then
+			tbl[i] = v
+		end
+	end
 end
 
 local connection_methods = {}
@@ -100,10 +101,6 @@ local function new_connection(socket, conn_type, settings)
 		error('invalid connection type. must be "client" or "server"')
 	end
 
-	socket:setvbuf("full", math.huge) -- 'infinite' buffering; no write locks needed
-	socket:setmode("b", "bf") -- full buffering for now; will be set to no buffering after settings sent
-	socket:onerror(onerror)
-
 	local ssl = socket:checktls()
 	if ssl then
 		local cipher = ssl:getCipherInfo()
@@ -128,6 +125,8 @@ local function new_connection(socket, conn_type, settings)
 
 		-- For continuations
 		need_continuation = nil; -- stream
+		promised_stream = nil; -- stream
+		recv_headers_end_stream = nil;
 		recv_headers_buffer = nil;
 		recv_headers_buffer_pos = nil;
 		recv_headers_buffer_pad_len = nil;
@@ -135,52 +134,49 @@ local function new_connection(socket, conn_type, settings)
 		recv_headers_buffer_length = nil;
 
 		highest_odd_stream = -1;
+		highest_odd_non_idle_stream = -1;
 		highest_even_stream = -2;
+		highest_even_non_idle_stream = -2;
 		send_goaway_lowest = nil;
 		recv_goaway_lowest = nil;
 		recv_goaway = cc.new();
 		new_streams = new_fifo();
 		new_streams_cond = cc.new();
-		peer_settings = default_settings;
+		peer_settings = {};
 		peer_settings_cond = cc.new(); -- signaled when the peer has changed their settings
-		acked_settings = default_settings;
+		acked_settings = {};
 		send_settings = {n = 0};
 		send_settings_ack_cond = cc.new(); -- for when server ACKs our settings
 		send_settings_acked = 0;
 		peer_flow_credits = 65535; -- 5.2.1
-		peer_flow_credits_increase = cc.new();
+		peer_flow_credits_change = cc.new();
 		encoding_context = nil;
 		decoding_context = nil;
 		pongs = {}; -- pending pings we've sent. keyed by opaque 8 byte payload
 	}, connection_mt)
 	self:new_stream(0)
-	self.encoding_context = hpack.new(default_settings[0x1])
-	self.decoding_context = hpack.new(default_settings[0x1])
+	merge_settings(self.peer_settings, default_settings)
+	merge_settings(self.acked_settings, default_settings)
+	self.encoding_context = hpack.new(default_settings[known_settings.HEADER_TABLE_SIZE])
+	self.decoding_context = hpack.new(default_settings[known_settings.HEADER_TABLE_SIZE])
 
+	socket:setvbuf("full", math.huge) -- 'infinite' buffering; no write locks needed
+	socket:setmode("b", "bna") -- writes that don't explicitly buffer will now flush the buffer. autoflush on
+	socket:onerror(onerror)
 	if self.type == "client" then
-		-- fully buffered write; will be flushed when sending settings
 		assert(socket:xwrite(preface, "f", 0))
 	end
-	assert(self.stream0:write_settings_frame(false, settings or {}, 0))
-	socket:setmode("b", "bna") -- writes that don't explicitly buffer will now flush the buffer. autoflush on
+	assert(self.stream0:write_settings_frame(false, settings or {}, 0, "f"))
 	-- note that the buffer is *not* flushed right now
 
 	return self
 end
 
-function connection_methods:pollfd()
-	return self.socket:pollfd()
-end
-
-function connection_methods:events()
-	return self.socket:events()
-end
-
 function connection_methods:timeout()
 	if not self.had_eagain then
 		return 0
 	end
-	return self.socket:timeout()
+	return connection_common.methods.timeout(self)
 end
 
 local function handle_frame(self, typ, flag, streamid, payload, deadline)
@@ -192,19 +188,38 @@ local function handle_frame(self, typ, flag, streamid, payload, deadline)
 	-- Implementations MUST ignore and discard any frame that has a type that is unknown.
 	if handler then
 		local stream = self.streams[streamid]
-		if stream == nil and (not self.recv_goaway_lowest or streamid < self.recv_goaway_lowest) then
+		if stream == nil then
 			if xor(streamid % 2 == 1, self.type == "client") then
 				return nil, h2_error.errors.PROTOCOL_ERROR:new_traceback("Streams initiated by a client MUST use odd-numbered stream identifiers; those initiated by the server MUST use even-numbered stream identifiers"), ce.EILSEQ
 			end
 			-- TODO: check MAX_CONCURRENT_STREAMS
 			stream = self:new_stream(streamid)
-			self.new_streams:push(stream)
-			self.new_streams_cond:signal(1)
+			--[[ http2 spec section 6.8
+			the sender will ignore frames sent on streams initiated by
+			the receiver if the stream has an identifier higher than the included
+			last stream identifier
+			...
+			After sending a GOAWAY frame, the sender can discard frames for
+			streams initiated by the receiver with identifiers higher than the
+			identified last stream.  However, any frames that alter connection
+			state cannot be completely ignored.  For instance, HEADERS,
+			PUSH_PROMISE, and CONTINUATION frames MUST be minimally processed to
+			ensure the state maintained for header compression is consistent (see
+			Section 4.3); similarly, DATA frames MUST be counted toward the
+			connection flow-control window.  Failure to process these frames can
+			cause flow control or header compression state to become
+			unsynchronized.]]
+			-- If we haven't seen this stream before, and we should be discarding frames from it,
+			-- then don't push it into the new_streams fifo
+			if self.send_goaway_lowest == nil or streamid <= self.send_goaway_lowest then
+				self.new_streams:push(stream)
+				self.new_streams_cond:signal(1)
+			end
 		end
 		local ok, err, errno = handler(stream, flag, payload, deadline)
 		if not ok then
 			if h2_error.is(err) and err.stream_error and streamid ~= 0 and stream.state ~= "idle" then
-				local ok2, err2, errno2 = stream:write_rst_stream(err.code, deadline and deadline-monotime())
+				local ok2, err2, errno2 = stream:rst_stream(err, deadline and deadline-monotime())
 				if not ok2 then
 					return nil, err2, errno2
 				end
@@ -220,8 +235,10 @@ function connection_methods:step(timeout)
 	local deadline = timeout and monotime()+timeout
 	if not self.has_confirmed_preface and self.type == "server" then
 		local ok, err, errno = socket_has_preface(self.socket, false, timeout)
+		self.had_eagain = false
 		if ok == nil then
 			if errno == ce.ETIMEDOUT then
+				self.had_eagain = true
 				return true
 			end
 			return nil, err, errno
@@ -300,40 +317,13 @@ function connection_methods:shutdown()
 end
 
 function connection_methods:new_stream(id)
-	if id then
-		assert(id % 1 == 0)
-	else
-		if self.recv_goaway_lowest then
-			h2_error.errors.PROTOCOL_ERROR("Receivers of a GOAWAY frame MUST NOT open additional streams on the connection")
-		end
-		if self.type == "client" then
-			-- Pick next free odd number
-			id = self.highest_odd_stream + 2
-		else
-			-- Pick next free odd number
-			id = self.highest_even_stream + 2
-		end
-		-- TODO: check MAX_CONCURRENT_STREAMS
+	if id and self.streams[id] ~= nil then
+		error("stream id already in use")
 	end
-	assert(self.streams[id] == nil, "stream id already in use")
-	assert(id < 2^32, "stream id too large")
-	if id % 2 == 0 then
-		if id > self.highest_even_stream then
-			self.highest_even_stream = id
-		end
-	else
-		if id > self.highest_odd_stream then
-			self.highest_odd_stream = id
-		end
-	end
-	local stream = h2_stream.new(self, id)
-	if id == 0 then
-		self.stream0 = stream
-	else
-		-- Add dependency on stream 0. http2 spec, 5.3.1
-		self.stream0:reprioritise(stream)
+	local stream = h2_stream.new(self)
+	if id then
+		stream:pick_id(id)
 	end
-	self.streams[id] = stream
 	return stream
 end
 
@@ -385,10 +375,14 @@ function connection_methods:read_http2_frame(timeout)
 		end
 	end
 	local size, typ, flags, streamid = sunpack(">I3 B B I4", frame_header)
-	if size > self.acked_settings[0x5] then
+	if size > self.acked_settings[known_settings.MAX_FRAME_SIZE] then
+		local ok, errno2 = self.socket:unget(frame_header)
+		if not ok then
+			return nil, onerror(self.socket, "unget", errno2, 2)
+		end
 		return nil, h2_error.errors.FRAME_SIZE_ERROR:new_traceback("frame too large"), ce.E2BIG
 	end
-	local payload, err2, errno2 = self.socket:xread(size, deadline and (deadline-monotime()))
+	local payload, err2, errno2 = self.socket:xread(size, 0)
 	self.had_eagain = false
 	if payload and #payload < size then -- hit EOF
 		local ok, errno4 = self.socket:unget(payload)
@@ -398,15 +392,18 @@ function connection_methods:read_http2_frame(timeout)
 		payload = nil
 	end
 	if payload == nil then
-		if errno2 == ce.ETIMEDOUT then
-			self.had_eagain = true
-		end
 		-- put frame header back into socket so a retry will work
 		local ok, errno3 = self.socket:unget(frame_header)
 		if not ok then
 			return nil, onerror(self.socket, "unget", errno3, 2)
 		end
-		if err2 == nil then
+		if errno2 == ce.ETIMEDOUT then
+			self.had_eagain = true
+			timeout = deadline and deadline-monotime()
+			if cqueues.poll(self.socket, timeout) ~= timeout then
+				return self:read_http2_frame(deadline and deadline-monotime())
+			end
+		elseif err2 == nil then
 			self.socket:seterror("r", ce.EILSEQ)
 			return nil, onerror(self.socket, "read_http2_frame", ce.EILSEQ)
 		end
@@ -420,17 +417,16 @@ end
 -- If this times out, it was the flushing; not the write itself
 -- hence it's not always total failure.
 -- It's up to the caller to take some action (e.g. closing) rather than doing it here
-function connection_methods:write_http2_frame(typ, flags, streamid, payload, timeout)
-	local deadline = timeout and monotime()+timeout
-	if #payload > self.peer_settings[0x5] then
+function connection_methods:write_http2_frame(typ, flags, streamid, payload, timeout, flush)
+	if #payload > self.peer_settings[known_settings.MAX_FRAME_SIZE] then
 		return nil, h2_error.errors.FRAME_SIZE_ERROR:new_traceback("frame too large"), ce.E2BIG
 	end
 	local header = spack(">I3 B B I4", #payload, typ, flags, streamid)
-	local ok, err, errno = self.socket:xwrite(header, "f", timeout)
+	local ok, err, errno = self.socket:xwrite(header, "f", 0)
 	if not ok then
 		return nil, err, errno
 	end
-	return self.socket:xwrite(payload, deadline and deadline-monotime())
+	return self.socket:xwrite(payload, flush, timeout)
 end
 
 function connection_methods:ping(timeout)
@@ -470,7 +466,33 @@ function connection_methods:write_goaway_frame(last_stream_id, err_code, debug_m
 end
 
 function connection_methods:set_peer_settings(peer_settings)
-	self.peer_settings = merge_settings(peer_settings, self.peer_settings)
+	--[[ 6.9.2:
+	In addition to changing the flow-control window for streams that are
+	not yet active, a SETTINGS frame can alter the initial flow-control
+	window size for streams with active flow-control windows (that is,
+	streams in the "open" or "half-closed (remote)" state).  When the
+	value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust
+	the size of all stream flow-control windows that it maintains by the
+	difference between the new value and the old value.
+
+	A change to SETTINGS_INITIAL_WINDOW_SIZE can cause the available
+	space in a flow-control window to become negative.  A sender MUST
+	track the negative flow-control window and MUST NOT send new flow-
+	controlled frames until it receives WINDOW_UPDATE frames that cause
+	the flow-control window to become positive.]]
+	local new_window_size = peer_settings[known_settings.INITIAL_WINDOW_SIZE]
+	if new_window_size then
+		local old_windows_size = self.peer_settings[known_settings.INITIAL_WINDOW_SIZE]
+		local delta = new_window_size - old_windows_size
+		if delta ~= 0 then
+			for _, stream in pairs(self.streams) do
+				stream.peer_flow_credits = stream.peer_flow_credits + delta
+				stream.peer_flow_credits_change:signal()
+			end
+		end
+	end
+
+	merge_settings(self.peer_settings, peer_settings)
 	self.peer_settings_cond:signal()
 end
 
@@ -480,7 +502,7 @@ function connection_methods:ack_settings()
 	local acked_settings = self.send_settings[n]
 	if acked_settings then
 		self.send_settings[n] = nil
-		self.acked_settings = merge_settings(acked_settings, self.acked_settings)
+		merge_settings(self.acked_settings, acked_settings)
 	end
 	self.send_settings_ack_cond:signal()
 end
diff --git a/http/h2_error.tld b/http/h2_error.tld
new file mode 100644
index 0000000..78883dc
--- /dev/null
+++ b/http/h2_error.tld
@@ -0,0 +1,15 @@
+interface h2_error
+	const new: (self, {
+		"name": string?,
+		"code": integer?,
+		"description": string?,
+		"message": string?,
+		"traceback": string?,
+		"stream_error": boolean?
+	}) -> (h2_error)
+	const new_traceback: (self, string, boolean, integer?) -> (h2_error)
+	const error:  (self, string, boolean, integer?) -> (void)
+end
+
+errors: {any:h2_error}
+is: (any) -> (boolean)
diff --git a/http/h2_stream.lua b/http/h2_stream.lua
index cd57f1c..fc75129 100644
--- a/http/h2_stream.lua
+++ b/http/h2_stream.lua
@@ -5,11 +5,12 @@ local ce = require "cqueues.errno"
 local new_fifo = require "fifo"
 local band = require "http.bit".band
 local bor = require "http.bit".bor
-local h2_errors = require "http.h2_error".errors
+local h2_error = require "http.h2_error"
+local h2_errors = h2_error.errors
 local stream_common = require "http.stream_common"
-local spack = string.pack or require "compat53.string".pack
-local sunpack = string.unpack or require "compat53.string".unpack
-local unpack = table.unpack or unpack -- luacheck: ignore 113
+local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143
+local sunpack = string.unpack or require "compat53.string".unpack -- luacheck: ignore 143
+local unpack = table.unpack or unpack -- luacheck: ignore 113 143
 
 local assert = assert
 if _VERSION:match("%d+%.?%d*") < "5.3" then
@@ -18,6 +19,39 @@ end
 
 local MAX_HEADER_BUFFER_SIZE = 400*1024 -- 400 KB is max size in h2o
 
+local known_settings = {}
+for i, s in pairs({
+	[0x1] = "HEADER_TABLE_SIZE";
+	[0x2] = "ENABLE_PUSH";
+	[0x3] = "MAX_CONCURRENT_STREAMS";
+	[0x4] = "INITIAL_WINDOW_SIZE";
+	[0x5] = "MAX_FRAME_SIZE";
+	[0x6] = "MAX_HEADER_LIST_SIZE";
+	[0x8] = "SETTINGS_ENABLE_CONNECT_PROTOCOL";
+	[0x10] = "TLS_RENEG_PERMITTED";
+}) do
+	known_settings[i] = s
+	known_settings[s] = i
+end
+
+local frame_types = {
+	[0x0] = "DATA";
+	[0x1] = "HEADERS";
+	[0x2] = "PRIORITY";
+	[0x3] = "RST_STREAM";
+	[0x4] = "SETTING";
+	[0x5] = "PUSH_PROMISE";
+	[0x6] = "PING";
+	[0x7] = "GOAWAY";
+	[0x8] = "WINDOW_UPDATE";
+	[0x9] = "CONTINUATION";
+	[0xa] = "ALTSVC";
+	[0xc] = "ORIGIN";
+}
+for i=0x0, 0x9 do
+	frame_types[frame_types[i]] = i
+end
+
 local frame_handlers = {}
 
 local stream_methods = {}
@@ -36,22 +70,21 @@ function stream_mt:__tostring()
 	end
 	table.sort(dependee_list)
 	dependee_list = table.concat(dependee_list, ",")
-	return string.format("http.h2_stream{connection=%s;id=%d;state=%q;parent=%s;dependees={%s}}",
-		tostring(self.connection), self.id, self.state,
+	return string.format("http.h2_stream{connection=%s;id=%s;state=%q;parent=%s;dependees={%s}}",
+		tostring(self.connection), tostring(self.id), self.state,
 		(self.parent and tostring(self.parent.id) or "nil"), dependee_list)
 end
 
-local function new_stream(connection, id)
-	assert(type(id) == "number" and id >= 0 and id <= 0x7fffffff, "invalid stream id")
+local function new_stream(connection)
 	local self = setmetatable({
 		connection = connection;
 		type = connection.type;
 
 		state = "idle";
 
-		id = id;
-		peer_flow_credits = id ~= 0 and connection.peer_settings[0x4];
-		peer_flow_credits_increase = cc.new();
+		id = nil;
+		peer_flow_credits = 0;
+		peer_flow_credits_change = cc.new();
 		parent = nil;
 		dependees = setmetatable({}, {__mode="kv"});
 		weight = 16; -- http2 spec, section 5.3.5
@@ -68,10 +101,64 @@ local function new_stream(connection, id)
 
 		chunk_fifo = new_fifo();
 		chunk_cond = cc.new();
+
+		end_stream_after_continuation = nil;
+		content_length = nil;
 	}, stream_mt)
 	return self
 end
 
+function stream_methods:pick_id(id)
+	assert(self.id == nil)
+	if id == nil then
+		if self.connection.recv_goaway_lowest then
+			h2_error.errors.PROTOCOL_ERROR("Receivers of a GOAWAY frame MUST NOT open additional streams on the connection")
+		end
+		if self.type == "client" then
+			-- Pick next free odd number
+			id = self.connection.highest_odd_stream + 2
+			self.connection.highest_odd_stream = id
+		else
+			-- Pick next free even number
+			id = self.connection.highest_even_stream + 2
+			self.connection.highest_even_stream = id
+		end
+		self.id = id
+	else
+		assert(type(id) == "number" and id >= 0 and id <= 0x7fffffff and id % 1 == 0, "invalid stream id")
+		assert(self.connection.streams[id] == nil)
+		self.id = id
+		if id % 2 == 0 then
+			if id > self.connection.highest_even_stream then
+				self.connection.highest_even_stream = id
+			end
+			-- stream 'already' existed but was possibly collected. see http2 spec 5.1.1
+			if id <= self.connection.highest_even_non_idle_stream then
+				self:set_state("closed")
+			end
+		else
+			if id > self.connection.highest_odd_stream then
+				self.connection.highest_odd_stream = id
+			end
+			-- stream 'already' existed but was possibly collected. see http2 spec 5.1.1
+			if id <= self.connection.highest_odd_non_idle_stream then
+				self:set_state("closed")
+			end
+		end
+	end
+	-- TODO: check MAX_CONCURRENT_STREAMS
+	self.connection.streams[id] = self
+	if id == 0 then
+		self.connection.stream0 = self
+	else
+		self.peer_flow_credits = self.connection.peer_settings[known_settings.INITIAL_WINDOW_SIZE]
+		self.peer_flow_credits_change:signal()
+		-- Add dependency on stream 0. http2 spec, 5.3.1
+		self.connection.stream0:reprioritise(self)
+	end
+	return true
+end
+
 local valid_states = {
 	["idle"] = 1; -- initial
 	["open"] = 2; -- have sent or received headers; haven't sent body yet
@@ -87,7 +174,25 @@ function stream_methods:set_state(new)
 	if new_order <= valid_states[old] then
 		error("invalid state progression ('"..old.."' to '"..new.."')")
 	end
+	if new ~= "closed" then
+		assert(self.id)
+	end
 	self.state = new
+	if new == "closed" or new == "half closed (remote)" then
+		self.recv_headers_cond:signal()
+		self.chunk_cond:signal()
+	end
+	if old == "idle" then
+		if self.id % 2 == 0 then
+			if self.id > self.connection.highest_even_non_idle_stream then
+				self.connection.highest_even_non_idle_stream = self.id
+			end
+		else
+			if self.id > self.connection.highest_odd_non_idle_stream then
+				self.connection.highest_odd_non_idle_stream = self.id
+			end
+		end
+	end
 	if old == "idle" and new ~= "closed" then
 		self.connection.n_active_streams = self.connection.n_active_streams + 1
 	elseif old ~= "idle" and new == "closed" then
@@ -99,12 +204,14 @@ function stream_methods:set_state(new)
 	end
 end
 
-function stream_methods:write_http2_frame(typ, flags, payload, timeout)
-	return self.connection:write_http2_frame(typ, flags, self.id, payload, timeout)
+function stream_methods:write_http2_frame(typ, flags, payload, timeout, flush)
+	local stream_id = assert(self.id, "stream has unset id")
+	return self.connection:write_http2_frame(typ, flags, stream_id, payload, timeout, flush)
 end
 
 function stream_methods:reprioritise(child, exclusive)
 	assert(child)
+	assert(child.id)
 	assert(child.id ~= 0) -- cannot reprioritise stream 0
 	if self == child then
 		-- http2 spec, section 5.3.1
@@ -149,37 +256,31 @@ local chunk_mt = {
 	__index = chunk_methods;
 }
 
-local function new_chunk(stream, original_length, data)
+local function new_chunk(original_length, data)
 	return setmetatable({
-		stream = stream;
 		original_length = original_length;
-		data = data;
 		acked = false;
+		data = data;
 	}, chunk_mt)
 end
 
-function chunk_methods:ack(no_window_update)
+function chunk_methods:ack()
 	if self.acked then
-		return
-	end
-	self.acked = true
-	local len = self.original_length
-	if len > 0 and not no_window_update then
-		-- ignore errors
-		self.stream:write_window_update(len, 0)
-		self.stream.connection:write_window_update(len, 0)
+		return 0
+	else
+		self.acked = true
+		return self.original_length
 	end
 end
 
--- DATA
-frame_handlers[0x0] = function(stream, flags, payload, deadline) -- luacheck: ignore 212
+frame_handlers[frame_types.DATA] = function(stream, flags, payload, deadline) -- luacheck: ignore 212
 	if stream.id == 0 then
 		return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'DATA' framess MUST be associated with a stream"), ce.EILSEQ
 	end
 	if stream.state == "idle" or stream.state == "reserved (remote)" then
 		return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'DATA' frames not allowed in 'idle' state"), ce.EILSEQ
 	elseif stream.state ~= "open" and stream.state ~= "half closed (local)" then
-		return nil, h2_errors.STREAM_CLOSED:new_traceback("'DATA' frames not allowed in '" .. stream.state .. "' state", true), ce.EILSEQ
+		return nil, h2_errors.STREAM_CLOSED:new_traceback("'DATA' frames not allowed in '" .. stream.state .. "' state"), ce.EILSEQ
 	end
 
 	local end_stream = band(flags, 0x1) ~= 0
@@ -192,36 +293,42 @@ frame_handlers[0x0] = function(stream, flags, payload, deadline) -- luacheck: ig
 		if pad_len >= #payload then -- >= will take care of the pad_len itself
 			return nil, h2_errors.PROTOCOL_ERROR:new_traceback("length of the padding is the length of the frame payload or greater"), ce.EILSEQ
 		elseif payload:match("[^%z]", -pad_len) then
+			-- 6.1: A receiver is not obligated to verify padding but MAY treat non-zero padding as a connection error of type PROTOCOL_ERROR.
 			return nil, h2_errors.PROTOCOL_ERROR:new_traceback("padding not null bytes"), ce.EILSEQ
 		end
 		payload = payload:sub(2, -pad_len-1)
 	end
 
-	local chunk = new_chunk(stream, original_length, payload)
-	stream.chunk_fifo:push(chunk)
-	stream.stats_recv = stream.stats_recv + #payload
-	if end_stream then
-		stream.chunk_fifo:push(nil)
+	local stats_recv = stream.stats_recv + #payload
+	if stream.content_length and stats_recv > stream.content_length then
+		return nil, h2_errors.PROTOCOL_ERROR:new_traceback("content-length exceeded", true), ce.EILSEQ
 	end
-	stream.chunk_cond:signal()
+
+	local chunk = new_chunk(original_length, payload)
+	stream.chunk_fifo:push(chunk)
+	stream.stats_recv = stats_recv
 
 	if end_stream then
+		stream.chunk_fifo:push(nil)
+		-- chunk_cond gets signaled by :set_state
 		if stream.state == "half closed (local)" then
 			stream:set_state("closed")
 		else
 			stream:set_state("half closed (remote)")
 		end
+	else
+		stream.chunk_cond:signal()
 	end
 
 	return true
 end
 
-function stream_methods:write_data_frame(payload, end_stream, padded, timeout)
+function stream_methods:write_data_frame(payload, end_stream, padded, timeout, flush)
 	if self.id == 0 then
 		h2_errors.PROTOCOL_ERROR("'DATA' frames MUST be associated with a stream")
 	end
 	if self.state ~= "open" and self.state ~= "half closed (remote)" then
-		h2_errors.STREAM_CLOSED("'DATA' frame not allowed in '" .. self.state .. "' state", true)
+		h2_errors.STREAM_CLOSED("'DATA' frame not allowed in '" .. self.state .. "' state")
 	end
 	local pad_len, padding = "", ""
 	local flags = 0
@@ -241,7 +348,7 @@ function stream_methods:write_data_frame(payload, end_stream, padded, timeout)
 	if new_stream_peer_flow_credits < 0 or new_connection_peer_flow_credits < 0 then
 		h2_errors.FLOW_CONTROL_ERROR("not enough flow credits")
 	end
-	local ok, err, errno = self:write_http2_frame(0x0, flags, payload, timeout)
+	local ok, err, errno = self:write_http2_frame(frame_types.DATA, flags, payload, timeout, flush)
 	if not ok then return nil, err, errno end
 	self.peer_flow_credits = new_stream_peer_flow_credits
 	self.connection.peer_flow_credits = new_connection_peer_flow_credits
@@ -265,7 +372,13 @@ local valid_pseudo_headers = {
 	[":status"] = false;
 }
 local function validate_headers(headers, is_request, nth_header, ended_stream)
-	do -- Validate that all colon fields are before other ones (section 8.1.2.1)
+	-- Section 8.1.2: A request or response containing uppercase header field names MUST be treated as malformed
+	for name in headers:each() do
+		if name:lower() ~= name then
+			return nil, h2_errors.PROTOCOL_ERROR:new_traceback("header field names MUST be lowercase", true), ce.EINVAL
+		end
+	end
+	do -- Section 8.1.2.1: Validate that all colon fields are before other ones
 		local seen_non_colon = false
 		for name, value in headers:each() do
 			if name:sub(1,1) == ":" then
@@ -275,8 +388,7 @@ local function validate_headers(headers, is_request, nth_header, ended_stream)
 				defined for responses MUST NOT appear in requests.
 				Pseudo-header fields MUST NOT appear in trailers.
 				Endpoints MUST treat a request or response that contains
-				undefined or invalid pseudo-header fields as malformed
-				(Section 8.1.2.6)]]
+				undefined or invalid pseudo-header fields as malformed]]
 				if (is_request and nth_header ~= 1) or valid_pseudo_headers[name] ~= is_request then
 					return nil, h2_errors.PROTOCOL_ERROR:new_traceback("Pseudo-header fields are only valid in the context in which they are defined", true), ce.EILSEQ
 				end
@@ -340,11 +452,13 @@ local function validate_headers(headers, is_request, nth_header, ended_stream)
 	return true
 end
 
-local function process_end_headers(stream, end_stream, pad_len, pos, promised_stream_id, payload)
+local function process_end_headers(stream, end_stream, pad_len, pos, promised_stream, payload)
 	if pad_len > 0 then
 		if pad_len + pos - 1 > #payload then
 			return nil, h2_errors.PROTOCOL_ERROR:new_traceback("length of the padding is the length of the frame payload or greater"), ce.EILSEQ
 		elseif payload:match("[^%z]", -pad_len) then
+			-- 6.2: Padding fields and flags are identical to those defined for DATA frames
+			-- 6.1: A receiver is not obligated to verify padding but MAY treat non-zero padding as a connection error of type PROTOCOL_ERROR.
 			return nil, h2_errors.PROTOCOL_ERROR:new_traceback("padding not null bytes"), ce.EILSEQ
 		end
 		payload = payload:sub(1, -pad_len-1)
@@ -358,24 +472,27 @@ local function process_end_headers(stream, end_stream, pad_len, pos, promised_st
 		return nil, h2_errors.COMPRESSION_ERROR:new_traceback("incomplete header fragment"), ce.EILSEQ
 	end
 
-	if not promised_stream_id then
+	if not promised_stream then
 		stream.stats_recv_headers = stream.stats_recv_headers + 1
-		local validate_ok, validate_err, errno2 = validate_headers(headers, stream.type ~= "client", stream.stats_recv_headers, stream.state == "half closed (remote)" or stream.state == "closed")
+		local validate_ok, validate_err, errno2 = validate_headers(headers, stream.type ~= "client", stream.stats_recv_headers, end_stream)
 		if not validate_ok then
 			return nil, validate_err, errno2
 		end
+		if headers:has("content-length") then
+			stream.content_length = tonumber(headers:get("content-length"), 10)
+		end
 		stream.recv_headers_fifo:push(headers)
-		stream.recv_headers_cond:signal()
 
 		if end_stream then
 			stream.chunk_fifo:push(nil)
-			stream.chunk_cond:signal()
+			-- recv_headers_cond and chunk_cond get signaled by :set_state
 			if stream.state == "half closed (local)" then
 				stream:set_state("closed")
 			else
 				stream:set_state("half closed (remote)")
 			end
 		else
+			stream.recv_headers_cond:signal()
 			if stream.state == "idle" then
 				stream:set_state("open")
 			end
@@ -386,23 +503,26 @@ local function process_end_headers(stream, end_stream, pad_len, pos, promised_st
 			return nil, validate_err, errno2
 		end
 
-		local promised_stream = stream.connection:new_stream(promised_stream_id)
-		stream:reprioritise(promised_stream)
 		promised_stream:set_state("reserved (remote)")
 		promised_stream.recv_headers_fifo:push(headers)
-		stream.connection.new_streams:push(promised_stream)
-		stream.connection.new_streams_cond:signal(1)
+		promised_stream.recv_headers_cond:signal()
+
+		-- If we haven't seen this stream before, and we should be discarding frames from it,
+		-- then don't push it into the new_streams fifo
+		if stream.connection.send_goaway_lowest == nil or promised_stream.id <= stream.connection.send_goaway_lowest then
+			stream.connection.new_streams:push(promised_stream)
+			stream.connection.new_streams_cond:signal(1)
+		end
 	end
 	return true
 end
 
--- HEADERS
-frame_handlers[0x1] = function(stream, flags, payload, deadline) -- luacheck: ignore 212
+frame_handlers[frame_types.HEADERS] = function(stream, flags, payload, deadline) -- luacheck: ignore 212
 	if stream.id == 0 then
 		return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'HEADERS' frames MUST be associated with a stream"), ce.EILSEQ
 	end
 	if stream.state ~= "idle" and stream.state ~= "open" and stream.state ~= "half closed (local)" and stream.state ~= "reserved (remote)" then
-		return nil, h2_errors.STREAM_CLOSED:new_traceback("'HEADERS' frame not allowed in '" .. stream.state .. "' state", true), ce.EILSEQ
+		return nil, h2_errors.STREAM_CLOSED:new_traceback("'HEADERS' frame not allowed in '" .. stream.state .. "' state"), ce.EILSEQ
 	end
 
 	local end_stream = band(flags, 0x1) ~= 0
@@ -453,6 +573,7 @@ frame_handlers[0x1] = function(stream, flags, payload, deadline) -- luacheck: ig
 		return process_end_headers(stream, end_stream, pad_len, pos, nil, payload)
 	else
 		stream.connection.need_continuation = stream
+		stream.connection.recv_headers_end_stream = end_stream
 		stream.connection.recv_headers_buffer = { payload }
 		stream.connection.recv_headers_buffer_pos = pos
 		stream.connection.recv_headers_buffer_pad_len = pad_len
@@ -462,8 +583,11 @@ frame_handlers[0x1] = function(stream, flags, payload, deadline) -- luacheck: ig
 	end
 end
 
-function stream_methods:write_headers_frame(payload, end_stream, end_headers, padded, exclusive, stream_dep, weight, timeout)
+function stream_methods:write_headers_frame(payload, end_stream, end_headers, padded, exclusive, stream_dep, weight, timeout, flush)
 	assert(self.state ~= "closed" and self.state ~= "half closed (local)")
+	if self.id == nil then
+		self:pick_id()
+	end
 	local pad_len, pri, padding = "", "", ""
 	local flags = 0
 	if end_stream then
@@ -479,7 +603,7 @@ function stream_methods:write_headers_frame(payload, end_stream, end_headers, pa
 	end
 	if weight or stream_dep then
 		flags = bor(flags, 0x20)
-		assert(stream_dep < 0x80000000)
+		assert(stream_dep <= 0x7fffffff)
 		local tmp = stream_dep
 		if exclusive then
 			tmp = bor(tmp, 0x80000000)
@@ -488,27 +612,32 @@ function stream_methods:write_headers_frame(payload, end_stream, end_headers, pa
 		pri = spack("> I4 B", tmp, weight)
 	end
 	payload = pad_len .. pri .. payload .. padding
-	local ok, err, errno = self:write_http2_frame(0x1, flags, payload, timeout)
-	if ok == nil then return nil, err, errno end
+	local ok, err, errno = self:write_http2_frame(frame_types.HEADERS, flags, payload, timeout, flush)
+	if ok == nil then
+		return nil, err, errno
+	end
 	self.stats_sent_headers = self.stats_sent_headers + 1
-	if end_stream then
-		if self.state == "half closed (remote)" then
-			self:set_state("closed")
+	if end_headers then
+		if end_stream then
+			if self.state == "half closed (remote)" or self.state == "reserved (local)" then
+				self:set_state("closed")
+			else
+				self:set_state("half closed (local)")
+			end
 		else
-			self:set_state("half closed (local)")
+			if self.state == "idle" then
+				self:set_state("open")
+			elseif self.state == "reserved (local)" then
+				self:set_state("half closed (remote)")
+			end
 		end
 	else
-		if self.state == "reserved (local)" then
-			self:set_state("half closed (remote)")
-		elseif self.state == "idle" then
-			self:set_state("open")
-		end
+		self.end_stream_after_continuation = end_stream
 	end
 	return ok
 end
 
--- PRIORITY
-frame_handlers[0x2] = function(stream, flags, payload) -- luacheck: ignore 212
+frame_handlers[frame_types.PRIORITY] = function(stream, flags, payload) -- luacheck: ignore 212
 	if stream.id == 0 then
 		return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'PRIORITY' frames MUST be associated with a stream"), ce.EILSEQ
 	end
@@ -523,29 +652,36 @@ frame_handlers[0x2] = function(stream, flags, payload) -- luacheck: ignore 212
 	exclusive = band(tmp, 0x80000000) ~= 0
 	stream_dep = band(tmp, 0x7fffffff)
 
+	-- 5.3.1. Stream Dependencies
+	-- A dependency on a stream that is not currently in the tree
+	-- results in that stream being given a default priority
 	local new_parent = stream.connection.streams[stream_dep]
-	local ok, err, errno = new_parent:reprioritise(stream, exclusive)
-	if not ok then
-		return nil, err, errno
+	if new_parent then
+		local ok, err, errno = new_parent:reprioritise(stream, exclusive)
+		if not ok then
+			return nil, err, errno
+		end
+		stream.weight = weight
 	end
-	stream.weight = weight
 
 	return true
 end
 
-function stream_methods:write_priority_frame(exclusive, stream_dep, weight, timeout)
-	assert(stream_dep < 0x80000000)
+function stream_methods:write_priority_frame(exclusive, stream_dep, weight, timeout, flush)
+	assert(stream_dep <= 0x7fffffff)
+	if self.id == nil then
+		self:pick_id()
+	end
 	local tmp = stream_dep
 	if exclusive then
 		tmp = bor(tmp, 0x80000000)
 	end
 	weight = weight and weight - 1 or 0
 	local payload = spack("> I4 B", tmp, weight)
-	return self:write_http2_frame(0x2, 0, payload, timeout)
+	return self:write_http2_frame(frame_types.PRIORITY, 0, payload, timeout, flush)
 end
 
--- RST_STREAM
-frame_handlers[0x3] = function(stream, flags, payload, deadline) -- luacheck: ignore 212
+frame_handlers[frame_types.RST_STREAM] = function(stream, flags, payload, deadline) -- luacheck: ignore 212
 	if stream.id == 0 then
 		return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'RST_STREAM' frames MUST be associated with a stream"), ce.EILSEQ
 	end
@@ -554,22 +690,24 @@ frame_handlers[0x3] = function(stream, flags, payload, deadline) -- luacheck: ig
 	end
 	if stream.state == "idle" then
 		return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'RST_STREAM' frames MUST NOT be sent for a stream in the 'idle' state"), ce.EILSEQ
+	elseif stream.state == "closed" then
+		-- probably a delayed RST_STREAM, ignore
+		return true
 	end
 
 	local err_code = sunpack(">I4", payload)
 
 	stream.rst_stream_error = (h2_errors[err_code] or h2_errors.INTERNAL_ERROR):new {
 		message = string.format("'RST_STREAM' on stream #%d (code=0x%x)", stream.id, err_code);
+		stream_error = true;
 	}
 
 	stream:set_state("closed")
-	stream.recv_headers_cond:signal()
-	stream.chunk_cond:signal()
 
 	return true
 end
 
-function stream_methods:write_rst_stream(err_code, timeout)
+function stream_methods:write_rst_stream_frame(err_code, timeout, flush)
 	if self.id == 0 then
 		h2_errors.PROTOCOL_ERROR("'RST_STREAM' frames MUST be associated with a stream")
 	end
@@ -578,7 +716,7 @@ function stream_methods:write_rst_stream(err_code, timeout)
 	end
 	local flags = 0
 	local payload = spack(">I4", err_code)
-	local ok, err, errno = self:write_http2_frame(0x3, flags, payload, timeout)
+	local ok, err, errno = self:write_http2_frame(frame_types.RST_STREAM, flags, payload, timeout, flush)
 	if not ok then return nil, err, errno end
 	if self.state ~= "closed" then
 		self:set_state("closed")
@@ -587,8 +725,26 @@ function stream_methods:write_rst_stream(err_code, timeout)
 	return ok
 end
 
--- SETTING
-frame_handlers[0x4] = function(stream, flags, payload, deadline)
+function stream_methods:rst_stream(err, timeout)
+	local code
+	if err == nil then
+		code = 0
+	elseif h2_error.is(err) then
+		code = err.code
+	else
+		err = h2_errors.INTERNAL_ERROR:new {
+			message = tostring(err);
+			stream_error = true;
+		}
+		code = err.code
+	end
+	if self.rst_stream_error == nil then
+		self.rst_stream_error = err
+	end
+	return self:write_rst_stream_frame(code, timeout)
+end
+
+frame_handlers[frame_types.SETTING] = function(stream, flags, payload, deadline)
 	if stream.id ~= 0 then
 		return nil, h2_errors.PROTOCOL_ERROR:new_traceback("stream identifier for a 'SETTINGS' frame MUST be zero"), ce.EILSEQ
 	end
@@ -607,11 +763,11 @@ frame_handlers[0x4] = function(stream, flags, payload, deadline)
 		local peer_settings = {}
 		for i=1, #payload, 6 do
 			local id, val = sunpack(">I2 I4", payload, i)
-			if id == 0x1 then
+			if id == known_settings.HEADER_TABLE_SIZE then
 				stream.connection.encoding_context:set_max_dynamic_table_size(val)
 				-- Add a 'max size' element to the next outgoing header
 				stream.connection.encoding_context:encode_max_size(val)
-			elseif id == 0x2 then
+			elseif id == known_settings.ENABLE_PUSH then
 				-- Convert to boolean
 				if val == 0 then
 					val = false
@@ -626,11 +782,11 @@ frame_handlers[0x4] = function(stream, flags, payload, deadline)
 					-- error of type PROTOCOL_ERROR.
 					return nil, h2_errors.PROTOCOL_ERROR:new_traceback("SETTINGS_ENABLE_PUSH not allowed for clients"), ce.EILSEQ
 				end
-			elseif id == 0x4 then
+			elseif id == known_settings.INITIAL_WINDOW_SIZE then
 				if val >= 2^31 then
 					return nil, h2_errors.FLOW_CONTROL_ERROR:new_traceback("SETTINGS_INITIAL_WINDOW_SIZE must be less than 2^31"), ce.EILSEQ
 				end
-			elseif id == 0x5 then
+			elseif id == known_settings.MAX_FRAME_SIZE then
 				if val < 16384 then
 					return nil, h2_errors.PROTOCOL_ERROR:new_traceback("SETTINGS_MAX_FRAME_SIZE must be greater than or equal to 16384"), ce.EILSEQ
 				elseif val >= 2^24 then
@@ -641,8 +797,12 @@ frame_handlers[0x4] = function(stream, flags, payload, deadline)
 		end
 		stream.connection:set_peer_settings(peer_settings)
 		-- Ack server's settings
-		-- XXX: This shouldn't ignore all errors (it probably should not flush)
-		stream:write_settings_frame(true, nil, deadline and deadline-monotime())
+		local ok, err, errno = stream:write_settings_frame(true, nil, 0, "f")
+		if not ok then
+			return ok, err, errno
+		end
+		-- ignore :flush failure
+		stream.connection:flush(deadline and deadline-monotime())
 		return true
 	end
 end
@@ -656,21 +816,34 @@ local function pack_settings_payload(settings)
 		i = i + 1
 	end
 	local HEADER_TABLE_SIZE = settings[0x1]
+	if HEADER_TABLE_SIZE == nil then
+		HEADER_TABLE_SIZE = settings.HEADER_TABLE_SIZE
+	end
 	if HEADER_TABLE_SIZE ~= nil then
 		append(0x1, HEADER_TABLE_SIZE)
 	end
 	local ENABLE_PUSH = settings[0x2]
+	if ENABLE_PUSH == nil then
+		ENABLE_PUSH = settings.ENABLE_PUSH
+	end
 	if ENABLE_PUSH ~= nil then
 		if type(ENABLE_PUSH) == "boolean" then
 			ENABLE_PUSH = ENABLE_PUSH and 1 or 0
 		end
 		append(0x2, ENABLE_PUSH)
+		ENABLE_PUSH = ENABLE_PUSH ~= 0
 	end
 	local MAX_CONCURRENT_STREAMS = settings[0x3]
+	if MAX_CONCURRENT_STREAMS == nil then
+		MAX_CONCURRENT_STREAMS = settings.MAX_CONCURRENT_STREAMS
+	end
 	if MAX_CONCURRENT_STREAMS ~= nil then
 		append(0x3, MAX_CONCURRENT_STREAMS)
 	end
 	local INITIAL_WINDOW_SIZE = settings[0x4]
+	if INITIAL_WINDOW_SIZE == nil then
+		INITIAL_WINDOW_SIZE = settings.INITIAL_WINDOW_SIZE
+	end
 	if INITIAL_WINDOW_SIZE ~= nil then
 		if INITIAL_WINDOW_SIZE >= 2^31 then
 			h2_errors.FLOW_CONTROL_ERROR("SETTINGS_INITIAL_WINDOW_SIZE must be less than 2^31")
@@ -678,6 +851,9 @@ local function pack_settings_payload(settings)
 		append(0x4, INITIAL_WINDOW_SIZE)
 	end
 	local MAX_FRAME_SIZE = settings[0x5]
+	if MAX_FRAME_SIZE == nil then
+		MAX_FRAME_SIZE = settings.MAX_FRAME_SIZE
+	end
 	if MAX_FRAME_SIZE ~= nil then
 		if MAX_FRAME_SIZE < 16384 then
 			h2_errors.PROTOCOL_ERROR("SETTINGS_MAX_FRAME_SIZE must be greater than or equal to 16384")
@@ -687,17 +863,28 @@ local function pack_settings_payload(settings)
 		append(0x5, MAX_FRAME_SIZE)
 	end
 	local MAX_HEADER_LIST_SIZE = settings[0x6]
+	if MAX_HEADER_LIST_SIZE == nil then
+		MAX_HEADER_LIST_SIZE = settings.MAX_HEADER_LIST_SIZE
+	end
 	if MAX_HEADER_LIST_SIZE ~= nil then
 		append(0x6, MAX_HEADER_LIST_SIZE)
 	end
-	return spack(">" .. ("I2 I4"):rep(i), unpack(a, 1, i*2))
+	local settings_to_merge = {
+		HEADER_TABLE_SIZE;
+		ENABLE_PUSH;
+		MAX_CONCURRENT_STREAMS;
+		INITIAL_WINDOW_SIZE;
+		MAX_FRAME_SIZE;
+		MAX_HEADER_LIST_SIZE;
+	}
+	return spack(">" .. ("I2 I4"):rep(i), unpack(a, 1, i*2)), settings_to_merge
 end
 
-function stream_methods:write_settings_frame(ACK, settings, timeout)
+function stream_methods:write_settings_frame(ACK, settings, timeout, flush)
 	if self.id ~= 0 then
 		h2_errors.PROTOCOL_ERROR("'SETTINGS' frames must be on stream id 0")
 	end
-	local flags, payload
+	local flags, payload, settings_to_merge
 	if ACK then
 		if settings ~= nil then
 			h2_errors.PROTOCOL_ERROR("'SETTINGS' ACK cannot have new settings")
@@ -706,21 +893,20 @@ function stream_methods:write_settings_frame(ACK, settings, timeout)
 		payload = ""
 	else
 		flags = 0
-		payload = pack_settings_payload(settings)
+		payload, settings_to_merge = pack_settings_payload(settings)
 	end
-	local ok, err, errno = self:write_http2_frame(0x4, flags, payload, timeout)
+	local ok, err, errno = self:write_http2_frame(frame_types.SETTING, flags, payload, timeout, flush)
 	if ok and not ACK then
 		local n = self.connection.send_settings.n + 1
 		self.connection.send_settings.n = n
-		self.connection.send_settings[n] = settings
+		self.connection.send_settings[n] = settings_to_merge
 		ok = n
 	end
 	return ok, err, errno
 end
 
--- PUSH_PROMISE
-frame_handlers[0x5] = function(stream, flags, payload, deadline) -- luacheck: ignore 212
-	if not stream.connection.acked_settings[0x2] then
+frame_handlers[frame_types.PUSH_PROMISE] = function(stream, flags, payload, deadline) -- luacheck: ignore 212
+	if not stream.connection.acked_settings[known_settings.ENABLE_PUSH] then
 		-- An endpoint that has both set this parameter to 0 and had it acknowledged MUST
 		-- treat the receipt of a PUSH_PROMISE frame as a connection error of type PROTOCOL_ERROR.
 		return nil, h2_errors.PROTOCOL_ERROR:new_traceback("SETTINGS_ENABLE_PUSH is 0"), ce.EILSEQ
@@ -756,22 +942,31 @@ frame_handlers[0x5] = function(stream, flags, payload, deadline) -- luacheck: ig
 		return nil, h2_errors.PROTOCOL_ERROR:new_traceback("headers too large"), ce.EILSEQ
 	end
 
+	local promised_stream = stream.connection:new_stream(promised_stream_id)
+	stream:reprioritise(promised_stream)
+
 	if end_headers then
-		return process_end_headers(stream, false, pad_len, pos, promised_stream_id, payload)
+		return process_end_headers(stream, false, pad_len, pos, promised_stream, payload)
 	else
+		stream.connection.need_continuation = stream
+		stream.connection.promised_stream = promised_stream
+		stream.connection.recv_headers_end_stream = false
 		stream.connection.recv_headers_buffer = { payload }
 		stream.connection.recv_headers_buffer_pos = pos
 		stream.connection.recv_headers_buffer_pad_len = pad_len
 		stream.connection.recv_headers_buffer_items = 1
 		stream.connection.recv_headers_buffer_length = len
-		stream.connection.promised_steam_id = promised_stream_id
 		return true
 	end
 end
 
-function stream_methods:write_push_promise_frame(promised_stream_id, payload, end_headers, padded, timeout)
+function stream_methods:write_push_promise_frame(promised_stream_id, payload, end_headers, padded, timeout, flush)
 	assert(self.state == "open" or self.state == "half closed (remote)")
 	assert(self.id ~= 0)
+	local promised_stream = self.connection.streams[promised_stream_id]
+	assert(promised_stream and promised_stream.state == "idle")
+	-- 8.2.1: PUSH_PROMISE frames MUST NOT be sent by the client.
+	assert(self.type == "server" and promised_stream.id % 2 == 0)
 	local pad_len, padding = "", ""
 	local flags = 0
 	if end_headers then
@@ -782,17 +977,25 @@ function stream_methods:write_push_promise_frame(promised_stream_id, payload, en
 		pad_len = spack("> B", padded)
 		padding = ("\0"):rep(padded)
 	end
-	assert(promised_stream_id > 0)
-	assert(promised_stream_id < 0x80000000)
-	assert(promised_stream_id % 2 == 0)
-	-- TODO: promised_stream_id must be valid for sender
 	promised_stream_id = spack(">I4", promised_stream_id)
 	payload = pad_len .. promised_stream_id .. payload .. padding
-	return self:write_http2_frame(0x5, flags, payload, timeout)
+	local ok, err, errno = self:write_http2_frame(frame_types.PUSH_PROMISE, flags, payload, 0, "f")
+	if ok == nil then
+		return nil, err, errno
+	end
+	if end_headers then
+		promised_stream:set_state("reserved (local)")
+	else
+		promised_stream.end_stream_after_continuation = false
+	end
+	if flush ~= "f" then
+		return self.connection:flush(timeout)
+	else
+		return true
+	end
 end
 
--- PING
-frame_handlers[0x6] = function(stream, flags, payload, deadline)
+frame_handlers[frame_types.PING] = function(stream, flags, payload, deadline)
 	if stream.id ~= 0 then
 		return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'PING' must be on stream id 0"), ce.EILSEQ
 	end
@@ -814,7 +1017,7 @@ frame_handlers[0x6] = function(stream, flags, payload, deadline)
 	end
 end
 
-function stream_methods:write_ping_frame(ACK, payload, timeout)
+function stream_methods:write_ping_frame(ACK, payload, timeout, flush)
 	if self.id ~= 0 then
 		h2_errors.PROTOCOL_ERROR("'PING' frames must be on stream id 0")
 	end
@@ -822,11 +1025,10 @@ function stream_methods:write_ping_frame(ACK, payload, timeout)
 		h2_errors.FRAME_SIZE_ERROR("'PING' frames must have 8 byte payload")
 	end
 	local flags = ACK and 0x1 or 0
-	return self:write_http2_frame(0x6, flags, payload, timeout)
+	return self:write_http2_frame(frame_types.PING, flags, payload, timeout, flush)
 end
 
--- GOAWAY
-frame_handlers[0x7] = function(stream, flags, payload, deadline) -- luacheck: ignore 212
+frame_handlers[frame_types.GOAWAY] = function(stream, flags, payload, deadline) -- luacheck: ignore 212
 	if stream.id ~= 0 then
 		return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'GOAWAY' frames must be on stream id 0"), ce.EILSEQ
 	end
@@ -844,26 +1046,31 @@ frame_handlers[0x7] = function(stream, flags, payload, deadline) -- luacheck: ig
 	return true
 end
 
-function stream_methods:write_goaway_frame(last_streamid, err_code, debug_msg, timeout)
+function stream_methods:write_goaway_frame(last_streamid, err_code, debug_msg, timeout, flush)
 	if self.id ~= 0 then
 		h2_errors.PROTOCOL_ERROR("'GOAWAY' frames MUST be on stream 0")
 	end
-	assert(last_streamid)
+	if self.connection.send_goaway_lowest and last_streamid > self.connection.send_goaway_lowest then
+		h2_errors.PROTOCOL_ERROR("Endpoints MUST NOT increase the value they send in the last stream identifier")
+	end
 	local flags = 0
 	local payload = spack(">I4 I4", last_streamid, err_code)
 	if debug_msg then
 		payload = payload .. debug_msg
 	end
-	local ok, err, errno = self:write_http2_frame(0x7, flags, payload, timeout)
+	local ok, err, errno = self:write_http2_frame(frame_types.GOAWAY, flags, payload, 0, "f")
 	if not ok then
 		return nil, err, errno
 	end
-	self.connection.send_goaway_lowest = math.min(last_streamid, self.connection.send_goaway_lowest or math.huge)
-	return true
+	self.connection.send_goaway_lowest = last_streamid
+	if flush ~= "f" then
+		return self.connection:flush(timeout)
+	else
+		return true
+	end
 end
 
--- WINDOW_UPDATE
-frame_handlers[0x8] = function(stream, flags, payload, deadline) -- luacheck: ignore 212
+frame_handlers[frame_types.WINDOW_UPDATE] = function(stream, flags, payload, deadline) -- luacheck: ignore 212
 	if #payload ~= 4 then
 		return nil, h2_errors.FRAME_SIZE_ERROR:new_traceback("'WINDOW_UPDATE' frames must be 4 bytes"), ce.EILSEQ
 	end
@@ -891,26 +1098,26 @@ frame_handlers[0x8] = function(stream, flags, payload, deadline) -- luacheck: ig
 		return nil, h2_errors.FLOW_CONTROL_ERROR:new_traceback("A sender MUST NOT allow a flow-control window to exceed 2^31-1 octets", stream.id ~= 0), ce.EILSEQ
 	end
 	ob.peer_flow_credits = newval
-	ob.peer_flow_credits_increase:signal()
+	ob.peer_flow_credits_change:signal()
 
 	return true
 end
 
-function stream_methods:write_window_update_frame(inc, timeout)
+function stream_methods:write_window_update_frame(inc, timeout, flush)
 	local flags = 0
 	if self.id ~= 0 and self.state == "idle" then
 		h2_errors.PROTOCOL_ERROR([['WINDOW_UPDATE' frames not allowed in "idle" state]])
 	end
-	if inc >= 0x80000000 or inc <= 0 then
+	if inc > 0x7fffffff or inc <= 0 then
 		h2_errors.PROTOCOL_ERROR("invalid window update increment", true)
 	end
 	local payload = spack(">I4", inc)
-	return self:write_http2_frame(0x8, flags, payload, timeout)
+	return self:write_http2_frame(frame_types.WINDOW_UPDATE, flags, payload, timeout, flush)
 end
 
 function stream_methods:write_window_update(inc, timeout)
-	while inc >= 0x80000000 do
-		local ok, err, errno = self:write_window_update_frame(0x7fffffff, 0)
+	while inc > 0x7fffffff do
+		local ok, err, errno = self:write_window_update_frame(0x7fffffff, 0, "f")
 		if not ok then
 			return nil, err, errno
 		end
@@ -919,8 +1126,7 @@ function stream_methods:write_window_update(inc, timeout)
 	return self:write_window_update_frame(inc, timeout)
 end
 
--- CONTINUATION
-frame_handlers[0x9] = function(stream, flags, payload, deadline) -- luacheck: ignore 212
+frame_handlers[frame_types.CONTINUATION] = function(stream, flags, payload, deadline) -- luacheck: ignore 212
 	if stream.id == 0 then
 		return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'CONTINUATION' frames MUST be associated with a stream"), ce.EILSEQ
 	end
@@ -938,44 +1144,66 @@ frame_handlers[0x9] = function(stream, flags, payload, deadline) -- luacheck: ig
 	stream.connection.recv_headers_buffer_length = len
 
 	if end_headers then
+		local promised_stream = stream.connection.promised_stream
 		local pad_len = stream.connection.recv_headers_buffer_pad_len
 		local pos = stream.connection.recv_headers_buffer_pos
+		local end_stream = stream.connection.recv_headers_end_stream
 		payload = table.concat(stream.connection.recv_headers_buffer, "", 1, stream.connection.recv_headers_buffer_items)
-		local promised_steam_id = stream.connection.promised_steam_id
+		stream.connection.recv_headers_end_stream = nil
 		stream.connection.recv_headers_buffer = nil
 		stream.connection.recv_headers_buffer_pos = nil
 		stream.connection.recv_headers_buffer_pad_len = nil
 		stream.connection.recv_headers_buffer_items = nil
 		stream.connection.recv_headers_buffer_length = nil
-		stream.connection.promised_steam_id = nil
+		stream.connection.promised_stream = nil
 		stream.connection.need_continuation = nil
-		return process_end_headers(stream, false, pad_len, pos, promised_steam_id, payload)
+		return process_end_headers(stream, end_stream, pad_len, pos, promised_stream, payload)
 	else
 		return true
 	end
 end
 
-function stream_methods:write_continuation_frame(payload, end_headers, timeout)
-	assert(self.state == "open" or self.state == "half closed (remote)")
+function stream_methods:write_continuation_frame(payload, end_headers, timeout, flush)
+	assert(self.state ~= "closed" and self.state ~= "half closed (local)")
 	local flags = 0
 	if end_headers then
 		flags = bor(flags, 0x4)
 	end
-	return self:write_http2_frame(0x9, flags, payload, timeout)
+	local ok, err, errno = self:write_http2_frame(frame_types.CONTINUATION, flags, payload, timeout, flush)
+	if ok == nil then
+		return nil, err, errno
+	end
+	if end_headers then
+		if self.end_stream_after_continuation then
+			if self.state == "half closed (remote)" or self.state == "reserved (local)" then
+				self:set_state("closed")
+			else
+				self:set_state("half closed (local)")
+			end
+		else
+			if self.state == "idle" then
+				self:set_state("open")
+			elseif self.state == "reserved (local)" then
+				self:set_state("half closed (remote)")
+			end
+		end
+	else
+		self.end_stream_after_continuation = nil
+	end
+	return ok
 end
 
 -------------------------------------------
 
 function stream_methods:shutdown()
 	if self.state ~= "idle" and self.state ~= "closed" and self.id ~= 0 then
-		self:write_rst_stream(0, 0) -- ignore result
+		self:rst_stream(nil, 0) -- ignore result
 	end
 	local len = 0
 	for i=1, self.chunk_fifo:length() do
 		local chunk = self.chunk_fifo:peek(i)
 		if chunk ~= nil then
-			chunk:ack(true)
-			len = len + #chunk.data
+			len = len + chunk:ack()
 		end
 	end
 	if len > 0 then
@@ -1028,39 +1256,49 @@ function stream_methods:get_next_chunk(timeout)
 		return nil
 	else
 		local data = chunk.data
-		chunk:ack(false)
+		local len = chunk:ack()
+		if len > 0 then
+			-- if they don't get flushed now they will get flushed on next read or write
+			self:write_window_update(len, 0)
+			self.connection:write_window_update(len, 0)
+		end
 		return data
 	end
 end
 
 function stream_methods:unget(str)
-	local chunk = new_chunk(self, 0, str) -- 0 means :ack does nothing
+	local chunk = new_chunk(0, str)
 	self.chunk_fifo:insert(1, chunk)
+	self.chunk_cond:signal()
 	return true
 end
 
-local function write_headers(self, func, headers, timeout)
+local function write_headers(self, func, headers, extra_frame_data_len, timeout)
 	local deadline = timeout and (monotime()+timeout)
+
+	local SETTINGS_MAX_FRAME_SIZE = self.connection.peer_settings[known_settings.MAX_FRAME_SIZE]
+	local first_frame_max_size = SETTINGS_MAX_FRAME_SIZE - extra_frame_data_len
+	assert(first_frame_max_size >= 0)
+
 	local encoding_context = self.connection.encoding_context
 	encoding_context:encode_headers(headers)
 	local payload = encoding_context:render_data()
 	encoding_context:clear_data()
 
-	local SETTINGS_MAX_FRAME_SIZE = self.connection.peer_settings[0x5]
-	if #payload <= SETTINGS_MAX_FRAME_SIZE then
+	if #payload <= first_frame_max_size then
 		local ok, err, errno = func(payload, true, deadline)
 		if not ok then
 			return ok, err, errno
 		end
 	else
 		do
-			local partial = payload:sub(1, SETTINGS_MAX_FRAME_SIZE)
+			local partial = payload:sub(1, first_frame_max_size)
 			local ok, err, errno = func(partial, false, deadline)
 			if not ok then
 				return ok, err, errno
 			end
 		end
-		local sent = SETTINGS_MAX_FRAME_SIZE
+		local sent = first_frame_max_size
 		local max = #payload-SETTINGS_MAX_FRAME_SIZE
 		while sent < max do
 			local partial = payload:sub(sent+1, sent+SETTINGS_MAX_FRAME_SIZE)
@@ -1089,28 +1327,28 @@ function stream_methods:write_headers(headers, end_stream, timeout)
 	local padded, exclusive, stream_dep, weight = nil, nil, nil, nil
 	return write_headers(self, function(payload, end_headers, deadline)
 		return self:write_headers_frame(payload, end_stream, end_headers, padded, exclusive, stream_dep, weight, deadline and deadline-monotime())
-	end, headers, timeout)
+	end, headers, 0, timeout)
 end
 
 function stream_methods:push_promise(headers, timeout)
 	assert(self.type == "server")
 	assert(headers, "missing argument: headers")
 	assert(validate_headers(headers, true, 1, false))
-	assert(headers:has(":authority"))
+	assert(headers:has(":authority"), "PUSH_PROMISE must have an :authority")
 
 	local promised_stream = self.connection:new_stream()
+	promised_stream:pick_id()
 	self:reprioritise(promised_stream)
-	local promised_stream_id = promised_stream.id
 
 	local padded = nil
 	local ok, err, errno = write_headers(self, function(payload, end_headers, deadline)
-		return self:write_push_promise_frame(promised_stream_id, payload, end_headers, padded, deadline)
-	end, headers, timeout)
+		return self:write_push_promise_frame(promised_stream.id, payload, end_headers, padded, deadline)
+	end, headers, 4, timeout) -- 4 is size of promised stream id
 	if not ok then
 		return nil, err, errno
 	end
-
-	promised_stream:set_state("reserved (local)")
+	promised_stream.recv_headers_fifo:push(headers)
+	promised_stream.recv_headers_cond:signal()
 
 	return promised_stream
 end
@@ -1119,8 +1357,8 @@ function stream_methods:write_chunk(payload, end_stream, timeout)
 	local deadline = timeout and (monotime()+timeout)
 	local sent = 0
 	while true do
-		while self.peer_flow_credits == 0 do
-			local which = cqueues.poll(self.peer_flow_credits_increase, self.connection, timeout)
+		while self.peer_flow_credits <= 0 do
+			local which = cqueues.poll(self.peer_flow_credits_change, self.connection, timeout)
 			if which == self.connection then
 				local ok, err, errno = self.connection:step(0)
 				if not ok then
@@ -1131,8 +1369,8 @@ function stream_methods:write_chunk(payload, end_stream, timeout)
 			end
 			timeout = deadline and (deadline-monotime())
 		end
-		while self.connection.peer_flow_credits == 0 do
-			local which = cqueues.poll(self.connection.peer_flow_credits_increase, self.connection, timeout)
+		while self.connection.peer_flow_credits <= 0 do
+			local which = cqueues.poll(self.connection.peer_flow_credits_change, self.connection, timeout)
 			if which == self.connection then
 				local ok, err, errno = self.connection:step(0)
 				if not ok then
@@ -1143,12 +1381,12 @@ function stream_methods:write_chunk(payload, end_stream, timeout)
 			end
 			timeout = deadline and (deadline-monotime())
 		end
-		local SETTINGS_MAX_FRAME_SIZE = self.connection.peer_settings[0x5]
+		local SETTINGS_MAX_FRAME_SIZE = self.connection.peer_settings[known_settings.MAX_FRAME_SIZE]
 		local max_available = math.min(self.peer_flow_credits, self.connection.peer_flow_credits, SETTINGS_MAX_FRAME_SIZE)
 		if max_available < (#payload - sent) then
 			if max_available > 0 then
 				-- send partial payload
-				local ok, err, errno = self:write_data_frame(payload:sub(sent+1, sent+max_available), false, timeout)
+				local ok, err, errno = self:write_data_frame(payload:sub(sent+1, sent+max_available), false, false, timeout)
 				if not ok then
 					return nil, err, errno
 				end
@@ -1171,6 +1409,8 @@ return {
 	methods = stream_methods;
 	mt = stream_mt;
 
+	known_settings = known_settings;
+	frame_types = frame_types;
 	frame_handlers = frame_handlers;
 	pack_settings_payload = pack_settings_payload;
 }
diff --git a/http/headers.lua b/http/headers.lua
index 6e3052c..ceeeed3 100644
--- a/http/headers.lua
+++ b/http/headers.lua
@@ -3,8 +3,8 @@ HTTP Header data structure/type
 
 Design criteria:
   - the same header field is allowed more than once
-      - must be able to fetch seperate occurences (important for some headers e.g. Set-Cookie)
-      - optionally available as comma seperated list
+      - must be able to fetch separate occurences (important for some headers e.g. Set-Cookie)
+      - optionally available as comma separated list
   - http2 adds flag to headers that they should never be indexed
   - header order should be recoverable
 
@@ -12,7 +12,7 @@ I chose to implement headers as an array of entries.
 An index of field name => array indices is kept.
 ]]
 
-local unpack = table.unpack or unpack -- luacheck: ignore 113
+local unpack = table.unpack or unpack -- luacheck: ignore 113 143
 
 local entry_methods = {}
 local entry_mt = {
diff --git a/http/headers.tld b/http/headers.tld
index 63d91e9..92fb430 100644
--- a/http/headers.tld
+++ b/http/headers.tld
@@ -1,17 +1,17 @@
 interface headers
-	const clone : (self) -> headers
-	const append : (self, string, string, nil|boolean) -> ()
-	const each : (self) -> ((self) -> (string, string, boolean))
-	const has : (self, string) -> (boolean)
-	const delete : (self, string) -> (boolean)
-	const geti : (self, integer) -> (string, string, boolean)
-	const get_as_sequence : (self, string) -> ({"n": integer, integer:string})
-	const get : (self, string) -> (string*)
-	const get_comma_separated : (self, string) -> (string|nil)
-	const modifyi : (self, integer, string, boolean?) -> ()
-	const upsert : (self, string, string, boolean?) -> ()
-	const sort : (self) -> ()
-	const dump : (self, nil|file, nil|string) -> ()
+	const clone: (self) -> (headers)
+	const append: (self, string, string, boolean?) -> ()
+	const each: (self) -> ((self) -> (string, string, boolean))
+	const has: (self, string) -> (boolean)
+	const delete: (self, string) -> (boolean)
+	const geti: (self, integer) -> (string, string, boolean)
+	const get_as_sequence: (self, string) -> ({"n": integer, integer:string})
+	const get: (self, string) -> (string*)
+	const get_comma_separated: (self, string) -> (string|nil)
+	const modifyi: (self, integer, string, boolean?) -> ()
+	const upsert: (self, string, string, boolean?) -> ()
+	const sort: (self) -> ()
+	const dump: (self, file?, string?) -> ()
 end
 
 new : () -> (headers)
diff --git a/http/hpack.lua b/http/hpack.lua
index 0a6e1af..09c3cfa 100644
--- a/http/hpack.lua
+++ b/http/hpack.lua
@@ -2,12 +2,12 @@
 -- Reference documentation: https://http2.github.io/http2-spec/compression.html
 
 local schar = string.char
-local spack = string.pack or require "compat53.string".pack
-local sunpack = string.unpack or require "compat53.string".unpack
+local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143
+local sunpack = string.unpack or require "compat53.string".unpack -- luacheck: ignore 143
 local band = require "http.bit".band
 local bor = require "http.bit".bor
 local new_headers = require "http.headers".new
-local unpack = table.unpack or unpack -- luacheck: ignore 113
+local unpack = table.unpack or unpack -- luacheck: ignore 113 143
 local h2_errors = require "http.h2_error".errors
 
 -- Section 5.1
@@ -353,6 +353,7 @@ do
 		end
 		byte_to_bitstring[string.char(i)] = val
 	end
+	local EOS_length = #huffman_codes.EOS
 	huffman_decode = function(s)
 		local bitstring = s:gsub(".", byte_to_bitstring)
 		local node = huffman_tree
@@ -365,18 +366,29 @@ do
 				node = huffman_tree
 			elseif node == "EOS" then
 				-- 5.2: A Huffman encoded string literal containing the EOS symbol MUST be treated as a decoding error.
-				assert(node ~= 256, "invalid huffman code (EOS)")
+				return nil, h2_errors.COMPRESSION_ERROR:new_traceback("invalid huffman code (EOS)")
 			elseif nt ~= "table" then
-				error("invalid huffman code")
+				return nil, h2_errors.COMPRESSION_ERROR:new_traceback("invalid huffman code")
 			end
 		end
 		--[[ Ensure that any left over bits are all one.
 		Section 5.2: A padding not corresponding to the most significant bits
 		of the code for the EOS symbol MUST be treated as a decoding error]]
-		while type(node) == "table" do
-			node = node["1"]
+		if node ~= huffman_tree then
+			-- We check this by continuing through on the '1' branch and ensure that we end up at EOS
+			local n_padding = EOS_length
+			while type(node) == "table" do
+				node = node["1"]
+				n_padding = n_padding - 1
+			end
+			if node ~= "EOS" then
+				return nil, h2_errors.COMPRESSION_ERROR:new_traceback("invalid huffman padding: expected most significant bits to match EOS")
+			end
+			-- Section 5.2: A padding strictly longer than 7 bits MUST be treated as a decoding error
+			if n_padding < 0 or n_padding >= 8 then
+				return nil, h2_errors.COMPRESSION_ERROR:new_traceback("invalid huffman padding: too much padding")
+			end
 		end
-		assert(node == "EOS", "invalid huffman padding")
 
 		return string.char(unpack(output))
 	end
@@ -413,10 +425,13 @@ local function decode_string(str, pos)
 	if newpos > #str+1 then return end
 	local val = str:sub(pos, newpos-1)
 	if huffman then
-		return huffman_decode(val), newpos
-	else
-		return val, newpos
+		local err
+		val, err = huffman_decode(val)
+		if not val then
+			return nil, err
+		end
 	end
+	return val, newpos
 end
 
 local function compound_key(name, value)
@@ -430,24 +445,19 @@ local function dynamic_table_entry_size(k)
 	return 32 - 8 + #k -- 8 is number of bytes of overhead introduced by compound_key
 end
 local static_names_to_index = {}
-local static_index_to_names = {}
-local static_pairs = {} -- Duplicate writes are okay
+local static_pairs = {}
 local max_static_index
 do
 	-- We prefer earlier indexes as examples in spec are like that
-	local function s(i, name)
+	local function p(i, name, value)
 		if not static_names_to_index[name] then
 			static_names_to_index[name] = i
-			static_index_to_names[i] = name
 		end
-	end
-	local function p(i, name, value)
-		s(i, name)
-		local k = compound_key(name, value)
+		local k = compound_key(name, value or "")
 		static_pairs[k] = i
 		static_pairs[i] = k
 	end
-	s( 1, ":authority")
+	p( 1, ":authority")
 	p( 2, ":method", "GET")
 	p( 3, ":method", "POST")
 	p( 4, ":path", "/")
@@ -461,53 +471,53 @@ do
 	p(12, ":status", "400")
 	p(13, ":status", "404")
 	p(14, ":status", "500")
-	s(15, "accept-charset")
+	p(15, "accept-charset")
 	p(16, "accept-encoding", "gzip, deflate")
-	s(17, "accept-language")
-	s(18, "accept-ranges")
-	s(19, "accept")
-	s(20, "access-control-allow-origin")
-	s(21, "age")
-	s(22, "allow")
-	s(23, "authorization")
-	s(24, "cache-control")
-	s(25, "content-disposition")
-	s(26, "content-encoding")
-	s(27, "content-language")
-	s(28, "content-length")
-	s(29, "content-location")
-	s(30, "content-range")
-	s(31, "content-type")
-	s(32, "cookie")
-	s(33, "date")
-	s(34, "etag")
-	s(35, "expect")
-	s(36, "expires")
-	s(37, "from")
-	s(38, "host")
-	s(39, "if-match")
-	s(40, "if-modified-since")
-	s(41, "if-none-match")
-	s(42, "if-range")
-	s(43, "if-unmodified-since")
-	s(44, "last-modified")
-	s(45, "link")
-	s(46, "location")
-	s(47, "max-forwards")
-	s(48, "proxy-authenticate")
-	s(49, "proxy-authorization")
-	s(50, "range")
-	s(51, "referer")
-	s(52, "refresh")
-	s(53, "retry-after")
-	s(54, "server")
-	s(55, "set-cookie")
-	s(56, "strict-transport-security")
-	s(57, "transfer-encoding")
-	s(58, "user-agent")
-	s(59, "vary")
-	s(60, "via")
-	s(61, "www-authenticate")
+	p(17, "accept-language")
+	p(18, "accept-ranges")
+	p(19, "accept")
+	p(20, "access-control-allow-origin")
+	p(21, "age")
+	p(22, "allow")
+	p(23, "authorization")
+	p(24, "cache-control")
+	p(25, "content-disposition")
+	p(26, "content-encoding")
+	p(27, "content-language")
+	p(28, "content-length")
+	p(29, "content-location")
+	p(30, "content-range")
+	p(31, "content-type")
+	p(32, "cookie")
+	p(33, "date")
+	p(34, "etag")
+	p(35, "expect")
+	p(36, "expires")
+	p(37, "from")
+	p(38, "host")
+	p(39, "if-match")
+	p(40, "if-modified-since")
+	p(41, "if-none-match")
+	p(42, "if-range")
+	p(43, "if-unmodified-since")
+	p(44, "last-modified")
+	p(45, "link")
+	p(46, "location")
+	p(47, "max-forwards")
+	p(48, "proxy-authenticate")
+	p(49, "proxy-authorization")
+	p(50, "range")
+	p(51, "referer")
+	p(52, "refresh")
+	p(53, "retry-after")
+	p(54, "server")
+	p(55, "set-cookie")
+	p(56, "strict-transport-security")
+	p(57, "transfer-encoding")
+	p(58, "user-agent")
+	p(59, "vary")
+	p(60, "via")
+	p(61, "www-authenticate")
 	max_static_index = 61
 end
 
@@ -723,18 +733,12 @@ function methods:lookup_name_index(name)
 	return nil
 end
 
-function methods:lookup_index(index, allow_single)
+function methods:lookup_index(index)
 	if index <= max_static_index then
 		local k = static_pairs[index]
 		if k then
 			return uncompound_key(k)
 		end
-		if allow_single then
-			local name = static_index_to_names[index]
-			if name then
-				return name, nil
-			end
-		end
 	else -- Dynamic?
 		local id = self:dynamic_index_to_table_id(index)
 		local k = self.dynamic_pairs[id]
@@ -797,15 +801,12 @@ local function decode_header_helper(self, payload, prefix_len, pos)
 		if name == nil then
 			return name, pos
 		end
-		if name:match("%u") then
-			return nil, h2_errors.PROTOCOL_ERROR:new_traceback("malformed: header fields must not be uppercase")
-		end
 		value, pos = decode_string(payload, pos)
 		if value == nil then
 			return value, pos
 		end
 	else
-		name = self:lookup_index(index, true)
+		name = self:lookup_index(index)
 		if name == nil then
 			return nil, h2_errors.COMPRESSION_ERROR:new_traceback(string.format("index %d not found in table", index))
 		end
@@ -826,7 +827,7 @@ function methods:decode_headers(payload, header_list, pos)
 			local index, newpos = decode_integer(payload, 7, pos)
 			if index == nil then break end
 			pos = newpos
-			local name, value = self:lookup_index(index, false)
+			local name, value = self:lookup_index(index)
 			if name == nil then
 				return nil, h2_errors.COMPRESSION_ERROR:new_traceback(string.format("index %d not found in table", index))
 			end
diff --git a/http/hsts.lua b/http/hsts.lua
index 84317cb..70a4759 100644
--- a/http/hsts.lua
+++ b/http/hsts.lua
@@ -3,13 +3,12 @@ Data structures useful for HSTS (HTTP Strict Transport Security)
 HSTS is described in RFC 6797
 ]]
 
-local EOF = require "lpeg".P(-1)
-local IPv4address = require "lpeg_patterns.IPv4".IPv4address
-local IPv6address = require "lpeg_patterns.IPv6".IPv6address
-local IPaddress = (IPv4address + IPv6address) * EOF
+local binaryheap = require "binaryheap"
+local http_util = require "http.util"
 
 local store_methods = {
 	time = function() return os.time() end;
+	max_items = (1e999);
 }
 
 local store_mt = {
@@ -23,25 +22,22 @@ local store_item_mt = {
 	__index = store_item_methods;
 }
 
-local function host_is_ip(host)
-	if IPaddress:match(host) then
-		return true
-	else
-		return false
-	end
-end
-
 local function new_store()
 	return setmetatable({
 		domains = {};
+		expiry_heap = binaryheap.minUnique();
+		n_items = 0;
 	}, store_mt)
 end
 
 function store_methods:clone()
 	local r = new_store()
 	r.time = rawget(self, "time")
+	r.n_items = rawget(self, "n_items")
+	r.expiry_heap = binaryheap.minUnique()
 	for host, item in pairs(self.domains) do
 		r.domains[host] = item
+		r.expiry_heap:insert(item.expires, item)
 	end
 	return r
 end
@@ -56,34 +52,62 @@ function store_methods:store(host, directives)
 	else
 		max_age = tonumber(max_age, 10)
 	end
-	if host_is_ip(host) then
-		return false
-	end
+
+	-- Clean now so that we can assume there are no expired items in store
+	self:clean()
+
 	if max_age == 0 then
-		-- delete from store
-		self.domains[host] = nil
+		return self:remove(host)
 	else
+		if http_util.is_ip(host) then
+			return false
+		end
 		-- add to store
-		self.domains[host] = setmetatable({
+		local old_item = self.domains[host]
+		if old_item then
+			self.expiry_heap:remove(old_item)
+		else
+			local n_items = self.n_items
+			if n_items >= self.max_items then
+				return false
+			end
+			self.n_items = n_items + 1
+		end
+		local expires = now + max_age
+		local item = setmetatable({
+			host = host;
 			includeSubdomains = directives.includeSubdomains;
-			expires = now + max_age;
+			expires = expires;
 		}, store_item_mt)
+		self.domains[host] = item
+		self.expiry_heap:insert(expires, item)
+	end
+	return true
+end
+
+function store_methods:remove(host)
+	local item = self.domains[host]
+	if item then
+		self.expiry_heap:remove(item)
+		self.domains[host] = nil
+		self.n_items = self.n_items - 1
 	end
 	return true
 end
 
 function store_methods:check(host)
-	if host_is_ip(host) then
+	if http_util.is_ip(host) then
 		return false
 	end
-	local now = self.time()
+
+	-- Clean now so that we can assume there are no expired items in store
+	self:clean()
+
 	local h = host
 	repeat
 		local item = self.domains[h]
 		if item then
-			if item.expires < now then
-				self:clean()
-			elseif host == h or item.includeSubdomains then
+			if host == h or item.includeSubdomains then
 				return true
 			end
 		end
@@ -93,12 +117,20 @@ function store_methods:check(host)
 	return false
 end
 
+function store_methods:clean_due()
+	local next_expiring = self.expiry_heap:peek()
+	if not next_expiring then
+		return (1e999)
+	end
+	return next_expiring.expires
+end
+
 function store_methods:clean()
 	local now = self.time()
-	for host, item in pairs(self.domains) do
-		if item.expires < now then
-			self.domains[host] = nil
-		end
+	while self:clean_due() < now do
+		local item = self.expiry_heap:pop()
+		self.domains[item.host] = nil
+		self.n_items = self.n_items - 1
 	end
 	return true
 end
diff --git a/http/hsts.tld b/http/hsts.tld
new file mode 100644
index 0000000..952aab4
--- /dev/null
+++ b/http/hsts.tld
@@ -0,0 +1,13 @@
+interface hsts_store
+	time: () -> (number)
+	max_items: number
+
+    clone: (self) -> (hsts_store)
+    store: (self, string, {string:string}) -> (boolean)
+    remove: (self, string) -> (boolean)
+    check: (self, string) -> (boolean)
+    const clean_due: (self) -> (number)
+    const clean: (self) -> (boolean)
+end
+
+new_store: () -> (hsts_store)
diff --git a/http/proxies.lua b/http/proxies.lua
index 6580105..088112f 100644
--- a/http/proxies.lua
+++ b/http/proxies.lua
@@ -57,11 +57,11 @@ function proxies_methods:choose(scheme, host)
 			end
 		end
 	end
-	if scheme == "http" or scheme == "ws" then
+	if scheme == "http" then
 		if self.http_proxy then
 			return self.http_proxy
 		end
-	elseif scheme == "https" or scheme == "wss" then
+	elseif scheme == "https" then
 		if self.https_proxy then
 			return self.https_proxy
 		end
diff --git a/http/proxies.tld b/http/proxies.tld
new file mode 100644
index 0000000..f73d539
--- /dev/null
+++ b/http/proxies.tld
@@ -0,0 +1,6 @@
+interface proxies
+    const update: (self, (string)->(string?))->(self)
+    const choose: (self, string, string)->(string?)
+end
+
+new: proxies
diff --git a/http/request.lua b/http/request.lua
index c8c7be4..f9b65d6 100644
--- a/http/request.lua
+++ b/http/request.lua
@@ -4,6 +4,7 @@ local uri_patts = require "lpeg_patterns.uri"
 local basexx = require "basexx"
 local client = require "http.client"
 local new_headers = require "http.headers".new
+local http_cookie = require "http.cookie"
 local http_hsts = require "http.hsts"
 local http_socks = require "http.socks"
 local http_proxies = require "http.proxies"
@@ -15,10 +16,18 @@ local ce = require "cqueues.errno"
 local default_user_agent = string.format("%s/%s", http_version.name, http_version.version)
 local default_hsts_store = http_hsts.new_store()
 local default_proxies = http_proxies.new():update()
+local default_cookie_store = http_cookie.new_store()
+
+local default_h2_settings = {
+	ENABLE_PUSH = false;
+}
 
 local request_methods = {
 	hsts = default_hsts_store;
 	proxies = default_proxies;
+	cookie_store = default_cookie_store;
+	is_top_level = true;
+	site_for_cookies = nil;
 	expect_100_timeout = 1;
 	follow_redirects = true;
 	max_redirects = 5;
@@ -32,7 +41,7 @@ local request_mt = {
 }
 
 local EOF = lpeg.P(-1)
-local sts_patt = lpeg.Cf(lpeg.Ct(true) * http_patts.Strict_Transport_Security, rawset) * EOF
+local sts_patt = http_patts.Strict_Transport_Security * EOF
 local uri_patt = uri_patts.uri * EOF
 local uri_ref = uri_patts.uri_reference * EOF
 
@@ -44,7 +53,7 @@ local function new_from_uri(uri_t, headers)
 	end
 	local scheme = assert(uri_t.scheme, "URI missing scheme")
 	assert(scheme == "https" or scheme == "http" or scheme == "ws" or scheme == "wss", "scheme not valid")
-	local host = tostring(assert(uri_t.host, "URI must include a host")) -- tostring required to e.g. convert lpeg_patterns IPv6 objects
+	local host = assert(uri_t.host, "URI must include a host")
 	local port = uri_t.port or http_util.scheme_to_port[scheme]
 	local is_connect -- CONNECT requests are a bit special, see http2 spec section 8.3
 	if headers == nil then
@@ -68,6 +77,11 @@ local function new_from_uri(uri_t, headers)
 			path = path .. "?" .. uri_t.query
 		end
 		headers:upsert(":path", path)
+		if scheme == "wss" then
+			scheme = "https"
+		elseif scheme == "ws" then
+			scheme = "http"
+		end
 		headers:upsert(":scheme", scheme)
 	end
 	if uri_t.userinfo then
@@ -77,8 +91,8 @@ local function new_from_uri(uri_t, headers)
 		else
 			field = "authorization"
 		end
-		local userinfo = http_util.decodeURIComponent(uri_t.userinfo) -- XXX: this doesn't seem right, but it's same behaviour as curl
-		headers:append(field, "basic " .. basexx.to_base64(userinfo), true)
+		local userinfo = http_util.decodeURIComponent(uri_t.userinfo) -- XXX: this doesn't seem right, but it's the same behaviour as curl
+		headers:upsert(field, "basic " .. basexx.to_base64(userinfo), true)
 	end
 	if not headers:has("user-agent") then
 		headers:append("user-agent", default_user_agent)
@@ -86,7 +100,7 @@ local function new_from_uri(uri_t, headers)
 	return setmetatable({
 		host = host;
 		port = port;
-		tls = (scheme == "https" or scheme == "wss");
+		tls = (scheme == "https");
 		headers = headers;
 		body = nil;
 	}, request_mt)
@@ -103,6 +117,7 @@ function request_methods:clone()
 	return setmetatable({
 		host = self.host;
 		port = self.port;
+		bind = self.bind;
 		tls = self.tls;
 		ctx = self.ctx;
 		sendname = self.sendname;
@@ -114,6 +129,9 @@ function request_methods:clone()
 
 		hsts = rawget(self, "hsts");
 		proxies = rawget(self, "proxies");
+		cookie_store = rawget(self, "cookie_store");
+		is_top_level = rawget(self, "is_top_level");
+		site_for_cookies = rawget(self, "site_for_cookies");
 		expect_100_timeout = rawget(self, "expect_100_timeout");
 		follow_redirects = rawget(self, "follow_redirects");
 		max_redirects = rawget(self, "max_redirects");
@@ -196,9 +214,9 @@ function request_methods:handle_redirect(orig_headers)
 		if not is_connect then
 			new_req.headers:upsert(":scheme", new_scheme)
 		end
-		if new_scheme == "https" or new_scheme == "wss" then
+		if new_scheme == "https" then
 			new_req.tls = true
-		elseif new_scheme == "http" or new_scheme == "ws" then
+		elseif new_scheme == "http" then
 			new_req.tls = false
 		else
 			return nil, "unknown scheme", ce.EINVAL
@@ -339,6 +357,7 @@ function request_methods:go(timeout)
 	local host = self.host
 	local port = self.port
 	local tls = self.tls
+	local version = self.version
 
 	-- RFC 6797 Section 8.3
 	if not tls and self.hsts and self.hsts:check(host) then
@@ -362,6 +381,18 @@ function request_methods:go(timeout)
 		end
 	end
 
+	if self.cookie_store then
+		local cookie_header = self.cookie_store:lookup_for_request(request_headers, host, self.site_for_cookies, self.is_top_level)
+		if cookie_header ~= "" then
+			if not cloned_headers then
+				request_headers = request_headers:clone()
+				cloned_headers = true
+			end
+			-- Append rather than upsert: user may have added their own cookies
+			request_headers:append("cookie", cookie_header)
+		end
+	end
+
 	local connection
 
 	local proxy = self.proxy
@@ -373,8 +404,16 @@ function request_methods:go(timeout)
 	if proxy then
 		if type(proxy) == "string" then
 			proxy = assert(uri_patt:match(proxy), "invalid proxy URI")
+			proxy.path = nil -- ignore proxy.path component
 		else
 			assert(type(proxy) == "table" and getmetatable(proxy) == nil and proxy.scheme, "invalid proxy URI")
+			proxy = {
+				scheme = proxy.scheme;
+				userinfo = proxy.userinfo;
+				host = proxy.host;
+				port = proxy.port;
+				-- ignore proxy.path component
+			}
 		end
 		if proxy.scheme == "http" or proxy.scheme == "https" then
 			if tls then
@@ -383,10 +422,8 @@ function request_methods:go(timeout)
 				local connect_request = new_connect(proxy, authority)
 				connect_request.proxy = false
 				connect_request.version = 1.1 -- TODO: CONNECT over HTTP/2
-				if tls then
-					if connect_request.tls then
-						error("NYI: TLS over TLS")
-					end
+				if connect_request.tls then
+					error("NYI: TLS over TLS")
 				end
 				-- Perform CONNECT request
 				local headers, stream, errno = connect_request:go(deadline and deadline-monotime())
@@ -404,10 +441,12 @@ function request_methods:go(timeout)
 				local sock = stream.connection:take_socket()
 				local err, errno2
 				connection, err, errno2 = client.negotiate(sock, {
+					host = host;
 					tls = tls;
 					ctx = self.ctx;
-					sendname = self.sendname ~= nil and self.sendname or host;
-					version = self.version;
+					sendname = self.sendname;
+					version = version;
+					h2_settings = default_h2_settings;
 				}, deadline and deadline-monotime())
 				if connection == nil then
 					sock:close()
@@ -417,9 +456,6 @@ function request_methods:go(timeout)
 				if request_headers:get(":method") == "CONNECT" then
 					error("cannot use HTTP Proxy with CONNECT method")
 				end
-				if proxy.path ~= nil and proxy.path ~= "" then
-					error("an HTTP proxy cannot have a path component")
-				end
 				-- TODO: Check if :path already has authority?
 				local old_url = self:to_uri(false)
 				host = assert(proxy.host, "proxy is missing host")
@@ -445,7 +481,8 @@ function request_methods:go(timeout)
 				tls = tls;
 				ctx = self.ctx;
 				sendname = self.sendname ~= nil and self.sendname or host;
-				version = self.version;
+				version = version;
+				h2_settings = default_h2_settings;
 			}, deadline and deadline-monotime())
 			if connection == nil then
 				sock:close()
@@ -461,10 +498,12 @@ function request_methods:go(timeout)
 		connection, err, errno = client.connect({
 			host = host;
 			port = port;
+			bind = self.bind;
 			tls = tls;
 			ctx = self.ctx;
 			sendname = self.sendname;
-			version = self.version;
+			version = version;
+			h2_settings = default_h2_settings;
 		}, deadline and deadline-monotime())
 		if connection == nil then
 			return nil, err, errno
@@ -580,6 +619,10 @@ function request_methods:go(timeout)
 		end
 	end
 
+	if self.cookie_store then
+		self.cookie_store:store_from_request(request_headers, headers, self.host, self.site_for_cookies)
+	end
+
 	if self.follow_redirects and headers:get(":status"):sub(1,1) == "3" then
 		stream:shutdown()
 		local new_req, err2, errno2 = self:handle_redirect(headers)
diff --git a/http/request.tld b/http/request.tld
new file mode 100644
index 0000000..bbe50c2
--- /dev/null
+++ b/http/request.tld
@@ -0,0 +1,26 @@
+require "http.cookie"
+require "http.hsts"
+require "http.proxies"
+require "http.stream_common"
+
+interface request
+	hsts: hsts_store|false
+	proxies: proxies|false
+	cookie_store: cookie_store|false
+	is_top_level: boolean
+	site_for_cookies: string?
+	expect_100_timeout: integer
+	follow_redirects: boolean
+	max_redirects: integer
+	post301: boolean
+	post302: boolean
+	headers: headers
+	const clone: (self) -> (request)
+	const to_uri: (self, boolean?) -> (string)
+	const handle_redirect: (self, headers) -> (request)|(nil, string, integer)
+	const set_body: (self, string|file|()->(string?)) -> ()
+	const go: (self, number) -> (headers, stream)|(nil, string, integer)
+end
+
+new_from_uri: (string, headers?) -> (request)
+new_connect: (string, string) -> (request)
diff --git a/http/server.lua b/http/server.lua
index 14e1760..7f5bfd0 100644
--- a/http/server.lua
+++ b/http/server.lua
@@ -45,7 +45,8 @@ end
 -- Wrap a bare cqueues socket in an HTTP connection of a suitable version
 -- Starts TLS if necessary
 -- this function *should never throw*
-local function wrap_socket(self, socket, deadline)
+local function wrap_socket(self, socket, timeout)
+	local deadline = timeout and monotime()+timeout
 	socket:setmode("b", "b")
 	socket:onerror(onerror)
 	local version = self.version
@@ -62,15 +63,19 @@ local function wrap_socket(self, socket, deadline)
 		if not ok then
 			return nil, err, errno
 		end
-		local ssl = socket:checktls()
-		if ssl and http_tls.has_alpn then
+		local ssl = assert(socket:checktls())
+		if http_tls.has_alpn then
 			local proto = ssl:getAlpnSelected()
-			if proto == "h2" and (version == nil or version == 2) then
-				version = 2
-			elseif (proto == "http/1.1") and (version == nil or version < 2) then
-				version = 1.1
-			elseif proto ~= nil then
-				return nil, "unexpected ALPN protocol: " .. proto, ce.EILSEQNOSUPPORT
+			if proto then
+				if proto == "h2" and (version == nil or version == 2) then
+					version = 2
+				elseif proto == "http/1.1" and (version == nil or version < 2) then
+					version = 1.1
+				elseif proto == "http/1.0" and (version == nil or version == 1.0) then
+					version = 1.0
+				else
+					return nil, "unexpected ALPN protocol: " .. proto, ce.EILSEQNOSUPPORT
+				end
 			end
 		end
 	end
@@ -79,7 +84,7 @@ local function wrap_socket(self, socket, deadline)
 	if version == nil then
 		local is_h2, err, errno = h2_connection.socket_has_preface(socket, true, deadline and (deadline-monotime()))
 		if is_h2 == nil then
-			return nil, err, errno
+			return nil, err or ce.EPIPE, errno
 		end
 		version = is_h2 and 2 or 1.1
 	end
@@ -126,7 +131,7 @@ end
 
 local function handle_socket(self, socket)
 	local error_operation, error_context
-	local conn, err, errno = wrap_socket(self, socket)
+	local conn, err, errno = wrap_socket(self, socket, self.connection_setup_timeout)
 	if not conn then
 		socket:close()
 		if err ~= ce.EPIPE -- client closed connection
@@ -138,30 +143,32 @@ local function handle_socket(self, socket)
 	else
 		local cond = cc.new()
 		local idle = true
+		local deadline
 		conn:onidle(function()
 			idle = true
+			deadline = self.intra_stream_timeout + monotime()
 			cond:signal(1)
 		end)
 		while true do
+			local timeout = deadline and deadline-monotime() or self.intra_stream_timeout
 			local stream
-			stream, err, errno = conn:get_next_incoming_stream()
+			stream, err, errno = conn:get_next_incoming_stream(timeout)
 			if stream == nil then
 				if (err ~= nil -- client closed connection
 					and errno ~= ce.ECONNRESET
-					and errno ~= ce.ENOTCONN) then
+					and errno ~= ce.ENOTCONN
+					and errno ~= ce.ETIMEDOUT) then
 					error_operation = "get_next_incoming_stream"
 					error_context = conn
+					break
+				elseif errno ~= ce.ETIMEDOUT or not idle or (deadline and deadline <= monotime()) then -- want to go around loop again if deadline not hit
+					break
 				end
-				break
+			else
+				idle = false
+				deadline = nil
+				self:add_stream(stream)
 			end
-			idle = false
-			self.cq:wrap(function()
-				local ok, err2 = http_util.yieldable_pcall(self.onstream, self, stream)
-				stream:shutdown()
-				if not ok then
-					self:onerror()(self, stream, "onstream", err2)
-				end
-			end)
 		end
 		-- wait for streams to complete
 		if not idle then
@@ -176,33 +183,25 @@ local function handle_socket(self, socket)
 	end
 end
 
--- Prefer whichever comes first
-local function alpn_select_either(ssl, protos) -- luacheck: ignore 212
-	for _, proto in ipairs(protos) do
-		if proto == "h2" then
-			-- HTTP2 only allows >=TLSv1.2
-			if ssl:getVersion() >= openssl_ssl.TLS1_2_VERSION then
-				return proto
-			end
-		elseif proto == "http/1.1" then
-			return proto
-		end
-	end
-	return nil
-end
-
-local function alpn_select_h2(ssl, protos) -- luacheck: ignore 212
-	for _, proto in ipairs(protos) do
-		if proto == "h2" then
-			return proto
-		end
+local function handle_stream(self, stream)
+	local ok, err = http_util.yieldable_pcall(self.onstream, self, stream)
+	stream:shutdown()
+	if not ok then
+		self:onerror()(self, stream, "onstream", err)
 	end
-	return nil
 end
 
-local function alpn_select_h1(ssl, protos) -- luacheck: ignore 212
+-- Prefer whichever comes first
+local function alpn_select(ssl, protos, version)
 	for _, proto in ipairs(protos) do
-		if proto == "http/1.1" then
+		if proto == "h2" and (version == nil or version == 2) then
+			-- HTTP2 only allows >= TLSv1.2
+			-- allow override via version
+			if ssl:getVersion() >= openssl_ssl.TLS1_2_VERSION or version == 2 then
+				return proto
+			end
+		elseif (proto == "http/1.1" and (version == nil or version == 1.1))
+			or (proto == "http/1.0" and (version == nil or version == 1.0)) then
 			return proto
 		end
 	end
@@ -213,13 +212,7 @@ end
 local function new_ctx(host, version)
 	local ctx = http_tls.new_server_context()
 	if http_tls.has_alpn then
-		if version == nil then
-			ctx:setAlpnSelect(alpn_select_either)
-		elseif version == 2 then
-			ctx:setAlpnSelect(alpn_select_h2)
-		elseif version == 1.1 then
-			ctx:setAlpnSelect(alpn_select_h1)
-		end
+		ctx:setAlpnSelect(alpn_select, version)
 	end
 	if version == 2 then
 		ctx:setOptions(openssl_ctx.OP_NO_TLSv1 + openssl_ctx.OP_NO_TLSv1_1)
@@ -253,7 +246,8 @@ end
 local server_methods = {
 	version = nil;
 	max_concurrent = math.huge;
-	client_timeout = 10;
+	connection_setup_timeout = 10;
+	intra_stream_timeout = 10;
 }
 local server_mt = {
 	__name = "http.server";
@@ -269,7 +263,7 @@ end
 
 Takes a table of options:
   - `.cq` (optional): A cqueues controller to use
-  - `.socket`: A cqueues socket object
+  - `.socket` (optional): A cqueues socket object to accept() from
   - `.onstream`: function to call back for each stream read
   - `.onerror`: function that will be called when an error occurs (default: throw an error)
   - `.tls`: `nil`: allow both tls and non-tls connections
@@ -279,7 +273,8 @@ Takes a table of options:
   - `       `nil`: a self-signed context will be generated
   - `.version`: the http version to allow to connect (default: any)
   - `.max_concurrent`: Maximum number of connections to allow live at a time (default: infinity)
-  - `.client_timeout`: Timeout (in seconds) to wait for client to send first bytes and/or complete TLS handshake (default: 10)
+  - `.connection_setup_timeout`: Timeout (in seconds) to wait for client to send first bytes and/or complete TLS handshake (default: 10)
+  - `.intra_stream_timeout`: Timeout (in seconds) to wait between start of client streams (default: 10)
 ]]
 local function new_server(tbl)
 	local cq = tbl.cq
@@ -288,18 +283,15 @@ local function new_server(tbl)
 	else
 		assert(cqueues.type(cq) == "controller", "optional cq field should be a cqueue controller")
 	end
-	local socket = assert(tbl.socket, "missing 'socket'")
+	local socket = tbl.socket
+	if socket ~= nil then
+		assert(cs.type(socket), "optional socket field should be a cqueues socket")
+	end
 	local onstream = assert(tbl.onstream, "missing 'onstream'")
-
 	if tbl.ctx == nil and tbl.tls ~= false then
 		error("OpenSSL context required if .tls isn't false")
 	end
 
-	-- Return errors rather than throwing
-	socket:onerror(function(s, op, why, lvl) -- luacheck: ignore 431 212
-		return why
-	end)
-
 	local self = setmetatable({
 		cq = cq;
 		socket = socket;
@@ -313,10 +305,17 @@ local function new_server(tbl)
 		pause_cond = cc.new();
 		paused = false;
 		connection_done = cc.new(); -- signalled when connection has been closed
-		client_timeout = tbl.client_timeout;
+		connection_setup_timeout = tbl.connection_setup_timeout;
+		intra_stream_timeout = tbl.intra_stream_timeout;
 	}, server_mt)
 
-	cq:wrap(server_loop, self)
+	if socket then
+		-- Return errors rather than throwing
+		socket:onerror(function(socket, op, why, lvl) -- luacheck: ignore 431 212
+			return why
+		end)
+		cq:wrap(server_loop, self)
+	end
 
 	return self
 end
@@ -381,7 +380,8 @@ local function listen(tbl)
 		ctx = ctx;
 		version = tbl.version;
 		max_concurrent = tbl.max_concurrent;
-		client_timeout = tbl.client_timeout;
+		connection_setup_timeout = tbl.connection_setup_timeout;
+		intra_stream_timeout = tbl.intra_stream_timeout;
 	}
 end
 
@@ -404,11 +404,20 @@ end
 -- Actually wait for and *do* the binding
 -- Don't *need* to call this, as if not it will be done lazily
 function server_methods:listen(timeout)
-	return ca.fileresult(self.socket:listen(timeout))
+	if self.socket then
+		local ok, err, errno = ca.fileresult(self.socket:listen(timeout))
+		if not ok then
+			return nil, err, errno
+		end
+	end
+	return true
 end
 
 function server_methods:localname()
-	return self.socket:localname()
+	if self.socket == nil then
+		return
+	end
+	return ca.fileresult(self.socket:localname())
 end
 
 function server_methods:pause()
@@ -469,6 +478,11 @@ function server_methods:add_socket(socket)
 	return true
 end
 
+function server_methods:add_stream(stream)
+	self.cq:wrap(handle_stream, self, stream)
+	return true
+end
+
 return {
 	new = new_server;
 	listen = listen;
diff --git a/http/socks.lua b/http/socks.lua
index c09d479..aadf35f 100644
--- a/http/socks.lua
+++ b/http/socks.lua
@@ -14,8 +14,8 @@ local monotime = cqueues.monotime
 local ca = require "cqueues.auxlib"
 local ce = require "cqueues.errno"
 local cs = require "cqueues.socket"
-local spack = string.pack or require "compat53.string".pack
-local sunpack = string.unpack or require "compat53.string".unpack
+local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143
+local sunpack = string.unpack or require "compat53.string".unpack -- luacheck: ignore 143
 local IPv4 = require "lpeg_patterns.IPv4"
 local IPv6 = require "lpeg_patterns.IPv6"
 local uri_patts = require "lpeg_patterns.uri"
diff --git a/http/stream_common.lua b/http/stream_common.lua
index 7633ce8..d9faf50 100644
--- a/http/stream_common.lua
+++ b/http/stream_common.lua
@@ -152,14 +152,27 @@ function stream_methods:write_body_from_string(str, timeout)
 	return self:write_chunk(str, true, timeout)
 end
 
-function stream_methods:write_body_from_file(file, timeout)
+function stream_methods:write_body_from_file(options, timeout)
 	local deadline = timeout and (monotime()+timeout)
-	-- Can't use :lines here as in Lua 5.1 it doesn't take a parameter
-	while true do
-		local chunk, err = file:read(CHUNK_SIZE)
+	local file, count
+	if io.type(options) then -- lua-http <= 0.2 took a file handle
+		file = options
+	else
+		file = options.file
+		count = options.count
+	end
+	if count == nil then
+		count = math.huge
+	elseif type(count) ~= "number" or count < 0 or count % 1 ~= 0 then
+		error("invalid .count parameter (expected positive integer)")
+	end
+	while count > 0 do
+		local chunk, err = file:read(math.min(CHUNK_SIZE, count))
 		if chunk == nil then
 			if err then
 				error(err)
+			elseif count ~= math.huge and count > 0 then
+				error("unexpected EOF")
 			end
 			break
 		end
@@ -167,6 +180,7 @@ function stream_methods:write_body_from_file(file, timeout)
 		if not ok then
 			return nil, err2, errno2
 		end
+		count = count - #chunk
 	end
 	return self:write_chunk("", true, deadline and (deadline-monotime()))
 end
diff --git a/http/stream_common.tld b/http/stream_common.tld
new file mode 100644
index 0000000..162c6af
--- /dev/null
+++ b/http/stream_common.tld
@@ -0,0 +1,25 @@
+require "http.connection_common"
+
+interface stream
+    const checktls: (self) -> (nil)|(any)
+    const localname: (self) -> (integer, string, integer?)|(nil)|(nil, string, number)
+    const peername: (self) -> (integer, string, integer?)|(nil)|(nil, string, number)
+    const write_continue: (self, number?) -> (true)|(nil, string, number)
+    const each_chunk: (self) -> ((stream)->(string)|(nil)|(nil, string, number), self)
+    const get_body_as_string: (self, number?) -> (string)|(nil, string, number)
+    const get_body_chars: (self, integer, number?) -> (string)|(nil, string, number)
+    const get_body_until: (self, string, boolean, boolean, number?) -> (string)|(nil, string, number)
+    const save_body_to_file: (self, file, number?) -> (true)|(nil, string, number)
+    const get_body_as_file: (self, number?) -> (file)|(nil, string, number)
+    const write_body_from_string: (self, string, number?) -> (true)|(nil, string, number)
+    const write_body_from_file: (self, {"file":file, "count": integer?}|file, number?) -> (true)|(nil, string, number)
+
+    -- Not in stream_common.lua
+    const connection: connection
+    const get_headers: (self, number?) -> (headers)|(nil)|(nil, string, number)
+    const get_next_chunk: (self, number?) -> (string)|(nil)|(nil, string, number)
+    const write_headers: (self, headers, boolean, number?) -> (true)|(nil, string, number)
+    const write_chunk: (self, string, boolean, number?) -> (true)|(nil, string, number)
+    const unget: (self, string) -> (true)
+    const shutdown: (self) -> (true)
+end
diff --git a/http/tls.lua b/http/tls.lua
index d948c0b..efe3443 100644
--- a/http/tls.lua
+++ b/http/tls.lua
@@ -66,6 +66,59 @@ local intermediate_cipher_list = cipher_list {
 	"!DSS";
 }
 
+-- "Old" cipher list
+local old_cipher_list = cipher_list {
+	"ECDHE-ECDSA-CHACHA20-POLY1305";
+	"ECDHE-RSA-CHACHA20-POLY1305";
+	"ECDHE-RSA-AES128-GCM-SHA256";
+	"ECDHE-ECDSA-AES128-GCM-SHA256";
+	"ECDHE-RSA-AES256-GCM-SHA384";
+	"ECDHE-ECDSA-AES256-GCM-SHA384";
+	"DHE-RSA-AES128-GCM-SHA256";
+	"DHE-DSS-AES128-GCM-SHA256";
+	"kEDH+AESGCM";
+	"ECDHE-RSA-AES128-SHA256";
+	"ECDHE-ECDSA-AES128-SHA256";
+	"ECDHE-RSA-AES128-SHA";
+	"ECDHE-ECDSA-AES128-SHA";
+	"ECDHE-RSA-AES256-SHA384";
+	"ECDHE-ECDSA-AES256-SHA384";
+	"ECDHE-RSA-AES256-SHA";
+	"ECDHE-ECDSA-AES256-SHA";
+	"DHE-RSA-AES128-SHA256";
+	"DHE-RSA-AES128-SHA";
+	"DHE-DSS-AES128-SHA256";
+	"DHE-RSA-AES256-SHA256";
+	"DHE-DSS-AES256-SHA";
+	"DHE-RSA-AES256-SHA";
+	"ECDHE-RSA-DES-CBC3-SHA";
+	"ECDHE-ECDSA-DES-CBC3-SHA";
+	"EDH-RSA-DES-CBC3-SHA";
+	"AES128-GCM-SHA256";
+	"AES256-GCM-SHA384";
+	"AES128-SHA256";
+	"AES256-SHA256";
+	"AES128-SHA";
+	"AES256-SHA";
+	"AES";
+	"DES-CBC3-SHA";
+	"HIGH";
+	"SEED";
+	"!aNULL";
+	"!eNULL";
+	"!EXPORT";
+	"!DES";
+	"!RC4";
+	"!MD5";
+	"!PSK";
+	"!RSAPSK";
+	"!aDH";
+	"!aECDH";
+	"!EDH-DSS-DES-CBC3-SHA";
+	"!KRB5-DES-CBC3-SHA";
+	"!SRP";
+}
+
 -- A map from the cipher identifiers used in specifications to
 -- the identifiers used by OpenSSL.
 local spec_to_openssl = {
@@ -698,14 +751,10 @@ local default_tls_options = openssl_ctx.OP_NO_COMPRESSION
 	+ openssl_ctx.OP_NO_SSLv2
 	+ openssl_ctx.OP_NO_SSLv3
 
-local client_params = openssl_verify_param.new()
-client_params:setPurpose("sslserver") -- the purpose the peer has to present
-
 local function new_client_context()
 	local ctx = openssl_ctx.new("TLS", false)
 	ctx:setCipherList(intermediate_cipher_list)
 	ctx:setOptions(default_tls_options)
-	ctx:setParam(client_params)
 	ctx:setEphemeralKey(openssl_pkey.new{ type = "EC", curve = "prime256v1" })
 	local store = ctx:getStore()
 	store:addDefaults()
@@ -713,14 +762,10 @@ local function new_client_context()
 	return ctx
 end
 
-local server_params = openssl_verify_param.new()
-server_params:setPurpose("sslclient") -- the purpose the peer has to present
-
 local function new_server_context()
 	local ctx = openssl_ctx.new("TLS", true)
 	ctx:setCipherList(intermediate_cipher_list)
 	ctx:setOptions(default_tls_options)
-	ctx:setParam(server_params)
 	ctx:setEphemeralKey(openssl_pkey.new{ type = "EC", curve = "prime256v1" })
 	return ctx
 end
@@ -730,6 +775,7 @@ return {
 	has_hostname_validation = has_hostname_validation;
 	modern_cipher_list = modern_cipher_list;
 	intermediate_cipher_list = intermediate_cipher_list;
+	old_cipher_list = old_cipher_list;
 	banned_ciphers = banned_ciphers;
 	new_client_context = new_client_context;
 	new_server_context = new_server_context;
diff --git a/http/tls.tld b/http/tls.tld
new file mode 100644
index 0000000..43b851e
--- /dev/null
+++ b/http/tls.tld
@@ -0,0 +1,9 @@
+has_alpn: boolean
+has_hostname_validation: boolean
+modern_cipher_list: string
+intermediate_cipher_list: string
+old_cipher_list: string
+banned_ciphers: {string: true}
+-- TODO: luaossl SSL context type
+new_client_context: any
+new_server_context: any
diff --git a/http/util.lua b/http/util.lua
index f9d811d..a6d9520 100644
--- a/http/util.lua
+++ b/http/util.lua
@@ -1,5 +1,9 @@
 local lpeg = require "lpeg"
 local http_patts = require "lpeg_patterns.http"
+local IPv4_patts = require "lpeg_patterns.IPv4"
+local IPv6_patts = require "lpeg_patterns.IPv6"
+
+local EOF = lpeg.P(-1)
 
 -- Encodes a character as a percent encoded string
 local function char_to_pchar(c)
@@ -20,10 +24,8 @@ local function encodeURIComponent(str)
 end
 
 -- decodeURI unescapes url encoded characters
--- excluding for characters that are special in urls
+-- excluding characters that are special in urls
 local decodeURI do
-	-- Keep the blacklist in numeric form.
-	-- This means we can skip case normalisation of the hex characters
 	local decodeURI_blacklist = {}
 	for char in ("#$&+,/:;=?@"):gmatch(".") do
 		decodeURI_blacklist[string.byte(char)] = true
@@ -125,6 +127,24 @@ local function resolve_relative_path(orig_path, relative_path)
 	return table.concat(t, "/", s, i)
 end
 
+local safe_methods = {
+	-- RFC 7231 Section 4.2.1:
+	-- Of the request methods defined by this specification, the GET, HEAD,
+	-- OPTIONS, and TRACE methods are defined to be safe.
+	GET = true;
+	HEAD = true;
+	OPTIONS = true;
+	TRACE = true;
+}
+local function is_safe_method(method)
+	return safe_methods[method] or false
+end
+
+local IPaddress = (IPv4_patts.IPv4address + IPv6_patts.IPv6addrz) * EOF
+local function is_ip(str)
+	return IPaddress:match(str) ~= nil
+end
+
 local scheme_to_port = {
 	http = 80;
 	ws = 80;
@@ -135,17 +155,17 @@ local scheme_to_port = {
 -- Splits a :authority header (same as Host) into host and port
 local function split_authority(authority, scheme)
 	local host, port
-	local h, p = authority:match("^ *(.-):(%d+) *$")
+	local h, p = authority:match("^[ \t]*(.-):(%d+)[ \t]*$")
 	if p then
 		authority = h
-		port = tonumber(p)
+		port = tonumber(p, 10)
 	else -- when port missing from host header, it defaults to the default for that scheme
 		port = scheme_to_port[scheme]
 		if port == nil then
 			return nil, "unknown scheme"
 		end
 	end
-	local ipv6 = authority:match("%[([:%x]+)%]")
+	local ipv6 = authority:match("^%[([:%x]+)%]$")
 	if ipv6 then
 		host = ipv6
 	else
@@ -177,10 +197,9 @@ local function imf_date(time)
 	return os.date("!%a, %d %b %Y %H:%M:%S GMT", time)
 end
 
--- This pattern checks if it's argument is a valid token, if so, it returns it as is.
+-- This pattern checks if its argument is a valid token, if so, it returns it as is.
 -- Otherwise, it returns it as a quoted string (with any special characters escaped)
 local maybe_quote do
-	local EOF = lpeg.P(-1)
 	local patt = http_patts.token * EOF
 		+ lpeg.Cs(lpeg.Cc'"' * ((lpeg.S"\\\"") / "\\%0" + http_patts.qdtext)^0 * lpeg.Cc'"') * EOF
 	maybe_quote = function (s)
@@ -188,10 +207,29 @@ local maybe_quote do
 	end
 end
 
--- A pcall relative that can be yielded over in PUC 5.1
+-- A pcall-alike function that can be yielded over even in PUC 5.1
 local yieldable_pcall
--- See if pcall can be yielded over
-if coroutine.wrap(function() return pcall(coroutine.yield, true) end)() then
+--[[ If pcall can already yield, then we want to use that.
+
+However, we can't do the feature check straight away, as Openresty breaks
+coroutine.wrap in some contexts. See #98
+Openresty nominally only supports LuaJIT, which always supports a yieldable
+pcall, so we short-circuit the feature check by checking if the 'ngx' library
+is loaded, and that jit.version_num indicates LuaJIT 2.0.
+This combination ensures that we don't take the wrong branch if:
+  - lua-http is being used to mock the openresty environment
+  - openresty is compiled with something other than LuaJIT
+]]
+if (
+		package.loaded.ngx
+		and type(package.loaded.jit) == "table"
+		and type(package.loaded.jit.version_num) == "number"
+		and package.loaded.jit.version_num >= 20000
+	)
+	-- See if pcall can be yielded over
+	or coroutine.wrap(function()
+		return pcall(coroutine.yield, true) end
+	)() then
 	yieldable_pcall = pcall
 else
 	local function handle_resume(co, ok, ...)
@@ -221,6 +259,8 @@ return {
 	query_args = query_args;
 	dict_to_query = dict_to_query;
 	resolve_relative_path = resolve_relative_path;
+	is_safe_method = is_safe_method;
+	is_ip = is_ip;
 	scheme_to_port = scheme_to_port;
 	split_authority = split_authority;
 	to_authority = to_authority;
diff --git a/http/util.tld b/http/util.tld
new file mode 100644
index 0000000..e23998f
--- /dev/null
+++ b/http/util.tld
@@ -0,0 +1,15 @@
+encodeURI: (string) -> (string)
+encodeURIComponent: (string) -> (string)
+decodeURI: (string) -> (string)
+decodeURIComponent: (string) -> (string)
+query_args: (string) -> ((any) -> (string, string), any, any)
+dict_to_query: ({string:string}) -> (string)
+resolve_relative_path: (orig_path, relative_path) -> (string)
+is_safe_method: (method) -> (boolean)
+is_ip: (string) -> (boolean)
+scheme_to_port: {string:integer}
+split_authority: (string, string) -> (string, integer)|(nil, string)
+to_authority: (string, integer, string|nil) -> (string)
+imf_date: (time) -> (string)
+maybe_quote: (string) -> (string)
+yieldable_pcall: ((any*) -> (any*), any*) -> (boolean, any*)
diff --git a/http/version.lua b/http/version.lua
index e7aa26d..396dcf2 100644
--- a/http/version.lua
+++ b/http/version.lua
@@ -5,5 +5,5 @@ It should be updated as part of the release process
 
 return {
 	name = "lua-http";
-	version = "0.1";
+	version = "scm";
 }
diff --git a/http/version.tld b/http/version.tld
new file mode 100644
index 0000000..b0728b0
--- /dev/null
+++ b/http/version.tld
@@ -0,0 +1,2 @@
+name: string
+version: string
diff --git a/http/websocket.lua b/http/websocket.lua
index a7bca88..fe53f63 100644
--- a/http/websocket.lua
+++ b/http/websocket.lua
@@ -23,9 +23,9 @@ If the peer is sending *anything*, then you know they are still connected.
 ]]
 
 local basexx = require "basexx"
-local spack = string.pack or require "compat53.string".pack
-local sunpack = string.unpack or require "compat53.string".unpack
-local unpack = table.unpack or unpack -- luacheck: ignore 113
+local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143
+local sunpack = string.unpack or require "compat53.string".unpack -- luacheck: ignore 143
+local unpack = table.unpack or unpack -- luacheck: ignore 113 143
 local utf8 = utf8 or require "compat53.utf8" -- luacheck: ignore 113
 local cqueues = require "cqueues"
 local monotime = cqueues.monotime
@@ -35,6 +35,7 @@ local http_patts = require "lpeg_patterns.http"
 local rand = require "openssl.rand"
 local digest = require "openssl.digest"
 local bit = require "http.bit"
+local onerror = require "http.connection_common".onerror
 local new_headers = require "http.headers".new
 local http_request = require "http.request"
 
@@ -177,13 +178,18 @@ local function build_close(code, message, mask)
 end
 
 local function read_frame(sock, deadline)
-	local frame do
-		local first_2, err, errno = sock:xread(2, "b", deadline and (deadline-monotime()))
+	local frame, first_2 do
+		local err, errno
+		first_2, err, errno = sock:xread(2, "b", deadline and (deadline-monotime()))
 		if not first_2 then
 			return nil, err, errno
 		elseif #first_2 ~= 2 then
 			sock:seterror("r", ce.EILSEQ)
-			return nil, ce.strerror(ce.EILSEQ), ce.EILSEQ
+			local ok, errno2 = sock:unget(first_2)
+			if not ok then
+				return nil, onerror(sock, "unget", errno2)
+			end
+			return nil, onerror(sock, "read_frame", ce.EILSEQ)
 		end
 		local byte1, byte2 = first_2:byte(1, 2)
 		frame = {
@@ -200,50 +206,80 @@ local function read_frame(sock, deadline)
 		}
 	end
 
-	if frame.length == 126 then
-		local length, err, errno = sock:xread(2, "b", deadline and (deadline-monotime()))
-		if not length or #length ~= 2 then
-			if err == nil then
+	local fill_length = frame.length
+	if fill_length == 126 then
+		fill_length = 2
+	elseif fill_length == 127 then
+		fill_length = 8
+	end
+	if frame.MASK then
+		fill_length = fill_length + 4
+	end
+	do
+		local ok, err, errno = sock:fill(fill_length, 0)
+		if not ok then
+			local unget_ok1, unget_errno1 = sock:unget(first_2)
+			if not unget_ok1 then
+				return nil, onerror(sock, "unget", unget_errno1)
+			end
+			if errno == ce.ETIMEDOUT then
+				local timeout = deadline and deadline-monotime()
+				if cqueues.poll(sock, timeout) ~= timeout then
+					-- retry
+					return read_frame(sock, deadline)
+				end
+			elseif err == nil then
 				sock:seterror("r", ce.EILSEQ)
-				return nil, ce.strerror(ce.EILSEQ), ce.EILSEQ
+				return nil, onerror(sock, "read_frame", ce.EILSEQ)
 			end
 			return nil, err, errno
 		end
-		frame.length = sunpack(">I2", length)
+	end
+
+	-- if `fill` succeeded these shouldn't be able to fail
+	local extra_fill_unget
+	if frame.length == 126 then
+		extra_fill_unget = assert(sock:xread(2, "b", 0))
+		frame.length = sunpack(">I2", extra_fill_unget)
+		fill_length = fill_length - 2
 	elseif frame.length == 127 then
-		local length, err, errno = sock:xread(8, "b", deadline and (deadline-monotime()))
-		if not length or #length ~= 8 then
-			if err == nil then
+		extra_fill_unget = assert(sock:xread(8, "b", 0))
+		frame.length = sunpack(">I8", extra_fill_unget)
+		fill_length = fill_length - 8 + frame.length
+	end
+
+	if extra_fill_unget then
+		local ok, err, errno = sock:fill(fill_length, 0)
+		if not ok then
+			local unget_ok1, unget_errno1 = sock:unget(extra_fill_unget)
+			if not unget_ok1 then
+				return nil, onerror(sock, "unget", unget_errno1)
+			end
+			local unget_ok2, unget_errno2 = sock:unget(first_2)
+			if not unget_ok2 then
+				return nil, onerror(sock, "unget", unget_errno2)
+			end
+			if errno == ce.ETIMEDOUT then
+				local timeout = deadline and deadline-monotime()
+				if cqueues.poll(sock, timeout) ~= timeout then
+					-- retry
+					return read_frame(sock, deadline)
+				end
+			elseif err == nil then
 				sock:seterror("r", ce.EILSEQ)
-				return nil, ce.strerror(ce.EILSEQ), ce.EILSEQ
+				return nil, onerror(sock, "read_frame", ce.EILSEQ)
 			end
 			return nil, err, errno
 		end
-		frame.length = sunpack(">I8", length)
 	end
 
 	if frame.MASK then
-		local key, err, errno = sock:xread(4, "b", deadline and (deadline-monotime()))
-		if not key or #key ~= 4 then
-			if err == nil then
-				sock:seterror("r", ce.EILSEQ)
-				return nil, ce.strerror(ce.EILSEQ), ce.EILSEQ
-			end
-			return nil, err, errno
-		end
+		local key = assert(sock:xread(4, "b", 0))
 		frame.key = { key:byte(1, 4) }
 	end
 
 	do
-		local data, err, errno = sock:xread(frame.length, "b", deadline and (deadline-monotime()))
-		if data == nil or #data ~= frame.length then
-			if err == nil then
-				sock:seterror("r", ce.EILSEQ)
-				return nil, ce.strerror(ce.EILSEQ), ce.EILSEQ
-			end
-			return nil, err, errno
-		end
-
+		local data = assert(sock:xread(frame.length, "b", 0))
 		if frame.MASK then
 			frame.data = apply_mask(data, frame.key)
 		else
@@ -267,9 +303,9 @@ end
 
 function websocket_methods:send_frame(frame, timeout)
 	if self.readyState < 1 then
-		return nil, ce.strerror(ce.ENOTCONN), ce.ENOTCONN
+		return nil, onerror(self.socket, "send_frame", ce.ENOTCONN)
 	elseif self.readyState > 2 then
-		return nil, ce.strerror(ce.EPIPE), ce.EPIPE
+		return nil, onerror(self.socket, "send_frame", ce.EPIPE)
 	end
 	local ok, err, errno = self.socket:xwrite(build_frame(frame), "bn", timeout)
 	if not ok then
@@ -349,13 +385,13 @@ end
 
 function websocket_methods:receive(timeout)
 	if self.readyState < 1 then
-		return nil, ce.strerror(ce.ENOTCONN), ce.ENOTCONN
+		return nil, onerror(self.socket, "receive", ce.ENOTCONN)
 	elseif self.readyState > 2 then
-		return nil, ce.strerror(ce.EPIPE), ce.EPIPE
+		return nil, onerror(self.socket, "receive", ce.EPIPE)
 	end
 	local deadline = timeout and (monotime()+timeout)
 	while true do
-		local frame, err, errno = read_frame(self.socket, deadline and (deadline-monotime()))
+		local frame, err, errno = read_frame(self.socket, deadline)
 		if frame == nil then
 			return nil, err, errno
 		end
@@ -509,8 +545,6 @@ end
 
 local function new_from_uri(uri, protocols)
 	local request = http_request.new_from_uri(uri)
-	local scheme = request.headers:get(":scheme")
-	assert(scheme == "ws" or scheme == "wss", "scheme not websocket")
 	local self = new("client")
 	self.request = request
 	self.request.version = 1.1
@@ -638,6 +672,7 @@ local function handle_websocket_response(self, headers, stream)
 	-- Success!
 	assert(self.socket == nil, "websocket:connect called twice")
 	self.socket = assert(stream.connection:take_socket())
+	self.socket:onerror(onerror)
 	self.request = nil
 	self.headers = headers
 	self.readyState = 1
@@ -752,19 +787,20 @@ function websocket_methods:accept(options, timeout)
 	response_headers:upsert("sec-websocket-accept", base64_sha1(self.key .. magic))
 
 	local chosen_protocol
-	if self.protocols then
-		if options.protocols then
-			for _, protocol in ipairs(options.protocols) do
-				if self.protocols[protocol] then
-					chosen_protocol = protocol
-					break
-				end
+	if self.protocols and options.protocols then
+		--[[ The |Sec-WebSocket-Protocol| request-header field can be
+		used to indicate what subprotocols (application-level protocols
+		layered over the WebSocket Protocol) are acceptable to the client.
+		The server selects one or none of the acceptable protocols and echoes
+		that value in its handshake to indicate that it has selected that
+		protocol.]]
+		for _, protocol in ipairs(options.protocols) do
+			if self.protocols[protocol] then
+				response_headers:upsert("sec-websocket-protocol", protocol)
+				chosen_protocol = protocol
+				break
 			end
 		end
-		if not chosen_protocol then
-			return nil, "no matching protocol", ce.EILSEQNOSUPPORT
-		end
-		response_headers:upsert("sec-websocket-protocol", chosen_protocol)
 	end
 
 	do
@@ -775,6 +811,7 @@ function websocket_methods:accept(options, timeout)
 	end
 
 	self.socket = assert(self.stream.connection:take_socket())
+	self.socket:onerror(onerror)
 	self.stream = nil
 	self.readyState = 1
 	self.protocol = chosen_protocol
diff --git a/http/zlib.tld b/http/zlib.tld
new file mode 100644
index 0000000..2470fd6
--- /dev/null
+++ b/http/zlib.tld
@@ -0,0 +1,2 @@
+inflate: () -> ((string, boolean) -> (string))
+deflate: () -> ((string, boolean) -> (string))
diff --git a/spec/client_spec.lua b/spec/client_spec.lua
index 8a8fbd6..daae1e9 100644
--- a/spec/client_spec.lua
+++ b/spec/client_spec.lua
@@ -1,5 +1,6 @@
 describe("http.client module", function()
 	local client = require "http.client"
+	local http_connection_common = require "http.connection_common"
 	local http_h1_connection = require "http.h1_connection"
 	local http_h2_connection = require "http.h2_connection"
 	local http_headers = require "http.headers"
@@ -105,4 +106,26 @@ describe("http.client module", function()
 			return http_h2_connection.new(s, "server", {})
 		end)
 	end)
+	it("reports errors from :starttls", function()
+		-- default settings should fail as it shouldn't allow self-signed
+		local s, c = ca.assert(cs.pair())
+		local cq = cqueues.new();
+		cq:wrap(function()
+			local ok, err = client.negotiate(c, {
+				tls = true;
+			})
+			assert.falsy(ok)
+			assert.truthy(err:match("starttls: "))
+		end)
+		cq:wrap(function()
+			s:onerror(http_connection_common.onerror)
+			local ok, err = s:starttls()
+			assert.falsy(ok)
+			assert.truthy(err:match("starttls: "))
+		end)
+		assert_loop(cq, TEST_TIMEOUT)
+		assert.truthy(cq:empty())
+		c:close()
+		s:close()
+	end)
 end)
diff --git a/spec/compat_socket_spec.lua b/spec/compat_socket_spec.lua
index 4c791d5..c8b8f7a 100644
--- a/spec/compat_socket_spec.lua
+++ b/spec/compat_socket_spec.lua
@@ -8,7 +8,7 @@ describe("http.compat.socket module", function()
 		-- in the luasocket example they use 'wrong.host', but 'host' is now a valid TLD.
 		-- use 'wrong.invalid' instead for this test.
 		local r, e = http.request("http://wrong.invalid/")
-		assert.same(r, nil)
+		assert.same(nil, r)
 		-- in luasocket, the error is documented as "host not found", but we allow something else
 		assert.same("string", type(e))
 	end)
diff --git a/spec/cookie_spec.lua b/spec/cookie_spec.lua
new file mode 100644
index 0000000..d80c93e
--- /dev/null
+++ b/spec/cookie_spec.lua
@@ -0,0 +1,510 @@
+describe("cookie module", function()
+	local http_cookie = require "http.cookie"
+	local http_headers = require "http.headers"
+	describe(".parse_cookies", function()
+		it("can parse a request with a single cookie headers", function()
+			local h = http_headers.new()
+			h:append("cookie", "foo=FOO; bar=BAR")
+			assert.same({
+				foo = "FOO";
+				bar = "BAR";
+			}, http_cookie.parse_cookies(h))
+		end)
+		it("can parse a request with a multiple cookie headers", function()
+			local h = http_headers.new()
+			h:append("cookie", "foo=FOO; bar=BAR")
+			h:append("cookie", "baz=BAZ; bar=BAR2")
+			h:append("cookie", "qux=QUX")
+			assert.same({
+				foo = "FOO";
+				bar = "BAR2"; -- last occurence should win
+				baz = "BAZ";
+				qux = "QUX";
+			}, http_cookie.parse_cookies(h))
+		end)
+	end)
+	it(":get works", function()
+		local s = http_cookie.new_store()
+		assert.same(nil, s:get("mysite.com", "/", "lang"))
+		local key, value, params = http_cookie.parse_setcookie("lang=en-US; Expires=Wed, 09 Jun 2021 10:18:14 GMT")
+		assert(s:store("mysite.com", "/", true, true, nil, key, value, params))
+		assert.same("en-US", s:get("mysite.com", "/", "lang"))
+		assert.same(nil, s:get("other.com", "/", "lang"))
+		assert.same(nil, s:get("mysite.com", "/other", "lang"))
+		assert.same(nil, s:get("mysite.com", "/", "other"))
+	end)
+	describe("examples from spec", function()
+		it("can handle basic cookie without parameters", function()
+			local s = http_cookie.new_store()
+			assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("SID=31d4d96e407aad42")))
+			assert.same("SID=31d4d96e407aad42", s:lookup("example.com", "/", true, true))
+			assert.same("SID=31d4d96e407aad42", s:lookup("example.com", "/other", true, true))
+			assert.same("", s:lookup("subdomain.example.com", "/", true, true))
+			assert.same("", s:lookup("other.com", "/", true, true))
+		end)
+
+		it("can handle cookie with Path and Domain parameters", function()
+			local s = http_cookie.new_store()
+			assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("SID=31d4d96e407aad42; Path=/; Domain=example.com")))
+			assert.same("SID=31d4d96e407aad42", s:lookup("example.com", "/", true, true))
+			assert.same("SID=31d4d96e407aad42", s:lookup("example.com", "/other", true, true))
+			assert.same("SID=31d4d96e407aad42", s:lookup("subdomain.example.com", "/", true, true))
+			assert.same("", s:lookup("other.com", "/", true, true))
+		end)
+
+		it("can handle two cookies with different names and parameters", function()
+			local s = http_cookie.new_store()
+			assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("SID=31d4d96e407aad42; Path=/; Secure; HttpOnly")))
+			assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("lang=en-US; Path=/; Domain=example.com")))
+			assert.same("SID=31d4d96e407aad42; lang=en-US", s:lookup("example.com", "/other", true, true))
+			assert.same("lang=en-US", s:lookup("subdomain.example.com", "/", true, true))
+			assert.same("lang=en-US", s:lookup("example.com", "/", true, false))
+			assert.same("lang=en-US", s:lookup("example.com", "/", false, true))
+			assert.same("", s:lookup("other.com", "/", true, true))
+		end)
+
+		it("can expire a cookie", function()
+			local s = http_cookie.new_store()
+			s.time = function() return 1234567890 end -- set time to something before the expiry
+			-- in spec this is kept from previous example.
+			assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("SID=31d4d96e407aad42; Path=/; Secure; HttpOnly")))
+			assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("lang=en-US; Expires=Wed, 09 Jun 2021 10:18:14 GMT")))
+			assert.same("SID=31d4d96e407aad42; lang=en-US", s:lookup("example.com", "/", true, true))
+			s.time = function() return 9234567890 end -- set time to something after the expiry
+			assert.same("SID=31d4d96e407aad42", s:lookup("example.com", "/", true, true))
+		end)
+	end)
+	describe(":store uses correct domain", function()
+		it("ignores leading '.' in domain", function()
+			local s = http_cookie.new_store()
+			assert.truthy(s:store("subdomain.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=.example.com")))
+			assert.same("bar", s:get("example.com", "/", "foo"))
+		end)
+		;(http_cookie.store_methods.psl and it or pending)("checks against public suffix list", function()
+			assert(not http_cookie.store_methods.psl:is_cookie_domain_acceptable("foo.com", "com"))
+			local s = http_cookie.new_store()
+			assert.falsy(s:store("foo.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=com")))
+		end)
+		;(http_cookie.store_methods.psl and it or pending)("allows explicit domains even when on the public suffix list", function()
+			assert(http_cookie.store_methods.psl:is_public_suffix("hashbang.sh"))
+			local s = http_cookie.new_store()
+			assert.truthy(s:store("hashbang.sh", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=hashbang.sh")))
+			-- And check that host_only flag has been set to true
+			assert.same("foo=bar", s:lookup("hashbang.sh", "/", true, true))
+			assert.same("", s:lookup("sub.hashbang.sh", "/", true, true))
+		end)
+		it("doesn't domain-match a completely different domain", function()
+			local s = http_cookie.new_store()
+			assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=other.example.com")))
+		end)
+		it("doesn't domain-match a subdomain when request is at super-domain", function()
+			local s = http_cookie.new_store()
+			assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=subdomain.example.com")))
+		end)
+		it("doesn't domain-match a partial ip", function()
+			local s = http_cookie.new_store()
+			assert.falsy(s:store("127.0.0.1", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=0.0.1")))
+		end)
+	end)
+	describe("domain-match on lookup", function()
+		it("matches domains correctly when host_only flag is true", function()
+			local s = http_cookie.new_store()
+			assert.truthy(s:store("s.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar")))
+			assert.same("bar", s:get("s.example.com", "/", "foo"))
+
+			assert.same("foo=bar", s:lookup("s.example.com", "/", true, true))
+			assert.same("", s:lookup("s.s.example.com", "/", true, true))
+			assert.same("", s:lookup("s.s.s.example.com", "/", true, true))
+			assert.same("", s:lookup("com", "/", true, true))
+			assert.same("", s:lookup("example.com", "/", true, true))
+			assert.same("", s:lookup("other.com", "/", true, true))
+			assert.same("", s:lookup("s.other.com", "/", true, true))
+		end)
+		it("matches domains correctly when host_only flag is false", function()
+			local s = http_cookie.new_store()
+			assert.truthy(s:store("s.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=s.example.com")))
+			assert.same("bar", s:get("s.example.com", "/", "foo"))
+
+			assert.same("foo=bar", s:lookup("s.example.com", "/", true, true))
+			assert.same("foo=bar", s:lookup("s.s.example.com", "/", true, true))
+			assert.same("foo=bar", s:lookup("s.s.s.example.com", "/", true, true))
+			assert.same("", s:lookup("com", "/", true, true))
+			assert.same("", s:lookup("example.com", "/", true, true))
+			assert.same("", s:lookup("other.com", "/", true, true))
+			assert.same("", s:lookup("s.other.com", "/", true, true))
+		end)
+	end)
+	describe(":store uses correct path", function()
+		it("handles absolute set-cookie header", function()
+			local s = http_cookie.new_store()
+			assert.truthy(s:store("example.com", "/absolute/path", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=/different/absolute/path")))
+			assert.same("bar", s:get("example.com", "/different/absolute/path", "foo"))
+		end)
+		it("handles relative set-cookie path", function()
+			local s = http_cookie.new_store()
+			assert.truthy(s:store("example.com", "/absolute/path", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=relative/path")))
+			-- should trim off last component
+			assert.same("bar", s:get("example.com", "/absolute", "foo"))
+		end)
+		it("handles relative set-cookie path with no request path", function()
+			local s = http_cookie.new_store()
+			assert.truthy(s:store("example.com", "?", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=relative/path")))
+			-- should default to /
+			assert.same("bar", s:get("example.com", "/", "foo"))
+		end)
+		it("handles absolute set-cookie path with relative request path", function()
+			local s = http_cookie.new_store()
+			assert.truthy(s:store("example.com", "relative/path", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=/absolute/path")))
+			assert.same("bar", s:get("example.com", "/absolute/path", "foo"))
+		end)
+		it("handles relative request path and relative set-cookie header", function()
+			local s = http_cookie.new_store()
+			assert.truthy(s:store("example.com", "relative/path", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=different/relative/path")))
+			assert.same("bar", s:get("example.com", "/", "foo"))
+		end)
+	end)
+	it("matches paths correctly", function()
+		local s = http_cookie.new_store()
+		assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=/path/subpath")))
+		assert.same("foo=bar", s:lookup("example.com", "/path/subpath/foo", true, true))
+		assert.same("foo=bar", s:lookup("example.com", "/path/subpath/bar", true, true))
+		assert.same("foo=bar", s:lookup("example.com", "/path/subpath", true, true))
+		assert.same("", s:lookup("example.com", "/", true, true))
+		assert.same("", s:lookup("example.com", "/path", true, true))
+		assert.same("", s:lookup("example.com", "/path/otherpath/", true, true))
+		assert.same("", s:lookup("example.com", "/path/otherpath/things", true, true))
+	end)
+	it("prefers max-age over expires", function()
+		local s = http_cookie.new_store()
+		assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; max-age=50; Expires=Thu, 01 Jan 1970 00:00:00 GMT")))
+		assert.truthy(s:get("example.com", "/", "foo"))
+		assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; max-age=0; Expires=Tue, 19 Jan 2038 03:14:07 GMT")))
+		assert.falsy(s:get("example.com", "/", "foo"))
+	end)
+	it("supports HttpOnly attribute", function()
+		local s = http_cookie.new_store()
+		assert.falsy(s:store("example.com", "/", false, true, nil, http_cookie.parse_setcookie("foo=bar; HttpOnly")))
+		assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; HttpOnly")))
+		assert.same("", s:lookup("example.com", "/", false, true))
+		assert.same("foo=bar", s:lookup("example.com", "/", true, true))
+		-- Now try and overwrite it with non-http :store
+		assert.falsy(s:store("example.com", "/", false, true, nil, http_cookie.parse_setcookie("foo=bar")))
+	end)
+	it("supports Secure attribute", function()
+		local s = http_cookie.new_store()
+		assert.falsy(s:store("example.com", "/", true, false, nil, http_cookie.parse_setcookie("foo=bar; Secure")))
+		assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Secure")))
+		assert.same("", s:lookup("example.com", "/", true, false))
+		assert.same("foo=bar", s:lookup("example.com", "/", true, true))
+	end)
+	describe("tough cookies", function()
+		it("enforces __Secure- prefix", function()
+			local s = http_cookie.new_store()
+			assert.falsy(s:store("example.com", "/", true, false, nil, http_cookie.parse_setcookie("__Secure-foo=bar; Secure")))
+			assert.falsy(s:store("example.com", "/", true, false, nil, http_cookie.parse_setcookie("__Secure-foo=bar")))
+			assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("__Secure-foo=bar;")))
+			assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("__Secure-foo=bar; Secure")))
+		end)
+		it("enforces __Host- prefix", function()
+			local s = http_cookie.new_store()
+			-- Checks secure flag
+			assert.falsy(s:store("example.com", "/", true, false, nil, http_cookie.parse_setcookie("__Host-foo=bar; Secure")))
+			assert.falsy(s:store("example.com", "/", true, false, nil, http_cookie.parse_setcookie("__Host-foo=bar")))
+			assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("__Host-foo=bar;")))
+			-- Checks for host only flag
+			assert.falsy(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("__Host-foo=bar; Secure; Domain=example.com")))
+			-- Checks that path is /
+			assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("__Host-foo=bar; Secure; Path=/path")))
+			-- Success case
+			assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("__Host-foo=bar; Secure")))
+		end)
+	end)
+	describe("cookie fixing mitigation", function()
+		it("ignores already existing path", function()
+			local s = http_cookie.new_store()
+			assert.truthy(s:store("example.com", "/path/subpath/foo", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=/path; Secure")))
+			assert.falsy(s:store("example.com", "/path/subpath/foo", true, false, nil, http_cookie.parse_setcookie("foo=bar; Path=/path")))
+		end)
+	end)
+	describe("SameSite attribute", function()
+		it("fails to store if domain and site_for_cookies don't match", function()
+			local s = http_cookie.new_store()
+			assert.falsy(s:store("example.com", "/", true, true, "other.com", http_cookie.parse_setcookie("foo=foo; SameSite=Strict")))
+		end)
+
+		it("implements SameSite=Strict", function()
+			local s = http_cookie.new_store()
+			assert(s:store("example.com", "/", true, true, "example.com", http_cookie.parse_setcookie("foo=foo; SameSite=Strict")))
+			assert.same("foo=foo", s:lookup("example.com", "/", true, true, true, "example.com"))
+			assert.same("", s:lookup("example.com", "/", true, true, true, "other.com"))
+		end)
+
+		it("implements SameSite=Lax", function()
+			local s = http_cookie.new_store()
+			assert(s:store("example.com", "/", true, true, "example.com", http_cookie.parse_setcookie("foo=foo; SameSite=Lax")))
+			assert.same("foo=foo", s:lookup("example.com", "/", true, true, true, "example.com", true))
+			assert.same("foo=foo", s:lookup("example.com", "/", true, true, true, "other.com", true))
+			assert.same("", s:lookup("example.com", "/", true, true, false, "other.com", true))
+			assert.same("", s:lookup("example.com", "/", true, true, true, "other.com", false))
+			assert.same("", s:lookup("example.com", "/", true, true, false, "other.com", false))
+		end)
+	end)
+	it("cleans up", function()
+		local s = http_cookie.new_store()
+		assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo; Expires=Wed, 09 Jun 2021 10:18:14 GMT")))
+		assert.same("foo", s:get("example.com", "/", "foo"))
+		s.time = function() return 9876543210 end -- set time to something after the expiry
+		s:clean()
+		assert.same(nil, s:get("example.com", "/", "foo"))
+	end)
+	describe(":remove()", function()
+		it("can remove cookies by domain", function()
+			local s = http_cookie.new_store()
+			-- Try remove on empty store
+			s:remove("example.com")
+
+			assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo")))
+			assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=other; Path=/subpath")))
+			assert.truthy(s:store("other.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar")))
+			assert.same("foo", s:get("example.com", "/", "foo"))
+			assert.same("other", s:get("example.com", "/subpath", "foo"))
+			assert.same("bar", s:get("other.com", "/", "bar"))
+
+			s:remove("example.com")
+			assert.same(nil, s:get("example.com", "/", "foo"))
+			assert.same(nil, s:get("example.com", "/subpath", "foo"))
+			assert.same("bar", s:get("other.com", "/", "bar"))
+		end)
+		it("can remove cookies by path", function()
+			local s = http_cookie.new_store()
+			-- Try remove on empty store
+			s:remove("example.com", "/")
+
+			assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo")))
+			assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=other; Path=/subpath")))
+			assert.truthy(s:store("other.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar")))
+			assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("qux=qux")))
+			assert.same("foo", s:get("example.com", "/", "foo"))
+			assert.same("other", s:get("example.com", "/subpath", "foo"))
+			assert.same("bar", s:get("other.com", "/", "bar"))
+			assert.same("qux", s:get("example.com", "/", "qux"))
+
+			-- Remove all names under "/" path
+			s:remove("example.com", "/")
+			assert.same(nil, s:get("example.com", "/", "foo"))
+			assert.same("other", s:get("example.com", "/subpath", "foo"))
+			assert.same("bar", s:get("other.com", "/", "bar"))
+			assert.same(nil, s:get("example.com", "/", "qux"))
+
+			-- Remove last path in domain (making domain empty)
+			s:remove("example.com", "/subpath")
+			assert.same(nil, s:get("example.com", "/", "foo"))
+			assert.same(nil, s:get("example.com", "/subpath", "foo"))
+			assert.same("bar", s:get("other.com", "/", "bar"))
+			assert.same(nil, s:get("example.com", "/", "qux"))
+		end)
+		it("can remove cookies by name", function()
+			local s = http_cookie.new_store()
+			-- Try remove on empty store
+			s:remove("example.com", "/", "foo")
+
+			assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo")))
+			assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=other; Path=/subpath")))
+			assert.truthy(s:store("other.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar")))
+			assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("qux=qux")))
+			assert.same("foo", s:get("example.com", "/", "foo"))
+			assert.same("other", s:get("example.com", "/subpath", "foo"))
+			assert.same("bar", s:get("other.com", "/", "bar"))
+			assert.same("qux", s:get("example.com", "/", "qux"))
+
+			-- Remove just one name
+			s:remove("example.com", "/", "foo")
+			assert.same(nil, s:get("example.com", "/", "foo"))
+			assert.same("other", s:get("example.com", "/subpath", "foo"))
+			assert.same("bar", s:get("other.com", "/", "bar"))
+			assert.same("qux", s:get("example.com", "/", "qux"))
+
+			-- Remove last name in path (making path empty)
+			s:remove("example.com", "/", "qux")
+			assert.same(nil, s:get("example.com", "/", "foo"))
+			assert.same("other", s:get("example.com", "/subpath", "foo"))
+			assert.same("bar", s:get("other.com", "/", "bar"))
+			assert.same(nil, s:get("example.com", "/", "qux"))
+
+			-- Remove last name in domain (making domain empty)
+			s:remove("example.com", "/subpath", "foo")
+			assert.same(nil, s:get("example.com", "/", "foo"))
+			assert.same(nil, s:get("example.com", "/subpath", "foo"))
+			assert.same("bar", s:get("other.com", "/", "bar"))
+			assert.same(nil, s:get("example.com", "/", "qux"))
+		end)
+	end)
+	describe("cookie order", function()
+		it("returns in order for simple cookies", function() -- used as assumed base case for future tests in this section
+			local s = http_cookie.new_store()
+			assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=basic")))
+			assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=basic")))
+			assert.same("bar=basic; foo=basic", s:lookup("example.com", "/", true, true))
+		end)
+		it("returns in order for domain differing cookies", function() -- spec doesn't care about this case
+			local s = http_cookie.new_store()
+			assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=domain; Domain=sub.example.com")))
+			assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=domain; Domain=example.com")))
+			assert.same("bar=domain; foo=domain", s:lookup("sub.example.com", "/", true, true))
+		end)
+		it("returns in order for different length paths", function()
+			local s = http_cookie.new_store()
+			assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=path; Path=/path/longerpath")))
+			assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=path; Path=/path/")))
+			assert.same("foo=path; bar=path", s:lookup("example.com", "/path/longerpath", true, true))
+		end)
+		it("returns in order for different creation times", function()
+			local s = http_cookie.new_store()
+			s.time = function() return 0 end
+			assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=time")))
+			s.time = function() return 50 end
+			assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=time")))
+			assert.same("foo=time; bar=time", s:lookup("example.com", "/path/longerpath", true, true))
+		end)
+		it("returns in order when all together!", function()
+			local s = http_cookie.new_store()
+			assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=basic")))
+			assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=basic")))
+			assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=path; Path=/path/longerpath")))
+			assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=path; Path=/path/")))
+			-- foo=domain case would get overridden below
+			assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=domain; Domain=example.com")))
+			s.time = function() return 0 end
+			assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=time")))
+			s.time = function() return 50 end
+			assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=time")))
+			assert.same("foo=path; bar=path; bar=domain; bar=time; foo=time", s:lookup("sub.example.com", "/path/longerpath", true, true))
+		end)
+	end)
+	it("can store cookies from a request+response", function()
+		local s = http_cookie.new_store()
+		local req_headers = http_headers.new()
+		req_headers:append(":scheme", "http")
+		req_headers:append(":method", "GET")
+		req_headers:append(":path", "/")
+		local resp_headers = http_headers.new()
+		resp_headers:append(":status", "200")
+		resp_headers:append("set-cookie", http_cookie.bake("foo", "FOO"))
+		resp_headers:append("set-cookie", http_cookie.bake("bar", "BAR", 0))
+		assert.truthy(s:store_from_request(req_headers, resp_headers, "my.host", "my.host"))
+		assert.same("FOO", s:get("my.host", "/", "foo"))
+		assert.same(nil, s:get("my.host", "/", "bar"))
+		-- Now with an :authority header
+		req_headers:append(":authority", "my.host")
+		resp_headers:append("set-cookie", http_cookie.bake("baz", "BAZ"))
+		assert.truthy(s:store_from_request(req_headers, resp_headers, "my.host", "my.host"))
+		assert.same("FOO", s:get("my.host", "/", "foo"))
+		assert.same(nil, s:get("my.host", "/", "bar"))
+		assert.same("BAZ", s:get("my.host", "/", "baz"))
+	end)
+	it("enforces store.max_cookie_length", function()
+		local s = http_cookie.new_store()
+		s.max_cookie_length = 3
+		assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo")))
+		s.max_cookie_length = 8
+		assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo")))
+		assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=longervalue")))
+	end)
+	it("enforces store.max_cookies", function()
+		local s = http_cookie.new_store()
+		s.max_cookies = 0
+		assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo")))
+		s.max_cookies = 1
+		assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo")))
+		assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar")))
+		s:remove("example.com", "/", "foo")
+		assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar")))
+	end)
+	it("enforces store.max_cookies_per_domain", function()
+		local s = http_cookie.new_store()
+		s.max_cookies_per_domain = 0
+		assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo")))
+		s.max_cookies_per_domain = 1
+		assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo")))
+		assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar")))
+		assert.truthy(s:store("other.com", "/", true, true, nil, http_cookie.parse_setcookie("baz=baz")))
+		s:remove("example.com", "/", "foo")
+		assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar")))
+	end)
+	it("can bake cookies", function()
+		assert.same("foo=bar", http_cookie.bake("foo", "bar"))
+		assert.same("foo=bar; Max-Age=0", http_cookie.bake("foo", "bar", -math.huge))
+		assert.same("foo=bar; Expires=Thu, 01 Jan 1970 00:00:00 GMT", http_cookie.bake("foo", "bar", 0))
+		assert.same("foo=bar; Max-Age=0; Domain=example.com; Path=/path; Secure; HttpOnly; SameSite=Strict",
+			http_cookie.bake("foo", "bar", -math.huge, "example.com", "/path", true, true, "strict"))
+		assert.same("foo=bar; Max-Age=0; Domain=example.com; Path=/path; Secure; HttpOnly; SameSite=Lax",
+			http_cookie.bake("foo", "bar", -math.huge, "example.com", "/path", true, true, "lax"))
+		assert.has.errors(function()
+			http_cookie.bake("foo", "bar", -math.huge, "example.com", "/path", true, true, "somethingelse")
+		end, [[invalid value for same_site, expected "strict" or "lax"]])
+	end)
+	it("can dump a netscape format cookiejar", function()
+		local s = http_cookie.new_store()
+		assert(s:store("example.com", "/", true, true, "example.com", http_cookie.parse_setcookie("foo=FOO;")))
+		assert(s:store("example.com", "/", true, true, "example.com", http_cookie.parse_setcookie("bar=BAR; HttpOnly")))
+		assert(s:store("example.com", "/", true, true, "example.com", http_cookie.parse_setcookie("baz=BAZ; Path=/someplace")))
+		assert(s:store("sub.example.com", "/", true, true, "sub.example.com", http_cookie.parse_setcookie("subdomain=matched; Domain=sub.example.com")))
+		assert(s:store("example.com", "/", true, true, "example.com", http_cookie.parse_setcookie("qux=QUX; SameSite=Lax")))
+		assert(s:store("other.com", "/", true, true, "other.com", http_cookie.parse_setcookie("foo=somethingelse; HttpOnly")))
+		local file = io.tmpfile()
+		assert(s:save_to_file(file))
+		assert(file:seek("set"))
+		-- preamble
+		assert.truthy(assert(file:read("*l")):match"^#.*HTTP Cookie File")
+		assert.truthy(assert(file:read("*l")):match"^#")
+		assert.same("", assert(file:read("*l")))
+		local lines = {}
+		for line in file:lines() do
+			table.insert(lines, line)
+		end
+		table.sort(lines)
+		assert.same({
+			"#HttpOnly_example.com	TRUE	/	FALSE	2147483647	bar	BAR";
+			"#HttpOnly_other.com	TRUE	/	FALSE	2147483647	foo	somethingelse";
+			"example.com	TRUE	/	FALSE	2147483647	foo	FOO";
+			"example.com	TRUE	/	FALSE	2147483647	qux	QUX";
+			"example.com	TRUE	/someplace	FALSE	2147483647	baz	BAZ";
+			"sub.example.com	FALSE	/	FALSE	2147483647	subdomain	matched";
+		}, lines)
+	end)
+	it("can load a netscape format cookiejar", function()
+		local s = http_cookie.new_store()
+		local file = io.tmpfile()
+		assert(file:write([[
+# Netscape HTTP Cookie File
+# https://curl.haxx.se/docs/http-cookies.html
+# This file was generated by libcurl! Edit at your own risk.
+
+#HttpOnly_other.com	TRUE	/	FALSE	2147483647	foo	somethingelse
+sub.example.com	FALSE	/	FALSE	2147483647	subdomain	matched
+example.com	TRUE	/	TRUE	2147483647	qux	QUX
+#HttpOnly_example.com	TRUE	/	FALSE	2147483647	bar	BAR
+example.com	TRUE	/	FALSE	2147483647	foo	FOO
+example.com	TRUE	/someplace	FALSE	2147483647	baz	BAZ
+]]))
+		assert(file:seek("set"))
+		assert(s:load_from_file(file))
+		assert.same("bar=BAR; foo=FOO; qux=QUX", s:lookup("example.com", "/", true, true))
+	end)
+	it("can load a netscape format cookiejar with invalid lines", function()
+		local s = http_cookie.new_store()
+		local file = io.tmpfile()
+		assert(file:write([[
+example.com	TRUE	/	TRUE	2147483647	qux	QUX
+not a valid line
+example.com	INVALID_BOOLEAN	/	FALSE	2147483647	should	fail
+example.com	TRUE	/	INVALID_BOOLEAN	2147483647	should	fail
+example.com	TRUE	/	FALSE	not_a_number	should	fail
+#HttpOnly_example.com	TRUE	/	FALSE	2147483647	bar	BAR
+example.com	TRUE	/	FALSE	2147483647	foo	FOO
+]]))
+		assert(file:seek("set"))
+		assert(s:load_from_file(file))
+		assert.same("bar=BAR; foo=FOO; qux=QUX", s:lookup("example.com", "/", true, true))
+	end)
+end)
diff --git a/spec/h1_connection_spec.lua b/spec/h1_connection_spec.lua
index fa1d001..d563310 100644
--- a/spec/h1_connection_spec.lua
+++ b/spec/h1_connection_spec.lua
@@ -113,23 +113,25 @@ describe("low level http 1 connection operations", function()
 		local function test(chunk)
 			local s, c = new_pair(1.1)
 			s = s:take_socket()
-			assert(s:write(chunk, "\r\n"))
-			assert(s:flush())
-			assert.same(ce.EILSEQ, select(3, c:read_request_line()))
+			assert(s:xwrite(chunk, "n", TEST_TIMEOUT))
 			s:close()
+			assert.same(ce.EILSEQ, select(3, c:read_request_line(TEST_TIMEOUT)))
 			c:close()
 		end
-		test("invalid request line")
-		test(" / HTTP/1.1")
-		test("HTTP/1.1")
-		test("GET HTTP/1.0")
-		test("GET  HTTP/1.0")
-		test("GET HTTP/1.0")
-		test("GET / HTP/1.1")
-		test("GET / HTTP 1.1")
-		test("GET / HTTP/1")
-		test("GET / HTTP/2.0")
-		test("GET / HTTP/1.1\nHeader: value") -- missing \r
+		test("GET") -- no \r\n
+	test("\r\nGET") -- no \r\n with preceding \r\n
+		test("invalid request line\r\n")
+		test(" / HTTP/1.1\r\n")
+		test("\r\n / HTTP/1.1\r\n")
+		test("HTTP/1.1\r\n")
+		test("GET HTTP/1.0\r\n")
+		test("GET  HTTP/1.0\r\n")
+		test("GET HTTP/1.0\r\n")
+		test("GET / HTP/1.1\r\n")
+		test("GET / HTTP 1.1\r\n")
+		test("GET / HTTP/1\r\n")
+		test("GET / HTTP/2.0\r\n")
+		test("GET / HTTP/1.1\nHeader: value\r\n") -- missing \r
 	end)
 	it(":read_request_line should allow a leading CRLF", function()
 		local function test(chunk)
@@ -142,6 +144,40 @@ describe("low level http 1 connection operations", function()
 		end
 		test("\r\nGET / HTTP/1.1\r\n")
 	end)
+	describe("overlong lines", function()
+		it(":read_request_line", function()
+			local s, c = new_pair(1.1)
+			s = s:take_socket()
+			assert(s:xwrite(("a"):rep(10000), "n"))
+			assert.same(ce.EILSEQ, select(3, c:read_request_line(TEST_TIMEOUT)))
+			s:close()
+			c:close()
+		end)
+		it(":read_status_line", function()
+			local s, c = new_pair(1.1)
+			s = s:take_socket()
+			assert(s:xwrite(("a"):rep(10000), "n"))
+			assert.same(ce.EILSEQ, select(3, c:read_status_line(TEST_TIMEOUT)))
+			s:close()
+			c:close()
+		end)
+		it(":read_header", function()
+			local s, c = new_pair(1.1)
+			s = s:take_socket()
+			assert(s:xwrite(("a"):rep(10000), "n"))
+			assert.same(ce.EILSEQ, select(3, c:read_header(TEST_TIMEOUT)))
+			s:close()
+			c:close()
+		end)
+		it(":read_body_chunk", function()
+			local s, c = new_pair(1.1)
+			s = s:take_socket()
+			assert(s:xwrite(("a"):rep(10000), "n"))
+			assert.same(ce.EILSEQ, select(3, c:read_body_chunk(TEST_TIMEOUT)))
+			s:close()
+			c:close()
+		end)
+	end)
 	it("status line should round trip", function()
 		local function test(req_version, req_status, req_reason)
 			local s, c = new_pair(req_version)
@@ -494,6 +530,49 @@ describe("low level http 1 connection operations", function()
 		s:close()
 		c:close()
 	end)
+	it(":read_body_chunk fails on invalid chunk", function()
+		local function test(chunk, expected_errno)
+			local s, c = new_pair(1.1)
+			s = s:take_socket()
+			assert(s:xwrite(chunk, "n", TEST_TIMEOUT))
+			s:close()
+			local data, _, errno = c:read_body_chunk(TEST_TIMEOUT)
+			assert.same(nil, data)
+			assert.same(expected_errno, errno)
+			c:close()
+		end
+		test("", nil)
+		test("5", ce.EILSEQ)
+		test("5\r", ce.EILSEQ)
+		test("fffffffffffffff\r\n", ce.E2BIG)
+		test("not a number\r\n", ce.EILSEQ)
+		test("4\r\n1", ce.EILSEQ)
+		test("4\r\nfour\n", ce.EILSEQ)
+		test("4\r\nlonger than four", ce.EILSEQ)
+		test("4\r\nfour\nmissing \r", ce.EILSEQ)
+	end)
+	it(":read_body_chunk is cqueues thread-safe", function()
+		local s, c = new_pair(1.1)
+		s = s:take_socket()
+		local cq = cqueues.new()
+		cq:wrap(function()
+			local chunk = assert(c:read_body_chunk())
+			assert.same("bytes", chunk)
+		end)
+		cq:wrap(function()
+			assert(s:xwrite("5\r\n", "bn"))
+			cqueues.sleep(0.001) -- let other thread block on reading chunk body
+			assert(s:xwrite("chars\r\n", "bn"))
+			local chunk = assert(c:read_body_chunk())
+			assert.same("chars", chunk)
+			-- send a 2nd frame
+			assert(s:xwrite("5\r\nbytes\r\n", "bn"))
+			s:close()
+		end)
+		assert_loop(cq, TEST_TIMEOUT)
+		assert.truthy(cq:empty())
+		c:close()
+	end)
 end)
 describe("high level http1 connection operations", function()
 	local h1_connection = require "http.h1_connection"
diff --git a/spec/h1_stream_spec.lua b/spec/h1_stream_spec.lua
index 89913ed..f9cfea9 100644
--- a/spec/h1_stream_spec.lua
+++ b/spec/h1_stream_spec.lua
@@ -12,13 +12,27 @@ describe("http1 stream", function()
 		c = h1_connection.new(c, "client", version)
 		return s, c
 	end
+	it("allows resuming :read_headers", function()
+		local server, client = new_pair(1.1)
+		client = client:take_socket()
+		assert(client:xwrite("GET / HTTP/1.1\r\n", "n"))
+		local stream = server:get_next_incoming_stream()
+		assert.same(ce.ETIMEDOUT, select(3, stream:read_headers(0.001)))
+		assert(client:xwrite("Foo: bar\r\n", "n"))
+		assert.same(ce.ETIMEDOUT, select(3, stream:read_headers(0.001)))
+		assert(client:xwrite("\r\n", "n"))
+		local h = assert(stream:read_headers(0.01))
+		assert.same("/", h:get(":path"))
+		assert.same("bar", h:get("foo"))
+	end)
 	it("Writing to a shutdown connection returns EPIPE", function()
 		local server, client = new_pair(1.1)
 		local stream = client:new_stream()
 		client:shutdown()
 		local headers = new_headers()
-		headers:append(":authority", "myauthority")
 		headers:append(":method", "GET")
+		headers:append(":scheme", "http")
+		headers:append(":authority", "myauthority")
 		headers:append(":path", "/a")
 		assert.same(ce.EPIPE, select(3, stream:write_headers(headers, true)))
 		client:close()
@@ -30,8 +44,9 @@ describe("http1 stream", function()
 		cq:wrap(function()
 			local stream = client:new_stream()
 			local req_headers = new_headers()
-			req_headers:append(":authority", "myauthority")
 			req_headers:append(":method", "GET")
+			req_headers:append(":scheme", "http")
+			req_headers:append(":authority", "myauthority")
 			req_headers:append(":path", "/a")
 			assert(stream:write_headers(req_headers, true))
 			local res_headers = assert(stream:get_headers())
@@ -78,8 +93,9 @@ describe("http1 stream", function()
 		local server, client = new_pair(1.1)
 		local stream = client:new_stream()
 		local headers = new_headers()
-		headers:append(":authority", "myauthority")
 		headers:append(":method", "GET")
+		headers:append(":scheme", "http")
+		headers:append(":authority", "myauthority")
 		headers:append(":path", "/a")
 		assert(stream:write_headers(headers, true))
 		local cq = cqueues.new():wrap(function()
@@ -96,8 +112,9 @@ describe("http1 stream", function()
 		cq:wrap(function()
 			local stream = client:new_stream()
 			local req_headers = new_headers()
-			req_headers:append(":authority", "myauthority")
 			req_headers:append(":method", "GET")
+			req_headers:append(":scheme", "http")
+			req_headers:append(":authority", "myauthority")
 			req_headers:append(":path", "/a")
 			assert(stream:write_headers(req_headers, true))
 			local res_headers = assert(stream:get_headers())
@@ -122,8 +139,9 @@ describe("http1 stream", function()
 		cq:wrap(function()
 			local stream = client:new_stream()
 			local req_headers = new_headers()
-			req_headers:append(":authority", "myauthority")
 			req_headers:append(":method", "GET")
+			req_headers:append(":scheme", "http")
+			req_headers:append(":authority", "myauthority")
 			req_headers:append(":path", "/a")
 			assert(stream:write_headers(req_headers, true))
 			assert(stream:get_headers())
@@ -153,8 +171,9 @@ describe("http1 stream", function()
 		cq:wrap(function()
 			local stream = client:new_stream()
 			local headers = new_headers()
-			headers:append(":authority", "myauthority")
 			headers:append(":method", "GET")
+			headers:append(":scheme", "http")
+			headers:append(":authority", "myauthority")
 			headers:append(":path", "/a")
 			headers:append("transfer-encoding", "chunked")
 			assert(stream:write_headers(headers, false))
@@ -176,14 +195,37 @@ describe("http1 stream", function()
 		server:close()
 		client:close()
 	end)
+	it("doesn't return from last get_next_chunk until trailers are read", function()
+		local server, client = new_pair(1.1)
+		assert(client:write_request_line("GET", "/a", client.version, TEST_TIMEOUT))
+		assert(client:write_header("transfer-encoding", "chunked", TEST_TIMEOUT))
+		assert(client:write_headers_done(TEST_TIMEOUT))
+		assert(client:write_body_chunk("foo", nil, TEST_TIMEOUT))
+		assert(client:write_body_last_chunk(nil, TEST_TIMEOUT))
+		assert(client:write_header("sometrailer", "bar", TEST_TIMEOUT))
+		assert(client:flush(TEST_TIMEOUT))
+		local server_stream = server:get_next_incoming_stream(0.01)
+		assert(server_stream:get_headers(0.01))
+		assert.same("foo", server_stream:get_next_chunk(0.01))
+		-- Shouldn't return `nil` (indicating EOF) until trailers are completely read.
+		assert.same(ce.ETIMEDOUT, select(3, server_stream:get_next_chunk(0.01)))
+		assert.same(ce.ETIMEDOUT, select(3, server_stream:get_headers(0.01)))
+		assert(client:write_headers_done(TEST_TIMEOUT))
+		assert.same({}, {server_stream:get_next_chunk(0.01)})
+		local trailers = assert(server_stream:get_headers(0))
+		assert.same("bar", trailers:get("sometrailer"))
+		server:close()
+		client:close()
+	end)
 	it("waits for trailers when :get_headers is run in a second thread", function()
 		local server, client = new_pair(1.1)
 		local cq = cqueues.new()
 		cq:wrap(function()
 			local stream = client:new_stream()
 			local headers = new_headers()
-			headers:append(":authority", "myauthority")
 			headers:append(":method", "GET")
+			headers:append(":scheme", "http")
+			headers:append(":authority", "myauthority")
 			headers:append(":path", "/a")
 			headers:append("transfer-encoding", "chunked")
 			assert(stream:write_headers(headers, false))
@@ -213,8 +255,9 @@ describe("http1 stream", function()
 			do
 				local stream = client:new_stream()
 				local headers = new_headers()
-				headers:append(":authority", "myauthority")
 				headers:append(":method", "GET")
+				headers:append(":scheme", "http")
+				headers:append(":authority", "myauthority")
 				headers:append(":path", "/a")
 				headers:append("content-length", "100")
 				assert(stream:write_headers(headers, false))
@@ -223,8 +266,9 @@ describe("http1 stream", function()
 			do
 				local stream = client:new_stream()
 				local headers = new_headers()
-				headers:append(":authority", "myauthority")
 				headers:append(":method", "GET")
+				headers:append(":scheme", "http")
+				headers:append(":authority", "myauthority")
 				headers:append(":path", "/b")
 				headers:append("content-length", "0")
 				assert(stream:write_headers(headers, true))
@@ -273,24 +317,35 @@ describe("http1 stream", function()
 			while z:get_next_chunk() do end
 			streams[zh:get(":path")] = z
 		end)
+		local client_sync = cc.new()
 		cq:wrap(function()
+			if client_sync then client_sync:wait() end
 			local a = client:new_stream()
 			local ah = new_headers()
-			ah:append(":authority", "myauthority")
 			ah:append(":method", "GET")
+			ah:append(":scheme", "http")
+			ah:append(":authority", "myauthority")
 			ah:append(":path", "/a")
 			assert(a:write_headers(ah, true))
+		end)
+		cq:wrap(function()
+			client_sync:signal(); client_sync = nil;
 			local b = client:new_stream()
 			local bh = new_headers()
-			bh:append(":authority", "myauthority")
 			bh:append(":method", "POST")
+			bh:append(":scheme", "http")
+			bh:append(":authority", "myauthority")
 			bh:append(":path", "/b")
 			assert(b:write_headers(bh, false))
+			cqueues.sleep(0.01)
 			assert(b:write_chunk("this is some POST data", true))
+		end)
+		cq:wrap(function()
 			local c = client:new_stream()
 			local ch = new_headers()
-			ch:append(":authority", "myauthority")
 			ch:append(":method", "GET")
+			ch:append(":scheme", "http")
+			ch:append(":authority", "myauthority")
 			ch:append(":path", "/c")
 			assert(c:write_headers(ch, true))
 		end)
@@ -298,20 +353,21 @@ describe("http1 stream", function()
 		assert.truthy(cq:empty())
 		-- All requests read; now for responses
 		-- Don't want /a to be first.
-		local sync = cc.new()
+		local server_sync = cc.new()
 		cq:wrap(function()
-			if sync then sync:wait() end
+			if server_sync then server_sync:wait() end
 			local h = new_headers()
 			h:append(":status", "200")
 			assert(streams["/a"]:write_headers(h, true))
 		end)
 		cq:wrap(function()
-			sync:signal(1); sync = nil;
+			server_sync:signal(); server_sync = nil;
 			local h = new_headers()
 			h:append(":status", "200")
 			assert(streams["/b"]:write_headers(h, true))
 		end)
 		cq:wrap(function()
+			if server_sync then server_sync:wait() end
 			local h = new_headers()
 			h:append(":status", "200")
 			assert(streams["/c"]:write_headers(h, true))
@@ -321,14 +377,88 @@ describe("http1 stream", function()
 		server:close()
 		client:close()
 	end)
+	it("modifying pipelined headers doesn't affect what's sent", function()
+		local server, client = new_pair(1.1)
+		local cq = cqueues.new()
+		cq:wrap(function()
+			local a = client:new_stream()
+			local b = client:new_stream()
+			local c = client:new_stream()
+
+			do
+				local h = new_headers()
+				h:append(":method", "POST")
+				h:append(":scheme", "http")
+				h:append(":authority", "myauthority")
+				h:append(":path", "/")
+				h:upsert("id", "a")
+				assert(a:write_headers(h, false))
+				cq:wrap(function()
+					cq:wrap(function()
+						cq:wrap(function()
+							assert(a:write_chunk("a", true))
+						end)
+						h:upsert("id", "c")
+						assert(c:write_headers(h, false))
+						assert(c:write_chunk("c", true))
+					end)
+					h:upsert("id", "b")
+					assert(b:write_headers(h, false))
+					assert(b:write_chunk("b", true))
+				end)
+			end
+			do
+				local h = assert(a:get_headers())
+				assert.same("a", h:get "id")
+			end
+			do
+				local h = assert(b:get_headers())
+				assert.same("b", h:get "id")
+			end
+			do
+				local h = assert(c:get_headers())
+				assert.same("c", h:get "id")
+			end
+		end)
+		cq:wrap(function()
+			local h = new_headers()
+			h:append(":status", "200")
+
+			local a = assert(server:get_next_incoming_stream())
+			assert.same("a", assert(a:get_headers()):get "id")
+			assert.same("a", a:get_body_as_string())
+			cq:wrap(function()
+				h:upsert("id", "a")
+				assert(a:write_headers(h, true))
+			end)
+
+			local b = assert(server:get_next_incoming_stream())
+			assert.same("b", assert(b:get_headers()):get "id")
+			assert.same("b", b:get_body_as_string())
+			h:upsert("id", "b")
+			assert(b:write_headers(h, true))
+
+			local c = assert(server:get_next_incoming_stream())
+			assert.same("c", assert(c:get_headers()):get "id")
+			assert.same("c", c:get_body_as_string())
+			assert(c:get_headers())
+			h:upsert("id", "c")
+			assert(c:write_headers(h, true))
+		end)
+		assert_loop(cq, TEST_TIMEOUT)
+		assert.truthy(cq:empty())
+		server:close()
+		client:close()
+	end)
 	it("allows 100 continue", function()
 		local server, client = new_pair(1.1)
 		local cq = cqueues.new()
 		cq:wrap(function()
 			local a = client:new_stream()
 			local h = new_headers()
-			h:append(":authority", "myauthority")
 			h:append(":method", "POST")
+			h:append(":scheme", "http")
+			h:append(":authority", "myauthority")
 			h:append(":path", "/a")
 			h:append("expect", "100-continue")
 			assert(a:write_headers(h, false))
@@ -360,8 +490,9 @@ describe("http1 stream", function()
 		cq:wrap(function()
 			local a = client:new_stream()
 			local h = new_headers()
-			h:append(":authority", "myauthority")
 			h:append(":method", "GET")
+			h:append(":scheme", "http")
+			h:append(":authority", "myauthority")
 			h:append(":path", "/")
 			assert(a:write_headers(h, true))
 		end)
diff --git a/spec/h2_connection_spec.lua b/spec/h2_connection_spec.lua
index f4ceeef..0b7b87a 100644
--- a/spec/h2_connection_spec.lua
+++ b/spec/h2_connection_spec.lua
@@ -54,8 +54,16 @@ describe("http2 connection", function()
 		test_preface("PRI * HTTP/2.0\r\n\r\nSM\r\n\r") -- missing last \n
 		test_preface(("long string"):rep(1000))
 	end)
+	it("Doesn't busy-loop looking for #preface", function()
+		local s, c = ca.assert(cs.pair())
+		s = assert(h2_connection.new(s, "server"))
+		assert(s:step(0))
+		assert.not_same(0, (s:timeout()))
+		c:close()
+		s:close()
+	end)
 	it("read_http2_frame fails with EILSEQ on corrupt frame", function()
-		local spack = string.pack or require "compat53.string".pack
+		local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143
 		local s, c = ca.assert(cs.pair())
 		local cq = cqueues.new()
 		cq:wrap(function()
@@ -71,6 +79,36 @@ describe("http2 connection", function()
 		assert_loop(cq, TEST_TIMEOUT)
 		assert.truthy(cq:empty())
 	end)
+	it("read_http2_frame is cqueues thread-safe", function()
+		local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143
+		local s, c = ca.assert(cs.pair())
+		c = assert(h2_connection.new(c, "client"))
+		local cq = cqueues.new()
+		cq:wrap(function()
+			local typ, flags, id, payload = assert(c:read_http2_frame())
+			assert.same(0, typ)
+			assert.same(0, flags)
+			assert.same(0, id)
+			assert.same("ninebytes", payload)
+		end)
+		cq:wrap(function()
+			local frame_header = spack(">I3 B B I4", 9, 0, 0, 0)
+			assert(s:xwrite(frame_header .. "nine", "bn"))
+			cqueues.sleep(0.001) -- let other thread block on reading frame body
+			assert(s:xwrite("chars", "bn"))
+			local typ, flags, id, payload = assert(c:read_http2_frame())
+			assert.same(0, typ)
+			assert.same(0, flags)
+			assert.same(0, id)
+			assert.same("ninechars", payload)
+			-- send a 2nd frame
+			assert(s:xwrite(frame_header .. "ninebytes", "bn"))
+			s:close()
+		end)
+		assert_loop(cq, TEST_TIMEOUT)
+		assert.truthy(cq:empty())
+		c:close()
+	end)
 	it("Can #ping back and forth", function()
 		local s, c = new_pair()
 		local cq = cqueues.new()
@@ -110,25 +148,27 @@ describe("http2 connection", function()
 		assert_loop(cq, TEST_TIMEOUT)
 		assert.truthy(cq:empty())
 	end)
-	it("can send a body", function()
+	it("streams used out of order", function()
 		local s, c = new_pair()
 		local cq = cqueues.new()
 		cq:wrap(function()
-			local client_stream = c:new_stream()
+			local client_stream1 = c:new_stream()
+			local client_stream2 = c:new_stream()
 			local req_headers = new_headers()
 			req_headers:append(":method", "GET")
 			req_headers:append(":scheme", "http")
-			req_headers:append(":path", "/")
-			-- use non-integer timeouts to catch errors with integer vs number
-			assert(client_stream:write_headers(req_headers, false, 1.1))
-			assert(client_stream:write_chunk("some body", false, 1.1))
-			assert(client_stream:write_chunk("more body", true, 1.1))
+			req_headers:append(":path", "/2")
+			assert(client_stream2:write_headers(req_headers, true))
+			req_headers:upsert(":path", "/1")
+			assert(client_stream1:write_headers(req_headers, true))
 			assert(c:close())
 		end)
 		cq:wrap(function()
-			local stream = assert(s:get_next_incoming_stream())
-			local body = assert(stream:get_body_as_string(1.1))
-			assert.same("some bodymore body", body)
+			for i=1, 2 do
+				local stream = assert(s:get_next_incoming_stream())
+				local headers = assert(stream:get_headers())
+				assert(string.format("/%d", i), headers:get(":path"))
+			end
 			assert(s:close())
 		end)
 		assert_loop(cq, TEST_TIMEOUT)
@@ -149,12 +189,12 @@ describe("http2 connection", function()
 			cq:wrap(function()
 				ok = ok + 1
 				if ok == 2 then cond:signal() end
-				assert(c.peer_flow_credits_increase:wait(TEST_TIMEOUT/2), "no connection credits")
+				assert(c.peer_flow_credits_change:wait(TEST_TIMEOUT/2), "no connection credits")
 			end)
 			cq:wrap(function()
 				ok = ok + 1
 				if ok == 2 then cond:signal() end
-				assert(client_stream.peer_flow_credits_increase:wait(TEST_TIMEOUT/2), "no stream credits")
+				assert(client_stream.peer_flow_credits_change:wait(TEST_TIMEOUT/2), "no stream credits")
 			end)
 			cond:wait() -- wait for above threads to get scheduled
 			assert(client_stream:write_chunk(("really long string"):rep(1e4), true))
@@ -252,84 +292,4 @@ describe("http2 connection", function()
 			c:close()
 		end)
 	end)
-	describe("correct state transitions", function()
-		it("closes a stream when writing headers to a half-closed stream", function()
-			local s, c = new_pair()
-			local cq = cqueues.new()
-			cq:wrap(function()
-				local client_stream = c:new_stream()
-				local req_headers = new_headers()
-				req_headers:append(":method", "GET")
-				req_headers:append(":scheme", "http")
-				req_headers:append(":path", "/")
-				req_headers:append(":authority", "example.com")
-				assert(client_stream:write_headers(req_headers, false))
-				assert(client_stream:get_headers())
-				assert(c:close())
-			end)
-			cq:wrap(function()
-				local stream = assert(s:get_next_incoming_stream())
-				assert(stream:get_headers())
-				local res_headers = new_headers()
-				res_headers:append(":status", "200")
-				assert(stream:write_headers(res_headers, true))
-				assert("closed", stream.state)
-				assert(s:close())
-			end)
-			assert_loop(cq, TEST_TIMEOUT)
-			assert.truthy(cq:empty())
-		end)
-	end)
-	describe("push_promise", function()
-		it("permits a simple push promise from server => client", function()
-			local s, c = new_pair()
-			local cq = cqueues.new()
-			cq:wrap(function()
-				local client_stream = c:new_stream()
-				local req_headers = new_headers()
-				req_headers:append(":method", "GET")
-				req_headers:append(":scheme", "http")
-				req_headers:append(":path", "/")
-				req_headers:append(":authority", "example.com")
-				assert(client_stream:write_headers(req_headers, true))
-				local pushed_stream = assert(c:get_next_incoming_stream())
-				do
-					local h = assert(pushed_stream:get_headers())
-					assert.same("GET", h:get(":method"))
-					assert.same("http", h:get(":scheme"))
-					assert.same("/foo", h:get(":path"))
-					assert.same(req_headers:get(":authority"), h:get(":authority"))
-					assert.same(nil, pushed_stream:get_next_chunk())
-				end
-				assert(c:close())
-			end)
-			cq:wrap(function()
-				local stream = assert(s:get_next_incoming_stream())
-				do
-					local h = assert(stream:get_headers())
-					assert.same("GET", h:get(":method"))
-					assert.same("http", h:get(":scheme"))
-					assert.same("/", h:get(":path"))
-					assert.same("example.com", h:get(":authority"))
-					assert.same(nil, stream:get_next_chunk())
-				end
-				local pushed_stream do
-					local req_headers = new_headers()
-					req_headers:append(":method", "GET")
-					req_headers:append(":scheme", "http")
-					req_headers:append(":path", "/foo")
-					req_headers:append(":authority", "example.com")
-					pushed_stream = assert(stream:push_promise(req_headers))
-				end
-				do
-					local req_headers = new_headers()
-					req_headers:append(":status", "200")
-					assert(pushed_stream:write_headers(req_headers, true))
-				end
-				assert(s:close())
-			end)
-			assert_loop(cq, TEST_TIMEOUT)
-			assert.truthy(cq:empty())
-		end)
-	end)
 end)
diff --git a/spec/h2_stream_spec.lua b/spec/h2_stream_spec.lua
new file mode 100644
index 0000000..1cfed79
--- /dev/null
+++ b/spec/h2_stream_spec.lua
@@ -0,0 +1,268 @@
+describe("http.h2_stream", function()
+	local h2_connection = require "http.h2_connection"
+	local h2_error = require "http.h2_error"
+	local new_headers = require "http.headers".new
+	local cqueues = require "cqueues"
+	local ca = require "cqueues.auxlib"
+	local cs = require "cqueues.socket"
+	local function new_pair()
+		local s, c = ca.assert(cs.pair())
+		s = assert(h2_connection.new(s, "server"))
+		c = assert(h2_connection.new(c, "client"))
+		return s, c
+	end
+	it("rejects header fields with uppercase characters", function()
+		local s, c = new_pair()
+		local client_stream = c:new_stream()
+		local req_headers = new_headers()
+		req_headers:append(":method", "GET")
+		req_headers:append(":scheme", "http")
+		req_headers:append(":path", "/")
+		req_headers:append("Foo", "bar")
+		assert.has.errors(function()
+			client_stream:write_headers(req_headers, false, 0)
+		end)
+		c:close()
+		s:close()
+	end)
+	it("breaks up a large header block into continuation frames", function()
+		local s, c = new_pair()
+		local cq = cqueues.new()
+		local req_headers = new_headers()
+		req_headers:append(":method", "GET")
+		req_headers:append(":scheme", "http")
+		req_headers:append(":path", "/")
+		req_headers:append("unknown", ("a"):rep(16384*3)) -- at least 3 frames worth
+		cq:wrap(function()
+			local client_stream = c:new_stream()
+			assert(client_stream:write_headers(req_headers, true))
+			assert(c:close())
+		end)
+		cq:wrap(function()
+			local stream = assert(s:get_next_incoming_stream())
+			local response_headers = assert(stream:get_headers())
+			assert.same(req_headers, response_headers)
+			assert(s:close())
+		end)
+		assert_loop(cq, TEST_TIMEOUT)
+		assert.truthy(cq:empty())
+	end)
+	it("can send a body", function()
+		local s, c = new_pair()
+		local cq = cqueues.new()
+		cq:wrap(function()
+			local client_stream = c:new_stream()
+			local req_headers = new_headers()
+			req_headers:append(":method", "GET")
+			req_headers:append(":scheme", "http")
+			req_headers:append(":path", "/")
+			-- use non-integer timeouts to catch errors with integer vs number
+			assert(client_stream:write_headers(req_headers, false, 1.1))
+			assert(client_stream:write_chunk("some body", false, 1.1))
+			assert(client_stream:write_chunk("more body", true, 1.1))
+			assert(c:close())
+		end)
+		cq:wrap(function()
+			local stream = assert(s:get_next_incoming_stream())
+			local body = assert(stream:get_body_as_string(1.1))
+			assert.same("some bodymore body", body)
+			assert(s:close())
+		end)
+		assert_loop(cq, TEST_TIMEOUT)
+		assert.truthy(cq:empty())
+	end)
+	it("errors if content-length is exceeded", function()
+		local s, c = new_pair()
+		local cq = cqueues.new()
+		cq:wrap(function()
+			local client_stream = c:new_stream()
+			local req_headers = new_headers()
+			req_headers:append(":method", "GET")
+			req_headers:append(":scheme", "http")
+			req_headers:append(":path", "/")
+			req_headers:append("content-length", "2")
+			assert(client_stream:write_headers(req_headers, false))
+			assert(client_stream:write_chunk("body longer than 2 bytes", true))
+		end)
+		cq:wrap(function()
+			local stream = assert(s:get_next_incoming_stream())
+			local ok, err = stream:get_body_as_string()
+			assert.falsy(ok)
+			assert.truthy(h2_error.is(err))
+			assert.same(h2_error.errors.PROTOCOL_ERROR.code, err.code)
+			assert.same("content-length exceeded", err.message)
+			assert(s:close())
+		end)
+		assert_loop(cq, TEST_TIMEOUT)
+		assert.truthy(cq:empty())
+		c:close()
+	end)
+	describe("correct state transitions", function()
+		it("closes a stream when writing headers to a half-closed stream", function()
+			local s, c = new_pair()
+			local cq = cqueues.new()
+			cq:wrap(function()
+				local client_stream = c:new_stream()
+				local req_headers = new_headers()
+				req_headers:append(":method", "GET")
+				req_headers:append(":scheme", "http")
+				req_headers:append(":path", "/")
+				req_headers:append(":authority", "example.com")
+				assert(client_stream:write_headers(req_headers, false))
+				assert(client_stream:get_headers())
+				assert(c:close())
+			end)
+			cq:wrap(function()
+				local stream = assert(s:get_next_incoming_stream())
+				assert(stream:get_headers())
+				local res_headers = new_headers()
+				res_headers:append(":status", "200")
+				assert(stream:write_headers(res_headers, true))
+				assert("closed", stream.state)
+				assert(s:close())
+			end)
+			assert_loop(cq, TEST_TIMEOUT)
+			assert.truthy(cq:empty())
+		end)
+		it("ignores delayed RST_STREAM on already closed stream", function()
+			local s, c = new_pair()
+			local cq = cqueues.new()
+			cq:wrap(function()
+				local client_stream = c:new_stream()
+				local req_headers = new_headers()
+				req_headers:append(":method", "GET")
+				req_headers:append(":scheme", "http")
+				req_headers:append(":path", "/")
+				req_headers:append(":authority", "example.com")
+				assert(client_stream:write_headers(req_headers, true))
+				assert(client_stream:get_headers())
+				assert("closed", client_stream.state)
+				-- both sides now have stream in closed state
+				-- send server a RST_STREAM: it should get ignored
+				assert(client_stream:rst_stream("post-closed rst_stream"))
+				assert(c:close())
+			end)
+			cq:wrap(function()
+				local stream = assert(s:get_next_incoming_stream())
+				assert(stream:get_headers())
+				local res_headers = new_headers()
+				res_headers:append(":status", "200")
+				assert(stream:write_headers(res_headers, true))
+				-- both sides now have stream in closed state
+				assert("closed", stream.state)
+				-- process incoming frames until EOF (i.e. drain RST_STREAM)
+				-- the RST_STREAM frame should be ignored.
+				assert(s:loop())
+				assert(s:close())
+			end)
+			cq:wrap(function()
+				assert(s:loop())
+			end)
+			assert_loop(cq, TEST_TIMEOUT)
+			assert.truthy(cq:empty())
+		end)
+	end)
+	describe("push_promise", function()
+		it("permits a simple push promise from server => client", function()
+			local s, c = new_pair()
+			local cq = cqueues.new()
+			cq:wrap(function()
+				local client_stream = c:new_stream()
+				local req_headers = new_headers()
+				req_headers:append(":method", "GET")
+				req_headers:append(":scheme", "http")
+				req_headers:append(":path", "/")
+				req_headers:append(":authority", "example.com")
+				assert(client_stream:write_headers(req_headers, true))
+				local pushed_stream = assert(c:get_next_incoming_stream())
+				do
+					local h = assert(pushed_stream:get_headers())
+					assert.same("GET", h:get(":method"))
+					assert.same("http", h:get(":scheme"))
+					assert.same("/foo", h:get(":path"))
+					assert.same(req_headers:get(":authority"), h:get(":authority"))
+					assert.same(nil, pushed_stream:get_next_chunk())
+				end
+				assert(c:close())
+			end)
+			cq:wrap(function()
+				local stream = assert(s:get_next_incoming_stream())
+				do
+					local h = assert(stream:get_headers())
+					assert.same("GET", h:get(":method"))
+					assert.same("http", h:get(":scheme"))
+					assert.same("/", h:get(":path"))
+					assert.same("example.com", h:get(":authority"))
+					assert.same(nil, stream:get_next_chunk())
+				end
+				local pushed_stream do
+					local req_headers = new_headers()
+					req_headers:append(":method", "GET")
+					req_headers:append(":scheme", "http")
+					req_headers:append(":path", "/foo")
+					req_headers:append(":authority", "example.com")
+					pushed_stream = assert(stream:push_promise(req_headers))
+				end
+				do
+					local req_headers = new_headers()
+					req_headers:append(":status", "200")
+					assert(pushed_stream:write_headers(req_headers, true))
+				end
+				assert(s:close())
+			end)
+			assert_loop(cq, TEST_TIMEOUT)
+			assert.truthy(cq:empty())
+		end)
+		it("handles large header blocks", function()
+			local s, c = new_pair()
+			local cq = cqueues.new()
+			cq:wrap(function()
+				local client_stream = c:new_stream()
+				local req_headers = new_headers()
+				req_headers:append(":method", "GET")
+				req_headers:append(":scheme", "http")
+				req_headers:append(":path", "/")
+				req_headers:append(":authority", "example.com")
+				assert(client_stream:write_headers(req_headers, true))
+				local pushed_stream = assert(c:get_next_incoming_stream())
+				do
+					local h = assert(pushed_stream:get_headers())
+					assert.same("GET", h:get(":method"))
+					assert.same("http", h:get(":scheme"))
+					assert.same("/foo", h:get(":path"))
+					assert.same(req_headers:get(":authority"), h:get(":authority"))
+					assert.same(nil, pushed_stream:get_next_chunk())
+				end
+				assert(c:close())
+			end)
+			cq:wrap(function()
+				local stream = assert(s:get_next_incoming_stream())
+				do
+					local h = assert(stream:get_headers())
+					assert.same("GET", h:get(":method"))
+					assert.same("http", h:get(":scheme"))
+					assert.same("/", h:get(":path"))
+					assert.same("example.com", h:get(":authority"))
+					assert.same(nil, stream:get_next_chunk())
+				end
+				local pushed_stream do
+					local req_headers = new_headers()
+					req_headers:append(":method", "GET")
+					req_headers:append(":scheme", "http")
+					req_headers:append(":path", "/foo")
+					req_headers:append(":authority", "example.com")
+					req_headers:append("unknown", ("a"):rep(16384*3)) -- at least 3 frames worth
+					pushed_stream = assert(stream:push_promise(req_headers))
+				end
+				do
+					local req_headers = new_headers()
+					req_headers:append(":status", "200")
+					assert(pushed_stream:write_headers(req_headers, true))
+				end
+				assert(s:close())
+			end)
+			assert_loop(cq, TEST_TIMEOUT)
+			assert.truthy(cq:empty())
+		end)
+	end)
+end)
diff --git a/spec/helper.lua b/spec/helper.lua
index f81395b..7e134bf 100644
--- a/spec/helper.lua
+++ b/spec/helper.lua
@@ -19,3 +19,13 @@ if has_luacov then
 		return wrap(self, func, ...)
 	end)
 end
+
+-- Allow tests to pick up configured locale
+local locale = os.getenv("LOCALE")
+if locale then
+	os.setlocale(locale)
+	if locale ~= os.setlocale(locale) then
+		print("Locale " .. locale .. " is not available.")
+		os.exit(1) -- busted doesn't fail if helper script throws errors: https://github.com/Olivine-Labs/busted/issues/549
+	end
+end
diff --git a/spec/hsts_spec.lua b/spec/hsts_spec.lua
index e01bc39..90eb6ca 100644
--- a/spec/hsts_spec.lua
+++ b/spec/hsts_spec.lua
@@ -9,12 +9,26 @@ describe("hsts module", function()
 	end)
 	it("can be cloned", function()
 		local s = http_hsts.new_store()
-		assert.same(s, s:clone())
+		do
+			local clone = s:clone()
+			local old_heap = s.expiry_heap
+			s.expiry_heap = nil
+			clone.expiry_heap = nil
+			assert.same(s, clone)
+			s.expiry_heap = old_heap
+		end
 		assert.truthy(s:store("foo.example.com", {
 			["max-age"] = "100";
 		}))
+		do
+			local clone = s:clone()
+			local old_heap = s.expiry_heap
+			s.expiry_heap = nil
+			clone.expiry_heap = nil
+			assert.same(s, clone)
+			s.expiry_heap = old_heap
+		end
 		local clone = s:clone()
-		assert.same(s, clone)
 		assert.truthy(s:check("foo.example.com"))
 		assert.truthy(clone:check("foo.example.com"))
 	end)
@@ -93,4 +107,22 @@ describe("hsts module", function()
 		assert.falsy(s:check("example.com"))
 		assert.truthy(s:check("keep.me"))
 	end)
+	it("enforces .max_items", function()
+		local s = http_hsts.new_store()
+		s.max_items = 0
+		assert.falsy(s:store("example.com", {
+			["max-age"] = "100";
+		}))
+		s.max_items = 1
+		assert.truthy(s:store("example.com", {
+			["max-age"] = "100";
+		}))
+		assert.falsy(s:store("other.com", {
+			["max-age"] = "100";
+		}))
+		s:remove("example.com", "/", "foo")
+		assert.truthy(s:store("other.com", {
+			["max-age"] = "100";
+		}))
+	end)
 end)
diff --git a/spec/request_spec.lua b/spec/request_spec.lua
index 3530e19..e0b1e05 100644
--- a/spec/request_spec.lua
+++ b/spec/request_spec.lua
@@ -629,6 +629,30 @@ describe("http.request module", function()
 				stream:shutdown()
 			end)
 		end)
+		it("works with a proxy server with a path component", function()
+			test(function(stream)
+				local h = assert(stream:get_headers())
+				local _, host, port = stream:localname()
+				local authority = http_util.to_authority(host, port, "http")
+				assert.same(authority, h:get ":authority")
+				assert.same("http://" .. authority .. "/", h:get(":path"))
+				local resp_headers = new_headers()
+				resp_headers:append(":status", "200")
+				assert(stream:write_headers(resp_headers, false))
+				assert(stream:write_chunk("hello world", true))
+			end, function(req)
+				req.proxy = {
+					scheme = "http";
+					host = req.host;
+					port = req.port;
+					path = "/path";
+				}
+				local headers, stream = assert(req:go())
+				assert.same("200", headers:get(":status"))
+				assert.same("hello world", assert(stream:get_body_as_string()))
+				stream:shutdown()
+			end)
+		end)
 		it("works with http proxies on OPTIONS requests", function()
 			test(function(stream)
 				local h = assert(stream:get_headers())
@@ -716,6 +740,34 @@ describe("http.request module", function()
 				stream:shutdown()
 			end)
 		end)
+		it("CONNECT proxy with path component", function()
+			test(function(stream, s)
+				local h = assert(stream:get_headers())
+				local resp_headers = new_headers()
+				resp_headers:append(":status", "200")
+				assert(stream:write_headers(resp_headers, false))
+				if h:get(":method") == "CONNECT" then
+					assert(stream.connection.version < 2)
+					local sock = assert(stream.connection:take_socket())
+					s:add_socket(sock)
+					return true
+				else
+					assert(stream:write_chunk("hello world", true))
+				end
+			end, function(req)
+				req.tls = true
+				req.proxy = {
+					scheme = "http";
+					host = req.host;
+					port = req.port;
+					path = "/path";
+				}
+				local headers, stream = assert(req:go())
+				assert.same("200", headers:get(":status"))
+				assert.same("hello world", assert(stream:get_body_as_string()))
+				stream:shutdown()
+			end)
+		end)
 		it("fails correctly on non CONNECT proxy", function()
 			test(function(stream)
 				local h = assert(stream:get_headers())
@@ -805,6 +857,7 @@ describe("http.request module", function()
 			end)
 			cq:wrap(function() -- SOCKS server
 				local sock = socks_server:accept()
+				sock:setmode("b", "b")
 				assert.same("\5", sock:read(1))
 				local n = assert(sock:read(1)):byte()
 				local available_auth = assert(sock:read(n))
diff --git a/spec/require-all.lua b/spec/require-all.lua
new file mode 100644
index 0000000..7e96311
--- /dev/null
+++ b/spec/require-all.lua
@@ -0,0 +1,27 @@
+-- This file is used for linting .tld files with typedlua
+
+require "http.bit"
+require "http.client"
+require "http.connection_common"
+require "http.cookie"
+require "http.h1_connection"
+require "http.h1_reason_phrases"
+require "http.h1_stream"
+require "http.h2_connection"
+require "http.h2_error"
+require "http.h2_stream"
+require "http.headers"
+require "http.hpack"
+require "http.hsts"
+require "http.proxies"
+require "http.request"
+require "http.server"
+require "http.socks"
+require "http.stream_common"
+require "http.tls"
+require "http.util"
+require "http.version"
+require "http.websocket"
+require "http.zlib"
+require "http.compat.prosody"
+require "http.compat.socket"
diff --git a/spec/server_spec.lua b/spec/server_spec.lua
index 4cd6785..aa415aa 100644
--- a/spec/server_spec.lua
+++ b/spec/server_spec.lua
@@ -1,8 +1,8 @@
 describe("http.server module", function()
-	local server = require "http.server"
-	local client = require "http.client"
+	local http_server = require "http.server"
+	local http_client = require "http.client"
 	local http_tls = require "http.tls"
-	local new_headers = require "http.headers".new
+	local http_headers = require "http.headers"
 	local cqueues = require "cqueues"
 	local ca = require "cqueues.auxlib"
 	local ce = require "cqueues.errno"
@@ -13,7 +13,7 @@ describe("http.server module", function()
 	it("rejects missing 'ctx' field", function()
 		local s, c = ca.assert(cs.pair())
 		assert.has.errors(function()
-			server.new {
+			http_server.new {
 				socket = s;
 				onstream = error;
 			}
@@ -24,7 +24,7 @@ describe("http.server module", function()
 	it("rejects invalid 'cq' field", function()
 		local s, c = ca.assert(cs.pair())
 		assert.has.errors(function()
-			server.new {
+			http_server.new {
 				socket = s;
 				tls = false;
 				onstream = error;
@@ -36,7 +36,7 @@ describe("http.server module", function()
 	end)
 	it("__tostring works", function()
 		local s, c = ca.assert(cs.pair())
-		s = server.new {
+		s = http_server.new {
 			socket = s;
 			tls = false;
 			onstream = error;
@@ -47,7 +47,7 @@ describe("http.server module", function()
 	end)
 	it(":onerror with no arguments doesn't clear", function()
 		local s, c = ca.assert(cs.pair())
-		s = server.new {
+		s = http_server.new {
 			socket = s;
 			tls = false;
 			onstream = error;
@@ -82,7 +82,7 @@ describe("http.server module", function()
 			s:close()
 		end)
 		options.onstream = onstream
-		local s = assert(server.listen(options))
+		local s = assert(http_server.listen(options))
 		assert(s:listen())
 		cq:wrap(function()
 			assert_loop(s)
@@ -103,9 +103,9 @@ describe("http.server module", function()
 				ctx = non_verifying_tls_context;
 				version = client_version;
 			}
-			local conn = assert(client.connect(client_options))
+			local conn = assert(http_client.connect(client_options))
 			local stream = conn:new_stream()
-			local headers = new_headers()
+			local headers = http_headers.new()
 			headers:append(":authority", "myauthority")
 			headers:append(":method", "GET")
 			headers:append(":path", "/")
@@ -179,65 +179,57 @@ describe("http.server module", function()
 	end)
 	it("taking socket from underlying connection is handled well by server", function()
 		local cq = cqueues.new()
-		local onstream = spy.new(function(s, stream)
+		local onstream = spy.new(function(server, stream)
 			local sock = stream.connection:take_socket()
-			s:close()
+			server:close()
 			assert.same("test", sock:read("*a"))
 			sock:close()
 		end);
-		local s = assert(server.listen {
-			host = "localhost";
-			port = 0;
+		local server = assert(http_server.new {
+			tls = false;
 			onstream = onstream;
 		})
-		assert(s:listen())
-		local _, host, port = s:localname()
+		local s, c = ca.assert(cs.pair())
+		server:add_socket(s)
 		cq:wrap(function()
-			assert_loop(s)
+			assert_loop(server)
 		end)
 		cq:wrap(function()
-			local sock = cs.connect {
-				host = host;
-				port = port;
-			}
-			assert(sock:write("test"))
-			assert(sock:flush())
-			sock:close()
+			assert(c:write("test"))
+			assert(c:flush())
+			c:close()
 		end)
 		assert_loop(cq, TEST_TIMEOUT)
 		assert.truthy(cq:empty())
 		assert.spy(onstream).was.called()
 	end)
 	it("an idle http2 stream doesn't block the server", function()
-		local s = assert(server.listen {
-			host = "localhost";
-			port = 0;
+		local server = assert(http_server.new {
+			tls = false;
+			version = 2;
 			onstream = function(_, stream)
 				if stream.id == 1 then
 					stream:get_next_chunk()
 				else
 					assert.same(3, stream.id)
 					assert.same({}, {stream:get_next_chunk()})
-					local headers = new_headers()
+					local headers = http_headers.new()
 					headers:append(":status", "200")
 					assert(stream:write_headers(headers, true))
 				end
 			end;
 		})
-		assert(s:listen())
-		local client_family, client_host, client_port = s:localname()
-		local conn = assert(client.connect({
-			family = client_family;
-			host = client_host;
-			port = client_port;
-			version = 2;
-		}))
+		local s, c = ca.assert(cs.pair())
+		server:add_socket(s)
 		local cq = cqueues.new()
 		cq:wrap(function()
-			assert_loop(s)
+			assert_loop(server)
 		end)
 		cq:wrap(function()
-			local headers = new_headers()
+			local conn = assert(http_client.negotiate(c, {
+				version = 2;
+			}))
+			local headers = http_headers.new()
 			headers:append(":authority", "myauthority")
 			headers:append(":method", "GET")
 			headers:append(":path", "/")
@@ -248,18 +240,61 @@ describe("http.server module", function()
 			assert(stream2:write_headers(headers, true))
 			assert(stream2:get_headers())
 			conn:close()
-			s:close()
+			server:close()
+		end)
+		assert_loop(cq, TEST_TIMEOUT)
+		assert.truthy(cq:empty())
+	end)
+	it("times out clients if intra_stream_timeout is exceeded", function()
+		local server = assert(http_server.new {
+			tls = false;
+			onstream = function(_, stream)
+				assert(stream:get_headers())
+				local headers = http_headers.new()
+				headers:append(":status", "200")
+				assert(stream:write_headers(headers, true))
+			end;
+			intra_stream_timeout = 0.1;
+		})
+		local s, c = ca.assert(cs.pair())
+		server:add_socket(s)
+		local cq = cqueues.new()
+		cq:wrap(function()
+			assert_loop(server)
+		end)
+		cq:wrap(function()
+			local conn = assert(http_client.negotiate(c, {
+				version = 1.1;
+			}))
+			local headers = http_headers.new()
+			headers:append(":method", "GET")
+			headers:append(":scheme", "http")
+			headers:append(":path", "/")
+			headers:append(":authority", "foo")
+			-- Normal request
+			local stream1 = conn:new_stream()
+			assert(stream1:write_headers(headers, true))
+			assert(stream1:get_headers())
+			-- Wait for less than intra_stream_timeout: should work as normal
+			cqueues.sleep(0.05)
+			local stream2 = conn:new_stream()
+			assert(stream2:write_headers(headers, true))
+			assert(stream2:get_headers())
+			-- Wait for more than intra_stream_timeout: server should have closed connection
+			cqueues.sleep(0.2)
+			local stream3 = conn:new_stream()
+			assert.same(ce.EPIPE, select(3, stream3:write_headers(headers, true)))
 		end)
 		assert_loop(cq, TEST_TIMEOUT)
 		assert.truthy(cq:empty())
 	end)
 	it("allows pausing+resuming the server", function()
-		local s = assert(server.listen {
+		local s = assert(http_server.listen {
 			host = "localhost";
 			port = 0;
 			onstream = function(_, stream)
 				assert(stream:get_headers())
-				local headers = new_headers()
+				local headers = http_headers.new()
 				headers:append(":status", "200")
 				assert(stream:write_headers(headers, true))
 			end;
@@ -271,7 +306,7 @@ describe("http.server module", function()
 			host = client_host;
 			port = client_port;
 		}
-		local headers = new_headers()
+		local headers = http_headers.new()
 		headers:append(":authority", "myauthority")
 		headers:append(":method", "GET")
 		headers:append(":path", "/")
@@ -282,7 +317,7 @@ describe("http.server module", function()
 			assert_loop(s)
 		end)
 		local function do_req(timeout)
-			local conn = assert(client.connect(client_options))
+			local conn = assert(http_client.connect(client_options))
 			local stream = assert(conn:new_stream())
 			assert(stream:write_headers(headers, true))
 			local ok, err, errno = stream:get_headers(timeout)
@@ -303,4 +338,22 @@ describe("http.server module", function()
 		assert_loop(cq, TEST_TIMEOUT)
 		assert.truthy(cq:empty())
 	end)
+	it("shouldn't throw an error calling :listen() after :close()", function()
+		local s = assert(http_server.listen {
+			host = "localhost";
+			port = 0;
+			onstream = function() end;
+		})
+		s:close()
+		s:listen()
+	end)
+	it("shouldn't throw an error calling :localname() after :close()", function()
+		local s = assert(http_server.listen {
+			host = "localhost";
+			port = 0;
+			onstream = function() end;
+		})
+		s:close()
+		s:localname()
+	end)
 end)
diff --git a/spec/stream_common_spec.lua b/spec/stream_common_spec.lua
index 2392f0f..bdf35c6 100644
--- a/spec/stream_common_spec.lua
+++ b/spec/stream_common_spec.lua
@@ -10,16 +10,20 @@ describe("http.stream_common", function()
 		c = h1_connection.new(c, "client", version)
 		return s, c
 	end
+	local function new_request_headers()
+		local headers = new_headers()
+		headers:append(":method", "GET")
+		headers:append(":scheme", "http")
+		headers:append(":authority", "myauthority")
+		headers:append(":path", "/")
+		return headers
+	end
 	it("Can read a number of characters", function()
 		local server, client = new_pair(1.1)
 		local cq = cqueues.new()
 		cq:wrap(function()
 			local stream = client:new_stream()
-			local headers = new_headers()
-			headers:append(":authority", "myauthority")
-			headers:append(":method", "GET")
-			headers:append(":path", "/")
-			assert(stream:write_headers(headers, false))
+			assert(stream:write_headers(new_request_headers(), false))
 			assert(stream:write_chunk("foo", false))
 			assert(stream:write_chunk("\nb", false))
 			assert(stream:write_chunk("ar\n", true))
@@ -47,11 +51,7 @@ describe("http.stream_common", function()
 		local cq = cqueues.new()
 		cq:wrap(function()
 			local stream = client:new_stream()
-			local headers = new_headers()
-			headers:append(":authority", "myauthority")
-			headers:append(":method", "GET")
-			headers:append(":path", "/")
-			assert(stream:write_headers(headers, false))
+			assert(stream:write_headers(new_request_headers(), false))
 			assert(stream:write_chunk("foo", false))
 			assert(stream:write_chunk("\nb", false))
 			assert(stream:write_chunk("ar\n", true))
@@ -72,11 +72,7 @@ describe("http.stream_common", function()
 		local cq = cqueues.new()
 		cq:wrap(function()
 			local stream = client:new_stream()
-			local headers = new_headers()
-			headers:append(":authority", "myauthority")
-			headers:append(":method", "GET")
-			headers:append(":path", "/")
-			assert(stream:write_headers(headers, false))
+			assert(stream:write_headers(new_request_headers(), false))
 			assert(stream:write_chunk("hello world!", true))
 		end)
 		cq:wrap(function()
@@ -89,28 +85,116 @@ describe("http.stream_common", function()
 		client:close()
 		server:close()
 	end)
-	it("can write body from temporary file", function()
-		local server, client = new_pair(1.1)
-		local cq = cqueues.new()
-		cq:wrap(function()
-			local file = io.tmpfile()
-			assert(file:write("hello world!"))
-			assert(file:seek("set"))
-			local stream = client:new_stream()
-			local headers = new_headers()
-			headers:append(":authority", "myauthority")
-			headers:append(":method", "GET")
-			headers:append(":path", "/")
-			assert(stream:write_headers(headers, false))
-			assert(stream:write_body_from_file(file))
+	describe("write_body_from_file", function()
+		it("works with a temporary file", function()
+			local server, client = new_pair(1.1)
+			local cq = cqueues.new()
+			cq:wrap(function()
+				local file = io.tmpfile()
+				assert(file:write("hello world!"))
+				assert(file:seek("set"))
+				local stream = client:new_stream()
+				assert(stream:write_headers(new_request_headers(), false))
+				assert(stream:write_body_from_file(file))
+			end)
+			cq:wrap(function()
+				local stream = assert(server:get_next_incoming_stream())
+				assert.same("hello world!", assert(stream:get_body_as_string()))
+			end)
+			assert_loop(cq, TEST_TIMEOUT)
+			assert.truthy(cq:empty())
+			client:close()
+			server:close()
 		end)
-		cq:wrap(function()
-			local stream = assert(server:get_next_incoming_stream())
-			assert.same("hello world!", assert(stream:get_body_as_string()))
+		it("works using the options form", function()
+			local server, client = new_pair(1.1)
+			local cq = cqueues.new()
+			cq:wrap(function()
+				local file = io.tmpfile()
+				assert(file:write("hello world!"))
+				assert(file:seek("set"))
+				local stream = client:new_stream()
+				assert(stream:write_headers(new_request_headers(), false))
+				assert(stream:write_body_from_file({
+					file = file;
+				}))
+			end)
+			cq:wrap(function()
+				local stream = assert(server:get_next_incoming_stream())
+				assert.same("hello world!", assert(stream:get_body_as_string()))
+			end)
+			assert_loop(cq, TEST_TIMEOUT)
+			assert.truthy(cq:empty())
+			client:close()
+			server:close()
+		end)
+		it("validates .count option", function()
+			local server, client = new_pair(1.1)
+			local cq = cqueues.new()
+			cq:wrap(function()
+				local stream = client:new_stream()
+				assert(stream:write_headers(new_request_headers(), false))
+				assert.has_error(function()
+					stream:write_body_from_file({
+						file = io.tmpfile();
+						count = "invalid count field";
+					})
+				end)
+			end)
+			cq:wrap(function()
+				assert(server:get_next_incoming_stream())
+			end)
+			assert_loop(cq, TEST_TIMEOUT)
+			assert.truthy(cq:empty())
+			client:close()
+			server:close()
+		end)
+		it("limits number of bytes when using .count option", function()
+			local server, client = new_pair(1.1)
+			local cq = cqueues.new()
+			cq:wrap(function()
+				local file = io.tmpfile()
+				assert(file:write("hello world!"))
+				assert(file:seek("set"))
+				local stream = client:new_stream()
+				assert(stream:write_headers(new_request_headers(), false))
+				assert(stream:write_body_from_file({
+					file = file;
+					count = 5;
+				}))
+			end)
+			cq:wrap(function()
+				local stream = assert(server:get_next_incoming_stream())
+				assert.same("hello", assert(stream:get_body_as_string()))
+			end)
+			assert_loop(cq, TEST_TIMEOUT)
+			assert.truthy(cq:empty())
+			client:close()
+			server:close()
+		end)
+		it("reports an error on early EOF", function()
+			local server, client = new_pair(1.1)
+			local cq = cqueues.new()
+			cq:wrap(function()
+				local file = io.tmpfile()
+				assert(file:write("hello world!"))
+				assert(file:seek("set"))
+				local stream = client:new_stream()
+				assert(stream:write_headers(new_request_headers(), false))
+				assert.has_error(function()
+					assert(stream:write_body_from_file({
+						file = file;
+						count = 50; -- longer than the file
+					}))
+				end)
+			end)
+			cq:wrap(function()
+				assert(server:get_next_incoming_stream())
+			end)
+			assert_loop(cq, TEST_TIMEOUT)
+			assert.truthy(cq:empty())
+			client:close()
+			server:close()
 		end)
-		assert_loop(cq, TEST_TIMEOUT)
-		assert.truthy(cq:empty())
-		client:close()
-		server:close()
 	end)
 end)
diff --git a/spec/util_spec.lua b/spec/util_spec.lua
index da24fb5..a87aaa4 100644
--- a/spec/util_spec.lua
+++ b/spec/util_spec.lua
@@ -1,5 +1,5 @@
 describe("http.util module", function()
-	local unpack = table.unpack or unpack -- luacheck: ignore 113
+	local unpack = table.unpack or unpack -- luacheck: ignore 113 143
 	local util = require "http.util"
 	it("decodeURI works", function()
 		assert.same("Encoded string", util.decodeURI("Encoded%20string"))
@@ -56,6 +56,25 @@ describe("http.util module", function()
 			assert.same(t, r)
 		end
 	end)
+	it("is_safe_method works", function()
+		assert.same(true, util.is_safe_method "GET")
+		assert.same(true, util.is_safe_method "HEAD")
+		assert.same(true, util.is_safe_method "OPTIONS")
+		assert.same(true, util.is_safe_method "TRACE")
+		assert.same(false, util.is_safe_method "POST")
+		assert.same(false, util.is_safe_method "PUT")
+	end)
+	it("is_ip works", function()
+		assert.same(true, util.is_ip "127.0.0.1")
+		assert.same(true, util.is_ip "192.168.1.1")
+		assert.same(true, util.is_ip "::")
+		assert.same(true, util.is_ip "::1")
+		assert.same(true, util.is_ip "2001:0db8:85a3:0042:1000:8a2e:0370:7334")
+		assert.same(true, util.is_ip "::FFFF:204.152.189.116")
+		assert.same(false, util.is_ip "not an ip")
+		assert.same(false, util.is_ip "0x80")
+		assert.same(false, util.is_ip "::FFFF:0.0.0")
+	end)
 	it("split_authority works", function()
 		assert.same({"example.com", 80}, {util.split_authority("example.com", "http")})
 		assert.same({"example.com", 8000}, {util.split_authority("example.com:8000", "http")})
diff --git a/spec/websocket_spec.lua b/spec/websocket_spec.lua
index fbbdf52..d0f49e6 100644
--- a/spec/websocket_spec.lua
+++ b/spec/websocket_spec.lua
@@ -92,6 +92,7 @@ describe("http.websocket", function()
 		end
 		local correct_headers = http_headers.new()
 		correct_headers:append(":method", "GET")
+		correct_headers:append(":scheme", "http")
 		correct_headers:append(":authority", "example.com")
 		correct_headers:append(":path", "/")
 		correct_headers:append("upgrade", "websocket")
@@ -169,14 +170,18 @@ describe("http.websocket", function()
 	end)
 end)
 describe("http.websocket module two sided tests", function()
+	local onerror  = require "http.connection_common".onerror
 	local server = require "http.server"
 	local util = require "http.util"
 	local websocket = require "http.websocket"
 	local cqueues = require "cqueues"
 	local ca = require "cqueues.auxlib"
+	local ce = require "cqueues.errno"
 	local cs = require "cqueues.socket"
 	local function new_pair()
 		local s, c = ca.assert(cs.pair())
+		s:onerror(onerror)
+		c:onerror(onerror)
 		local ws_server = websocket.new("server")
 		ws_server.socket = s
 		ws_server.readyState = 1
@@ -201,6 +206,26 @@ describe("http.websocket module two sided tests", function()
 		assert_loop(cq, TEST_TIMEOUT)
 		assert.truthy(cq:empty())
 	end)
+	it("timeouts return nil, err, errno", function()
+		local cq = cqueues.new()
+		local c, s = new_pair()
+		local ok, _, errno = c:receive(0)
+		assert.same(nil, ok)
+		assert.same(ce.ETIMEDOUT, errno)
+		-- Check it still works afterwards
+		cq:wrap(function()
+			assert(c:send("hello"))
+			assert.same("world", c:receive())
+			assert(c:close())
+		end)
+		cq:wrap(function()
+			assert.same("hello", s:receive())
+			assert(s:send("world"))
+			assert(s:close())
+		end)
+		assert_loop(cq, TEST_TIMEOUT)
+		assert.truthy(cq:empty())
+	end)
 	it("doesn't fail when data contains a \\r\\n", function()
 		local cq = cqueues.new()
 		local c, s = new_pair()
@@ -351,6 +376,7 @@ describe("http.websocket module two sided tests", function()
 			port = 0;
 			onstream = function(s, stream)
 				local headers = assert(stream:get_headers())
+				assert.same("http", headers:get(":scheme"))
 				local ws = websocket.new_from_stream(stream, headers)
 				assert(ws:accept())
 				assert(ws:close())